v8 3.25.30 (node 0.11.13)
V8 is Google's open source JavaScript engine
macro-assembler-arm64.cc
1 // Copyright 2013 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
4 // met:
5 //
6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided
11 // with the distribution.
12 // * Neither the name of Google Inc. nor the names of its
13 // contributors may be used to endorse or promote products derived
14 // from this software without specific prior written permission.
15 //
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 
28 #include "v8.h"
29 
30 #if V8_TARGET_ARCH_ARM64
31 
32 #include "bootstrapper.h"
33 #include "codegen.h"
34 #include "cpu-profiler.h"
35 #include "debug.h"
36 #include "isolate-inl.h"
37 #include "runtime.h"
38 
39 namespace v8 {
40 namespace internal {
41 
42 // Define a fake double underscore to use with the ASM_UNIMPLEMENTED macros.
43 #define __
44 
45 
46 MacroAssembler::MacroAssembler(Isolate* arg_isolate,
47  byte * buffer,
48  unsigned buffer_size)
49  : Assembler(arg_isolate, buffer, buffer_size),
50  generating_stub_(false),
51 #if DEBUG
52  allow_macro_instructions_(true),
53 #endif
54  has_frame_(false),
55  use_real_aborts_(true),
56  sp_(jssp), tmp_list_(ip0, ip1), fptmp_list_(fp_scratch) {
57  if (isolate() != NULL) {
58  code_object_ = Handle<Object>(isolate()->heap()->undefined_value(),
59  isolate());
60  }
61 }
62 
63 
64 void MacroAssembler::LogicalMacro(const Register& rd,
65  const Register& rn,
66  const Operand& operand,
67  LogicalOp op) {
68  UseScratchRegisterScope temps(this);
69 
70  if (operand.NeedsRelocation()) {
71  Register temp = temps.AcquireX();
72  LoadRelocated(temp, operand);
73  Logical(rd, rn, temp, op);
74 
75  } else if (operand.IsImmediate()) {
76  int64_t immediate = operand.immediate();
77  unsigned reg_size = rd.SizeInBits();
78  ASSERT(rd.Is64Bits() || is_uint32(immediate));
79 
80  // If the operation is NOT, invert the operation and immediate.
81  if ((op & NOT) == NOT) {
82  op = static_cast<LogicalOp>(op & ~NOT);
83  immediate = ~immediate;
84  if (rd.Is32Bits()) {
85  immediate &= kWRegMask;
86  }
87  }
88 
89  // Special cases for all set or all clear immediates.
90  if (immediate == 0) {
91  switch (op) {
92  case AND:
93  Mov(rd, 0);
94  return;
95  case ORR: // Fall through.
96  case EOR:
97  Mov(rd, rn);
98  return;
99  case ANDS: // Fall through.
100  case BICS:
101  break;
102  default:
103  UNREACHABLE();
104  }
105  } else if ((rd.Is64Bits() && (immediate == -1L)) ||
106  (rd.Is32Bits() && (immediate == 0xffffffffL))) {
107  switch (op) {
108  case AND:
109  Mov(rd, rn);
110  return;
111  case ORR:
112  Mov(rd, immediate);
113  return;
114  case EOR:
115  Mvn(rd, rn);
116  return;
117  case ANDS: // Fall through.
118  case BICS:
119  break;
120  default:
121  UNREACHABLE();
122  }
123  }
124 
125  unsigned n, imm_s, imm_r;
126  if (IsImmLogical(immediate, reg_size, &n, &imm_s, &imm_r)) {
127  // Immediate can be encoded in the instruction.
128  LogicalImmediate(rd, rn, n, imm_s, imm_r, op);
129  } else {
130  // Immediate can't be encoded: synthesize using move immediate.
131  Register temp = temps.AcquireSameSizeAs(rn);
132  Mov(temp, immediate);
133  if (rd.Is(csp)) {
134  // If rd is the stack pointer we cannot use it as the destination
135  // register so we use the temp register as an intermediate again.
136  Logical(temp, rn, temp, op);
137  Mov(csp, temp);
138  } else {
139  Logical(rd, rn, temp, op);
140  }
141  }
142 
143  } else if (operand.IsExtendedRegister()) {
144  ASSERT(operand.reg().SizeInBits() <= rd.SizeInBits());
145  // Add/sub extended supports shift <= 4. We want to support exactly the
146  // same modes here.
147  ASSERT(operand.shift_amount() <= 4);
148  ASSERT(operand.reg().Is64Bits() ||
149  ((operand.extend() != UXTX) && (operand.extend() != SXTX)));
150  Register temp = temps.AcquireSameSizeAs(rn);
151  EmitExtendShift(temp, operand.reg(), operand.extend(),
152  operand.shift_amount());
153  Logical(rd, rn, temp, op);
154 
155  } else {
156  // The operand can be encoded in the instruction.
157  ASSERT(operand.IsShiftedRegister());
158  Logical(rd, rn, operand, op);
159  }
160 }
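// Illustration of the paths above (not exhaustive): an operation carrying the
// NOT bit, e.g. Bic(w0, w1, 0xff), is rewritten as an AND with the inverted
// immediate 0xffffff00. The all-clear and all-set shortcuts mean that
// And(x0, x1, 0) collapses to Mov(x0, 0) and Orr(x0, x1, 0) collapses to
// Mov(x0, x1), each a single instruction.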
161 
162 
163 void MacroAssembler::Mov(const Register& rd, uint64_t imm) {
164  ASSERT(allow_macro_instructions_);
165  ASSERT(is_uint32(imm) || is_int32(imm) || rd.Is64Bits());
166  ASSERT(!rd.IsZero());
167 
168  // TODO(all) extend to support more immediates.
169  //
170  // Immediates on AArch64 can be produced using an initial value, and zero to
171  // three move-keep operations.
172  //
173  // Initial values can be generated with:
174  // 1. 64-bit move zero (movz).
175  // 2. 32-bit move inverted (movn).
176  // 3. 64-bit move inverted.
177  // 4. 32-bit orr immediate.
178  // 5. 64-bit orr immediate.
179  // Move-keep may then be used to modify each of the 16-bit half-words.
180  //
181  // The code below supports all five initial value generators, and
182  // applying move-keep operations to move-zero and move-inverted initial
183  // values.
184 
185  unsigned reg_size = rd.SizeInBits();
186  unsigned n, imm_s, imm_r;
187  if (IsImmMovz(imm, reg_size) && !rd.IsSP()) {
188  // Immediate can be represented in a move zero instruction. Movz can't
189  // write to the stack pointer.
190  movz(rd, imm);
191  } else if (IsImmMovn(imm, reg_size) && !rd.IsSP()) {
192  // Immediate can be represented in a move inverted instruction. Movn can't
193  // write to the stack pointer.
194  movn(rd, rd.Is64Bits() ? ~imm : (~imm & kWRegMask));
195  } else if (IsImmLogical(imm, reg_size, &n, &imm_s, &imm_r)) {
196  // Immediate can be represented in a logical orr instruction.
197  LogicalImmediate(rd, AppropriateZeroRegFor(rd), n, imm_s, imm_r, ORR);
198  } else {
199  // Generic immediate case. Imm will be represented by
200  // [imm3, imm2, imm1, imm0], where each imm is 16 bits.
201  // A move-zero or move-inverted is generated for the first non-zero or
202  // non-0xffff immX, and a move-keep for subsequent non-zero immX.
203 
204  uint64_t ignored_halfword = 0;
205  bool invert_move = false;
206  // If the number of 0xffff halfwords is greater than the number of 0x0000
207  // halfwords, it's more efficient to use move-inverted.
208  if (CountClearHalfWords(~imm, reg_size) >
209  CountClearHalfWords(imm, reg_size)) {
210  ignored_halfword = 0xffffL;
211  invert_move = true;
212  }
213 
214  // Mov instructions can't move immediate values into the stack pointer, so
215  // set up a temporary register, if needed.
216  UseScratchRegisterScope temps(this);
217  Register temp = rd.IsSP() ? temps.AcquireSameSizeAs(rd) : rd;
218 
219  // Iterate through the halfwords. Use movn/movz for the first non-ignored
220  // halfword, and movk for subsequent halfwords.
221  ASSERT((reg_size % 16) == 0);
222  bool first_mov_done = false;
223  for (unsigned i = 0; i < (rd.SizeInBits() / 16); i++) {
224  uint64_t imm16 = (imm >> (16 * i)) & 0xffffL;
225  if (imm16 != ignored_halfword) {
226  if (!first_mov_done) {
227  if (invert_move) {
228  movn(temp, (~imm16) & 0xffffL, 16 * i);
229  } else {
230  movz(temp, imm16, 16 * i);
231  }
232  first_mov_done = true;
233  } else {
234  // Construct a wider constant.
235  movk(temp, imm16, 16 * i);
236  }
237  }
238  }
239  ASSERT(first_mov_done);
240 
241  // Move the temporary if the original destination register was the stack
242  // pointer.
243  if (rd.IsSP()) {
244  mov(rd, temp);
245  }
246  }
247 }
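// Worked example for the generic case above (illustrative): the constant
// 0x0000123400005678 is neither movz-, movn- nor orr-encodable, and it has two
// zero halfwords, so the loop emits
//   movz x0, #0x5678            // halfword 0
//   movk x0, #0x1234, lsl #32   // halfword 2
// while the two 0x0000 halfwords are simply skipped.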
248 
249 
250 void MacroAssembler::Mov(const Register& rd,
251  const Operand& operand,
252  DiscardMoveMode discard_mode) {
253  ASSERT(allow_macro_instructions_);
254  ASSERT(!rd.IsZero());
255 
256  // Provide a swap register for instructions that need to write into the
257  // system stack pointer (and can't do this inherently).
258  UseScratchRegisterScope temps(this);
259  Register dst = (rd.IsSP()) ? temps.AcquireSameSizeAs(rd) : rd;
260 
261  if (operand.NeedsRelocation()) {
262  LoadRelocated(dst, operand);
263 
264  } else if (operand.IsImmediate()) {
265  // Call the macro assembler for generic immediates.
266  Mov(dst, operand.immediate());
267 
268  } else if (operand.IsShiftedRegister() && (operand.shift_amount() != 0)) {
269  // Emit a shift instruction if moving a shifted register. This operation
270  // could also be achieved using an orr instruction (like orn used by Mvn),
271  // but using a shift instruction makes the disassembly clearer.
272  EmitShift(dst, operand.reg(), operand.shift(), operand.shift_amount());
273 
274  } else if (operand.IsExtendedRegister()) {
275  // Emit an extend instruction if moving an extended register. This handles
276  // extend with post-shift operations, too.
277  EmitExtendShift(dst, operand.reg(), operand.extend(),
278  operand.shift_amount());
279 
280  } else {
281  // Otherwise, emit a register move only if the registers are distinct, or
282  // if they are not X registers.
283  //
284  // Note that mov(w0, w0) is not a no-op because it clears the top word of
285  // x0. A flag is provided (kDiscardForSameWReg) if a move between the same W
286  // registers is not required to clear the top word of the X register. In
287  // this case, the instruction is discarded.
288  //
289  // If csp is an operand, add #0 is emitted, otherwise, orr #0.
290  if (!rd.Is(operand.reg()) || (rd.Is32Bits() &&
291  (discard_mode == kDontDiscardForSameWReg))) {
292  Assembler::mov(rd, operand.reg());
293  }
294  // This case can handle writes into the system stack pointer directly.
295  dst = rd;
296  }
297 
298  // Copy the result to the system stack pointer.
299  if (!dst.Is(rd)) {
300  ASSERT(rd.IsSP());
301  Assembler::mov(rd, dst);
302  }
303 }
304 
305 
306 void MacroAssembler::Mvn(const Register& rd, const Operand& operand) {
307  ASSERT(allow_macro_instructions_);
308 
309  if (operand.NeedsRelocation()) {
310  LoadRelocated(rd, operand);
311  mvn(rd, rd);
312 
313  } else if (operand.IsImmediate()) {
314  // Call the macro assembler for generic immediates.
315  Mov(rd, ~operand.immediate());
316 
317  } else if (operand.IsExtendedRegister()) {
318  // Emit two instructions for the extend case. This differs from Mov, as
319  // the extend and invert can't be achieved in one instruction.
320  EmitExtendShift(rd, operand.reg(), operand.extend(),
321  operand.shift_amount());
322  mvn(rd, rd);
323 
324  } else {
325  mvn(rd, operand);
326  }
327 }
328 
329 
330 unsigned MacroAssembler::CountClearHalfWords(uint64_t imm, unsigned reg_size) {
331  ASSERT((reg_size % 8) == 0);
332  int count = 0;
333  for (unsigned i = 0; i < (reg_size / 16); i++) {
334  if ((imm & 0xffff) == 0) {
335  count++;
336  }
337  imm >>= 16;
338  }
339  return count;
340 }
341 
342 
343 // The movz instruction can generate immediates containing an arbitrary 16-bit
344 // half-word, with remaining bits clear, e.g. 0x00001234, 0x0000123400000000.
345 bool MacroAssembler::IsImmMovz(uint64_t imm, unsigned reg_size) {
346  ASSERT((reg_size == kXRegSizeInBits) || (reg_size == kWRegSizeInBits));
347  return CountClearHalfWords(imm, reg_size) >= ((reg_size / 16) - 1);
348 }
349 
350 
351 // The movn instruction can generate immediates containing an arbitrary 16-bit
352 // half-word, with remaining bits set, e.g. 0xffff1234, 0xffff1234ffffffff.
353 bool MacroAssembler::IsImmMovn(uint64_t imm, unsigned reg_size) {
354  return IsImmMovz(~imm, reg_size);
355 }
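// For example, 0x0000000012340000 has three clear halfwords, so IsImmMovz
// accepts it (a single "movz x0, #0x1234, lsl #16" suffices), and its
// complement 0xffffffffedcbffff is therefore accepted by IsImmMovn.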
356 
357 
358 void MacroAssembler::ConditionalCompareMacro(const Register& rn,
359  const Operand& operand,
360  StatusFlags nzcv,
361  Condition cond,
362  ConditionalCompareOp op) {
363  ASSERT((cond != al) && (cond != nv));
364  if (operand.NeedsRelocation()) {
365  UseScratchRegisterScope temps(this);
366  Register temp = temps.AcquireX();
367  LoadRelocated(temp, operand);
368  ConditionalCompareMacro(rn, temp, nzcv, cond, op);
369 
370  } else if ((operand.IsShiftedRegister() && (operand.shift_amount() == 0)) ||
371  (operand.IsImmediate() && IsImmConditionalCompare(operand.immediate()))) {
372  // The immediate can be encoded in the instruction, or the operand is an
373  // unshifted register: call the assembler.
374  ConditionalCompare(rn, operand, nzcv, cond, op);
375 
376  } else {
377  // The operand isn't directly supported by the instruction: perform the
378  // operation on a temporary register.
379  UseScratchRegisterScope temps(this);
380  Register temp = temps.AcquireSameSizeAs(rn);
381  Mov(temp, operand);
382  ConditionalCompare(rn, temp, nzcv, cond, op);
383  }
384 }
385 
386 
387 void MacroAssembler::Csel(const Register& rd,
388  const Register& rn,
389  const Operand& operand,
390  Condition cond) {
391  ASSERT(allow_macro_instructions_);
392  ASSERT(!rd.IsZero());
393  ASSERT((cond != al) && (cond != nv));
394  if (operand.IsImmediate()) {
395  // Immediate argument. Handle special cases of 0, 1 and -1 using zero
396  // register.
397  int64_t imm = operand.immediate();
398  Register zr = AppropriateZeroRegFor(rn);
399  if (imm == 0) {
400  csel(rd, rn, zr, cond);
401  } else if (imm == 1) {
402  csinc(rd, rn, zr, cond);
403  } else if (imm == -1) {
404  csinv(rd, rn, zr, cond);
405  } else {
406  UseScratchRegisterScope temps(this);
407  Register temp = temps.AcquireSameSizeAs(rn);
408  Mov(temp, operand.immediate());
409  csel(rd, rn, temp, cond);
410  }
411  } else if (operand.IsShiftedRegister() && (operand.shift_amount() == 0)) {
412  // Unshifted register argument.
413  csel(rd, rn, operand.reg(), cond);
414  } else {
415  // All other arguments.
416  UseScratchRegisterScope temps(this);
417  Register temp = temps.AcquireSameSizeAs(rn);
418  Mov(temp, operand);
419  csel(rd, rn, temp, cond);
420  }
421 }
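// The zero-register special cases above mean, for instance, that
// Csel(x0, x1, 1, eq) is emitted as "csinc x0, x1, xzr, eq" (xzr + 1 == 1 on
// the not-taken path) and Csel(x0, x1, -1, lt) as "csinv x0, x1, xzr, lt",
// avoiding a temporary register for the immediate.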
422 
423 
424 void MacroAssembler::AddSubMacro(const Register& rd,
425  const Register& rn,
426  const Operand& operand,
427  FlagsUpdate S,
428  AddSubOp op) {
429  if (operand.IsZero() && rd.Is(rn) && rd.Is64Bits() && rn.Is64Bits() &&
430  !operand.NeedsRelocation() && (S == LeaveFlags)) {
431  // The instruction would be a nop. Avoid generating useless code.
432  return;
433  }
434 
435  if (operand.NeedsRelocation()) {
436  UseScratchRegisterScope temps(this);
437  Register temp = temps.AcquireX();
438  LoadRelocated(temp, operand);
439  AddSubMacro(rd, rn, temp, S, op);
440  } else if ((operand.IsImmediate() && !IsImmAddSub(operand.immediate())) ||
441  (rn.IsZero() && !operand.IsShiftedRegister()) ||
442  (operand.IsShiftedRegister() && (operand.shift() == ROR))) {
443  UseScratchRegisterScope temps(this);
444  Register temp = temps.AcquireSameSizeAs(rn);
445  Mov(temp, operand);
446  AddSub(rd, rn, temp, S, op);
447  } else {
448  AddSub(rd, rn, operand, S, op);
449  }
450 }
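// For example, Add(x0, x1, 0x12345) cannot use the add-immediate encoding
// (the value does not fit a 12-bit immediate, shifted or not), so the macro
// materialises it with Mov into a scratch register and emits a register-form
// add instead.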
451 
452 
453 void MacroAssembler::AddSubWithCarryMacro(const Register& rd,
454  const Register& rn,
455  const Operand& operand,
456  FlagsUpdate S,
457  AddSubWithCarryOp op) {
458  ASSERT(rd.SizeInBits() == rn.SizeInBits());
459  UseScratchRegisterScope temps(this);
460 
461  if (operand.NeedsRelocation()) {
462  Register temp = temps.AcquireX();
463  LoadRelocated(temp, operand);
464  AddSubWithCarryMacro(rd, rn, temp, S, op);
465 
466  } else if (operand.IsImmediate() ||
467  (operand.IsShiftedRegister() && (operand.shift() == ROR))) {
468  // Add/sub with carry (immediate or ROR shifted register.)
469  Register temp = temps.AcquireSameSizeAs(rn);
470  Mov(temp, operand);
471  AddSubWithCarry(rd, rn, temp, S, op);
472 
473  } else if (operand.IsShiftedRegister() && (operand.shift_amount() != 0)) {
474  // Add/sub with carry (shifted register).
475  ASSERT(operand.reg().SizeInBits() == rd.SizeInBits());
476  ASSERT(operand.shift() != ROR);
477  ASSERT(is_uintn(operand.shift_amount(),
478  rd.SizeInBits() == kXRegSizeInBits ? kXRegSizeInBitsLog2
479  : kWRegSizeInBitsLog2));
480  Register temp = temps.AcquireSameSizeAs(rn);
481  EmitShift(temp, operand.reg(), operand.shift(), operand.shift_amount());
482  AddSubWithCarry(rd, rn, temp, S, op);
483 
484  } else if (operand.IsExtendedRegister()) {
485  // Add/sub with carry (extended register).
486  ASSERT(operand.reg().SizeInBits() <= rd.SizeInBits());
487  // Add/sub extended supports a shift <= 4. We want to support exactly the
488  // same modes.
489  ASSERT(operand.shift_amount() <= 4);
490  ASSERT(operand.reg().Is64Bits() ||
491  ((operand.extend() != UXTX) && (operand.extend() != SXTX)));
492  Register temp = temps.AcquireSameSizeAs(rn);
493  EmitExtendShift(temp, operand.reg(), operand.extend(),
494  operand.shift_amount());
495  AddSubWithCarry(rd, rn, temp, S, op);
496 
497  } else {
498  // The addressing mode is directly supported by the instruction.
499  AddSubWithCarry(rd, rn, operand, S, op);
500  }
501 }
502 
503 
504 void MacroAssembler::LoadStoreMacro(const CPURegister& rt,
505  const MemOperand& addr,
506  LoadStoreOp op) {
507  int64_t offset = addr.offset();
508  LSDataSize size = CalcLSDataSize(op);
509 
510  // Check if an immediate offset fits in the immediate field of the
511  // appropriate instruction. If not, emit two instructions to perform
512  // the operation.
513  if (addr.IsImmediateOffset() && !IsImmLSScaled(offset, size) &&
514  !IsImmLSUnscaled(offset)) {
515  // Immediate offset that can't be encoded using unsigned or unscaled
516  // addressing modes.
517  UseScratchRegisterScope temps(this);
518  Register temp = temps.AcquireSameSizeAs(addr.base());
519  Mov(temp, addr.offset());
520  LoadStore(rt, MemOperand(addr.base(), temp), op);
521  } else if (addr.IsPostIndex() && !IsImmLSUnscaled(offset)) {
522  // Post-index beyond unscaled addressing range.
523  LoadStore(rt, MemOperand(addr.base()), op);
524  add(addr.base(), addr.base(), offset);
525  } else if (addr.IsPreIndex() && !IsImmLSUnscaled(offset)) {
526  // Pre-index beyond unscaled addressing range.
527  add(addr.base(), addr.base(), offset);
528  LoadStore(rt, MemOperand(addr.base()), op);
529  } else {
530  // Encodable in one load/store instruction.
531  LoadStore(rt, addr, op);
532  }
533 }
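// Roughly: a plain immediate offset is encodable either as a scaled unsigned
// 12-bit field (a multiple of the access size) or as an unscaled signed 9-bit
// offset. Something like Ldr(x0, MemOperand(x1, 0x123457)) fits neither form,
// so the offset is moved into a scratch register and a register-offset load
// is emitted instead.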
534 
535 
536 void MacroAssembler::Load(const Register& rt,
537  const MemOperand& addr,
538  Representation r) {
539  ASSERT(!r.IsDouble());
540 
541  if (r.IsInteger8()) {
542  Ldrsb(rt, addr);
543  } else if (r.IsUInteger8()) {
544  Ldrb(rt, addr);
545  } else if (r.IsInteger16()) {
546  Ldrsh(rt, addr);
547  } else if (r.IsUInteger16()) {
548  Ldrh(rt, addr);
549  } else if (r.IsInteger32()) {
550  Ldr(rt.W(), addr);
551  } else {
552  ASSERT(rt.Is64Bits());
553  Ldr(rt, addr);
554  }
555 }
556 
557 
558 void MacroAssembler::Store(const Register& rt,
559  const MemOperand& addr,
560  Representation r) {
561  ASSERT(!r.IsDouble());
562 
563  if (r.IsInteger8() || r.IsUInteger8()) {
564  Strb(rt, addr);
565  } else if (r.IsInteger16() || r.IsUInteger16()) {
566  Strh(rt, addr);
567  } else if (r.IsInteger32()) {
568  Str(rt.W(), addr);
569  } else {
570  ASSERT(rt.Is64Bits());
571  Str(rt, addr);
572  }
573 }
574 
575 
576 bool MacroAssembler::NeedExtraInstructionsOrRegisterBranch(
577  Label *label, ImmBranchType b_type) {
578  bool need_longer_range = false;
579  // There are two situations in which we care about the offset being out of
580  // range:
581  // - The label is bound but too far away.
582  // - The label is not bound but linked, and the previous branch
583  // instruction in the chain is too far away.
584  if (label->is_bound() || label->is_linked()) {
585  need_longer_range =
586  !Instruction::IsValidImmPCOffset(b_type, label->pos() - pc_offset());
587  }
588  if (!need_longer_range && !label->is_bound()) {
589  int max_reachable_pc = pc_offset() + Instruction::ImmBranchRange(b_type);
590  unresolved_branches_.insert(
591  std::pair<int, FarBranchInfo>(max_reachable_pc,
592  FarBranchInfo(pc_offset(), label)));
593  // Also maintain the next pool check.
594  next_veneer_pool_check_ =
595  Min(next_veneer_pool_check_,
596  max_reachable_pc - kVeneerDistanceCheckMargin);
597  }
598  return need_longer_range;
599 }
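// Context: the immediate branch ranges differ per branch type (roughly
// +/-32KB for tbz/tbnz, +/-1MB for cbz/cbnz and conditional b, +/-128MB for
// unconditional b). When a branch may not reach its target, the macro
// versions below invert the condition over a short local branch and emit an
// unconditional B to the real label instead.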
600 
601 
602 void MacroAssembler::B(Label* label, BranchType type, Register reg, int bit) {
603  ASSERT((reg.Is(NoReg) || type >= kBranchTypeFirstUsingReg) &&
604  (bit == -1 || type >= kBranchTypeFirstUsingBit));
605  if (kBranchTypeFirstCondition <= type && type <= kBranchTypeLastCondition) {
606  B(static_cast<Condition>(type), label);
607  } else {
608  switch (type) {
609  case always: B(label); break;
610  case never: break;
611  case reg_zero: Cbz(reg, label); break;
612  case reg_not_zero: Cbnz(reg, label); break;
613  case reg_bit_clear: Tbz(reg, bit, label); break;
614  case reg_bit_set: Tbnz(reg, bit, label); break;
615  default:
616  UNREACHABLE();
617  }
618  }
619 }
620 
621 
622 void MacroAssembler::B(Label* label, Condition cond) {
623  ASSERT(allow_macro_instructions_);
624  ASSERT((cond != al) && (cond != nv));
625 
626  Label done;
627  bool need_extra_instructions =
628  NeedExtraInstructionsOrRegisterBranch(label, CondBranchType);
629 
630  if (need_extra_instructions) {
631  b(&done, InvertCondition(cond));
632  B(label);
633  } else {
634  b(label, cond);
635  }
636  bind(&done);
637 }
638 
639 
640 void MacroAssembler::Tbnz(const Register& rt, unsigned bit_pos, Label* label) {
641  ASSERT(allow_macro_instructions_);
642 
643  Label done;
644  bool need_extra_instructions =
645  NeedExtraInstructionsOrRegisterBranch(label, TestBranchType);
646 
647  if (need_extra_instructions) {
648  tbz(rt, bit_pos, &done);
649  B(label);
650  } else {
651  tbnz(rt, bit_pos, label);
652  }
653  bind(&done);
654 }
655 
656 
657 void MacroAssembler::Tbz(const Register& rt, unsigned bit_pos, Label* label) {
658  ASSERT(allow_macro_instructions_);
659 
660  Label done;
661  bool need_extra_instructions =
662  NeedExtraInstructionsOrRegisterBranch(label, TestBranchType);
663 
664  if (need_extra_instructions) {
665  tbnz(rt, bit_pos, &done);
666  B(label);
667  } else {
668  tbz(rt, bit_pos, label);
669  }
670  bind(&done);
671 }
672 
673 
674 void MacroAssembler::Cbnz(const Register& rt, Label* label) {
675  ASSERT(allow_macro_instructions_);
676 
677  Label done;
678  bool need_extra_instructions =
679  NeedExtraInstructionsOrRegisterBranch(label, CompareBranchType);
680 
681  if (need_extra_instructions) {
682  cbz(rt, &done);
683  B(label);
684  } else {
685  cbnz(rt, label);
686  }
687  bind(&done);
688 }
689 
690 
691 void MacroAssembler::Cbz(const Register& rt, Label* label) {
692  ASSERT(allow_macro_instructions_);
693 
694  Label done;
695  bool need_extra_instructions =
696  NeedExtraInstructionsOrRegisterBranch(label, CompareBranchType);
697 
698  if (need_extra_instructions) {
699  cbnz(rt, &done);
700  B(label);
701  } else {
702  cbz(rt, label);
703  }
704  bind(&done);
705 }
706 
707 
708 // Pseudo-instructions.
709 
710 
711 void MacroAssembler::Abs(const Register& rd, const Register& rm,
712  Label* is_not_representable,
713  Label* is_representable) {
714  ASSERT(allow_macro_instructions_);
715  ASSERT(AreSameSizeAndType(rd, rm));
716 
717  Cmp(rm, 1);
718  Cneg(rd, rm, lt);
719 
720  // If the comparison sets the v flag, the input was the smallest value
721  // representable by rm, and the mathematical result of abs(rm) is not
722  // representable using two's complement.
723  if ((is_not_representable != NULL) && (is_representable != NULL)) {
724  B(is_not_representable, vs);
725  B(is_representable);
726  } else if (is_not_representable != NULL) {
727  B(is_not_representable, vs);
728  } else if (is_representable != NULL) {
729  B(is_representable, vc);
730  }
731 }
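// Example of the overflow case mentioned above: with rm = 0x8000000000000000
// (the most negative 64-bit value), Cmp(rm, 1) overflows and sets the V flag,
// and the conditional negation wraps back to the same value, so the
// is_not_representable path (vs) is the one taken.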
732 
733 
734 // Abstracted stack operations.
735 
736 
737 void MacroAssembler::Push(const CPURegister& src0, const CPURegister& src1,
738  const CPURegister& src2, const CPURegister& src3) {
739  ASSERT(AreSameSizeAndType(src0, src1, src2, src3));
740 
741  int count = 1 + src1.IsValid() + src2.IsValid() + src3.IsValid();
742  int size = src0.SizeInBytes();
743 
744  PrepareForPush(count, size);
745  PushHelper(count, size, src0, src1, src2, src3);
746 }
747 
748 
749 void MacroAssembler::Push(const CPURegister& src0, const CPURegister& src1,
750  const CPURegister& src2, const CPURegister& src3,
751  const CPURegister& src4, const CPURegister& src5,
752  const CPURegister& src6, const CPURegister& src7) {
753  ASSERT(AreSameSizeAndType(src0, src1, src2, src3, src4, src5, src6, src7));
754 
755  int count = 5 + src5.IsValid() + src6.IsValid() + src7.IsValid();
756  int size = src0.SizeInBytes();
757 
758  PrepareForPush(count, size);
759  PushHelper(4, size, src0, src1, src2, src3);
760  PushHelper(count - 4, size, src4, src5, src6, src7);
761 }
762 
763 
764 void MacroAssembler::Pop(const CPURegister& dst0, const CPURegister& dst1,
765  const CPURegister& dst2, const CPURegister& dst3) {
766  // It is not valid to pop into the same register more than once in one
767  // instruction, not even into the zero register.
768  ASSERT(!AreAliased(dst0, dst1, dst2, dst3));
769  ASSERT(AreSameSizeAndType(dst0, dst1, dst2, dst3));
770  ASSERT(dst0.IsValid());
771 
772  int count = 1 + dst1.IsValid() + dst2.IsValid() + dst3.IsValid();
773  int size = dst0.SizeInBytes();
774 
775  PrepareForPop(count, size);
776  PopHelper(count, size, dst0, dst1, dst2, dst3);
777 
778  if (!csp.Is(StackPointer()) && emit_debug_code()) {
779  // It is safe to leave csp where it is when unwinding the JavaScript stack,
780  // but if we keep it matching StackPointer, the simulator can detect memory
781  // accesses in the now-free part of the stack.
782  Mov(csp, StackPointer());
783  }
784 }
785 
786 
787 void MacroAssembler::PushPopQueue::PushQueued() {
788  if (queued_.empty()) return;
789 
790  masm_->PrepareForPush(size_);
791 
792  int count = queued_.size();
793  int index = 0;
794  while (index < count) {
795  // PushHelper can only handle registers with the same size and type, and it
796  // can handle only four at a time. Batch them up accordingly.
797  CPURegister batch[4] = {NoReg, NoReg, NoReg, NoReg};
798  int batch_index = 0;
799  do {
800  batch[batch_index++] = queued_[index++];
801  } while ((batch_index < 4) && (index < count) &&
802  batch[0].IsSameSizeAndType(queued_[index]));
803 
804  masm_->PushHelper(batch_index, batch[0].SizeInBytes(),
805  batch[0], batch[1], batch[2], batch[3]);
806  }
807 
808  queued_.clear();
809 }
810 
811 
812 void MacroAssembler::PushPopQueue::PopQueued() {
813  if (queued_.empty()) return;
814 
815  masm_->PrepareForPop(size_);
816 
817  int count = queued_.size();
818  int index = 0;
819  while (index < count) {
820  // PopHelper can only handle registers with the same size and type, and it
821  // can handle only four at a time. Batch them up accordingly.
822  CPURegister batch[4] = {NoReg, NoReg, NoReg, NoReg};
823  int batch_index = 0;
824  do {
825  batch[batch_index++] = queued_[index++];
826  } while ((batch_index < 4) && (index < count) &&
827  batch[0].IsSameSizeAndType(queued_[index]));
828 
829  masm_->PopHelper(batch_index, batch[0].SizeInBytes(),
830  batch[0], batch[1], batch[2], batch[3]);
831  }
832 
833  queued_.clear();
834 }
835 
836 
837 void MacroAssembler::PushCPURegList(CPURegList registers) {
838  int size = registers.RegisterSizeInBytes();
839 
840  PrepareForPush(registers.Count(), size);
841  // Push up to four registers at a time because if the current stack pointer is
842  // csp and reg_size is 32, registers must be pushed in blocks of four in order
843  // to maintain the 16-byte alignment for csp.
844  while (!registers.IsEmpty()) {
845  int count_before = registers.Count();
846  const CPURegister& src0 = registers.PopHighestIndex();
847  const CPURegister& src1 = registers.PopHighestIndex();
848  const CPURegister& src2 = registers.PopHighestIndex();
849  const CPURegister& src3 = registers.PopHighestIndex();
850  int count = count_before - registers.Count();
851  PushHelper(count, size, src0, src1, src2, src3);
852  }
853 }
854 
855 
856 void MacroAssembler::PopCPURegList(CPURegList registers) {
857  int size = registers.RegisterSizeInBytes();
858 
859  PrepareForPop(registers.Count(), size);
860  // Pop up to four registers at a time because if the current stack pointer is
861  // csp and reg_size is 32, registers must be popped in blocks of four in
862  // order to maintain the 16-byte alignment for csp.
863  while (!registers.IsEmpty()) {
864  int count_before = registers.Count();
865  const CPURegister& dst0 = registers.PopLowestIndex();
866  const CPURegister& dst1 = registers.PopLowestIndex();
867  const CPURegister& dst2 = registers.PopLowestIndex();
868  const CPURegister& dst3 = registers.PopLowestIndex();
869  int count = count_before - registers.Count();
870  PopHelper(count, size, dst0, dst1, dst2, dst3);
871  }
872 
873  if (!csp.Is(StackPointer()) && emit_debug_code()) {
874  // It is safe to leave csp where it is when unwinding the JavaScript stack,
875  // but if we keep it matching StackPointer, the simulator can detect memory
876  // accesses in the now-free part of the stack.
877  Mov(csp, StackPointer());
878  }
879 }
880 
881 
882 void MacroAssembler::PushMultipleTimes(CPURegister src, int count) {
883  int size = src.SizeInBytes();
884 
885  PrepareForPush(count, size);
886 
887  if (FLAG_optimize_for_size && count > 8) {
888  UseScratchRegisterScope temps(this);
889  Register temp = temps.AcquireX();
890 
891  Label loop;
892  __ Mov(temp, count / 2);
893  __ Bind(&loop);
894  PushHelper(2, size, src, src, NoReg, NoReg);
895  __ Subs(temp, temp, 1);
896  __ B(ne, &loop);
897 
898  count %= 2;
899  }
900 
901  // Push up to four registers at a time if possible because if the current
902  // stack pointer is csp and the register size is 32, registers must be pushed
903  // in blocks of four in order to maintain the 16-byte alignment for csp.
904  while (count >= 4) {
905  PushHelper(4, size, src, src, src, src);
906  count -= 4;
907  }
908  if (count >= 2) {
909  PushHelper(2, size, src, src, NoReg, NoReg);
910  count -= 2;
911  }
912  if (count == 1) {
913  PushHelper(1, size, src, NoReg, NoReg, NoReg);
914  count -= 1;
915  }
916  ASSERT(count == 0);
917 }
918 
919 
920 void MacroAssembler::PushMultipleTimes(CPURegister src, Register count) {
921  PrepareForPush(Operand(count, UXTW, WhichPowerOf2(src.SizeInBytes())));
922 
923  UseScratchRegisterScope temps(this);
924  Register temp = temps.AcquireSameSizeAs(count);
925 
926  if (FLAG_optimize_for_size) {
927  Label loop, done;
928 
929  Subs(temp, count, 1);
930  B(mi, &done);
931 
932  // Push all registers individually, to save code size.
933  Bind(&loop);
934  Subs(temp, temp, 1);
935  PushHelper(1, src.SizeInBytes(), src, NoReg, NoReg, NoReg);
936  B(pl, &loop);
937 
938  Bind(&done);
939  } else {
940  Label loop, leftover2, leftover1, done;
941 
942  Subs(temp, count, 4);
943  B(mi, &leftover2);
944 
945  // Push groups of four first.
946  Bind(&loop);
947  Subs(temp, temp, 4);
948  PushHelper(4, src.SizeInBytes(), src, src, src, src);
949  B(pl, &loop);
950 
951  // Push groups of two.
952  Bind(&leftover2);
953  Tbz(count, 1, &leftover1);
954  PushHelper(2, src.SizeInBytes(), src, src, NoReg, NoReg);
955 
956  // Push the last one (if required).
957  Bind(&leftover1);
958  Tbz(count, 0, &done);
959  PushHelper(1, src.SizeInBytes(), src, NoReg, NoReg, NoReg);
960 
961  Bind(&done);
962  }
963 }
964 
965 
966 void MacroAssembler::PushHelper(int count, int size,
967  const CPURegister& src0,
968  const CPURegister& src1,
969  const CPURegister& src2,
970  const CPURegister& src3) {
971  // Ensure that we don't unintentionally modify scratch or debug registers.
972  InstructionAccurateScope scope(this);
973 
974  ASSERT(AreSameSizeAndType(src0, src1, src2, src3));
975  ASSERT(size == src0.SizeInBytes());
976 
977  // When pushing multiple registers, the store order is chosen such that
978  // Push(a, b) is equivalent to Push(a) followed by Push(b).
979  switch (count) {
980  case 1:
981  ASSERT(src1.IsNone() && src2.IsNone() && src3.IsNone());
982  str(src0, MemOperand(StackPointer(), -1 * size, PreIndex));
983  break;
984  case 2:
985  ASSERT(src2.IsNone() && src3.IsNone());
986  stp(src1, src0, MemOperand(StackPointer(), -2 * size, PreIndex));
987  break;
988  case 3:
989  ASSERT(src3.IsNone());
990  stp(src2, src1, MemOperand(StackPointer(), -3 * size, PreIndex));
991  str(src0, MemOperand(StackPointer(), 2 * size));
992  break;
993  case 4:
994  // Skip over 4 * size, then fill in the gap. This allows four W registers
995  // to be pushed using csp, whilst maintaining 16-byte alignment for csp
996  // at all times.
997  stp(src3, src2, MemOperand(StackPointer(), -4 * size, PreIndex));
998  stp(src1, src0, MemOperand(StackPointer(), 2 * size));
999  break;
1000  default:
1001  UNREACHABLE();
1002  }
1003 }
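// Layout illustration for count == 4: after the pre-indexed stp and the
// second stp above, memory from the new stack pointer upwards holds
// src3, src2, src1, src0, i.e. src0 ends up at the highest address, exactly
// as if Push(src0), Push(src1), Push(src2), Push(src3) had been emitted
// individually, while the stack pointer only ever moves by 4 * size.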
1004 
1005 
1006 void MacroAssembler::PopHelper(int count, int size,
1007  const CPURegister& dst0,
1008  const CPURegister& dst1,
1009  const CPURegister& dst2,
1010  const CPURegister& dst3) {
1011  // Ensure that we don't unintentionally modify scratch or debug registers.
1012  InstructionAccurateScope scope(this);
1013 
1014  ASSERT(AreSameSizeAndType(dst0, dst1, dst2, dst3));
1015  ASSERT(size == dst0.SizeInBytes());
1016 
1017  // When popping multiple registers, the load order is chosen such that
1018  // Pop(a, b) is equivalent to Pop(a) followed by Pop(b).
1019  switch (count) {
1020  case 1:
1021  ASSERT(dst1.IsNone() && dst2.IsNone() && dst3.IsNone());
1022  ldr(dst0, MemOperand(StackPointer(), 1 * size, PostIndex));
1023  break;
1024  case 2:
1025  ASSERT(dst2.IsNone() && dst3.IsNone());
1026  ldp(dst0, dst1, MemOperand(StackPointer(), 2 * size, PostIndex));
1027  break;
1028  case 3:
1029  ASSERT(dst3.IsNone());
1030  ldr(dst2, MemOperand(StackPointer(), 2 * size));
1031  ldp(dst0, dst1, MemOperand(StackPointer(), 3 * size, PostIndex));
1032  break;
1033  case 4:
1034  // Load the higher addresses first, then load the lower addresses and
1035  // skip the whole block in the second instruction. This allows four W
1036  // registers to be popped using csp, whilst maintaining 16-byte alignment
1037  // for csp at all times.
1038  ldp(dst2, dst3, MemOperand(StackPointer(), 2 * size));
1039  ldp(dst0, dst1, MemOperand(StackPointer(), 4 * size, PostIndex));
1040  break;
1041  default:
1042  UNREACHABLE();
1043  }
1044 }
1045 
1046 
1047 void MacroAssembler::PrepareForPush(Operand total_size) {
1048  // TODO(jbramley): This assertion generates too much code in some debug tests.
1049  // AssertStackConsistency();
1050  if (csp.Is(StackPointer())) {
1051  // If the current stack pointer is csp, then it must be aligned to 16 bytes
1052  // on entry and the total size of the specified registers must also be a
1053  // multiple of 16 bytes.
1054  if (total_size.IsImmediate()) {
1055  ASSERT((total_size.immediate() % 16) == 0);
1056  }
1057 
1058  // Don't check access size for non-immediate sizes. It's difficult to do
1059  // well, and it will be caught by hardware (or the simulator) anyway.
1060  } else {
1061  // Even if the current stack pointer is not the system stack pointer (csp),
1062  // the system stack pointer will still be modified in order to comply with
1063  // ABI rules about accessing memory below the system stack pointer.
1064  BumpSystemStackPointer(total_size);
1065  }
1066 }
1067 
1068 
1069 void MacroAssembler::PrepareForPop(Operand total_size) {
1071  if (csp.Is(StackPointer())) {
1072  // If the current stack pointer is csp, then it must be aligned to 16 bytes
1073  // on entry and the total size of the specified registers must also be a
1074  // multiple of 16 bytes.
1075  if (total_size.IsImmediate()) {
1076  ASSERT((total_size.immediate() % 16) == 0);
1077  }
1078 
1079  // Don't check access size for non-immediate sizes. It's difficult to do
1080  // well, and it will be caught by hardware (or the simulator) anyway.
1081  }
1082 }
1083 
1084 
1085 void MacroAssembler::Poke(const CPURegister& src, const Operand& offset) {
1086  if (offset.IsImmediate()) {
1087  ASSERT(offset.immediate() >= 0);
1088  } else if (emit_debug_code()) {
1089  Cmp(xzr, offset);
1090  Check(le, kStackAccessBelowStackPointer);
1091  }
1092 
1093  Str(src, MemOperand(StackPointer(), offset));
1094 }
1095 
1096 
1097 void MacroAssembler::Peek(const CPURegister& dst, const Operand& offset) {
1098  if (offset.IsImmediate()) {
1099  ASSERT(offset.immediate() >= 0);
1100  } else if (emit_debug_code()) {
1101  Cmp(xzr, offset);
1102  Check(le, kStackAccessBelowStackPointer);
1103  }
1104 
1105  Ldr(dst, MemOperand(StackPointer(), offset));
1106 }
1107 
1108 
1109 void MacroAssembler::PokePair(const CPURegister& src1,
1110  const CPURegister& src2,
1111  int offset) {
1112  ASSERT(AreSameSizeAndType(src1, src2));
1113  ASSERT((offset >= 0) && ((offset % src1.SizeInBytes()) == 0));
1114  Stp(src1, src2, MemOperand(StackPointer(), offset));
1115 }
1116 
1117 
1118 void MacroAssembler::PeekPair(const CPURegister& dst1,
1119  const CPURegister& dst2,
1120  int offset) {
1121  ASSERT(AreSameSizeAndType(dst1, dst2));
1122  ASSERT((offset >= 0) && ((offset % dst1.SizeInBytes()) == 0));
1123  Ldp(dst1, dst2, MemOperand(StackPointer(), offset));
1124 }
1125 
1126 
1127 void MacroAssembler::PushCalleeSavedRegisters() {
1128  // Ensure that the macro-assembler doesn't use any scratch registers.
1129  InstructionAccurateScope scope(this);
1130 
1131  // This method must not be called unless the current stack pointer is the
1132  // system stack pointer (csp).
1133  ASSERT(csp.Is(StackPointer()));
1134 
1135  MemOperand tos(csp, -2 * kXRegSize, PreIndex);
1136 
1137  stp(d14, d15, tos);
1138  stp(d12, d13, tos);
1139  stp(d10, d11, tos);
1140  stp(d8, d9, tos);
1141 
1142  stp(x29, x30, tos);
1143  stp(x27, x28, tos); // x28 = jssp
1144  stp(x25, x26, tos);
1145  stp(x23, x24, tos);
1146  stp(x21, x22, tos);
1147  stp(x19, x20, tos);
1148 }
1149 
1150 
1151 void MacroAssembler::PopCalleeSavedRegisters() {
1152  // Ensure that the macro-assembler doesn't use any scratch registers.
1153  InstructionAccurateScope scope(this);
1154 
1155  // This method must not be called unless the current stack pointer is the
1156  // system stack pointer (csp).
1157  ASSERT(csp.Is(StackPointer()));
1158 
1159  MemOperand tos(csp, 2 * kXRegSize, PostIndex);
1160 
1161  ldp(x19, x20, tos);
1162  ldp(x21, x22, tos);
1163  ldp(x23, x24, tos);
1164  ldp(x25, x26, tos);
1165  ldp(x27, x28, tos); // x28 = jssp
1166  ldp(x29, x30, tos);
1167 
1168  ldp(d8, d9, tos);
1169  ldp(d10, d11, tos);
1170  ldp(d12, d13, tos);
1171  ldp(d14, d15, tos);
1172 }
1173 
1174 
1175 void MacroAssembler::AssertStackConsistency() {
1176  if (emit_debug_code()) {
1177  if (csp.Is(StackPointer())) {
1178  // We can't check the alignment of csp without using a scratch register
1179  // (or clobbering the flags), but the processor (or simulator) will abort
1180  // if it is not properly aligned during a load.
1181  ldr(xzr, MemOperand(csp, 0));
1182  } else if (FLAG_enable_slow_asserts) {
1183  Label ok;
1184  // Check that csp <= StackPointer(), preserving all registers and NZCV.
1185  sub(StackPointer(), csp, StackPointer());
1186  cbz(StackPointer(), &ok); // Ok if csp == StackPointer().
1187  tbnz(StackPointer(), kXSignBit, &ok); // Ok if csp < StackPointer().
1188 
1189  Abort(kTheCurrentStackPointerIsBelowCsp);
1190 
1191  bind(&ok);
1192  // Restore StackPointer().
1193  sub(StackPointer(), csp, StackPointer());
1194  }
1195  }
1196 }
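// Note on the trick above: sub(StackPointer(), csp, StackPointer()) maps the
// stack pointer register to csp - StackPointer(), and applying the same
// operation a second time yields csp - (csp - original), restoring the
// original value without needing a scratch register or touching NZCV.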
1197 
1198 
1199 void MacroAssembler::LoadRoot(Register destination,
1200  Heap::RootListIndex index) {
1201  // TODO(jbramley): Most root values are constants, and can be synthesized
1202  // without a load. Refer to the ARM back end for details.
1203  Ldr(destination, MemOperand(root, index << kPointerSizeLog2));
1204 }
1205 
1206 
1207 void MacroAssembler::StoreRoot(Register source,
1208  Heap::RootListIndex index) {
1209  Str(source, MemOperand(root, index << kPointerSizeLog2));
1210 }
1211 
1212 
1213 void MacroAssembler::LoadTrueFalseRoots(Register true_root,
1214  Register false_root) {
1215  STATIC_ASSERT((Heap::kTrueValueRootIndex + 1) == Heap::kFalseValueRootIndex);
1216  Ldp(true_root, false_root,
1217  MemOperand(root, Heap::kTrueValueRootIndex << kPointerSizeLog2));
1218 }
1219 
1220 
1221 void MacroAssembler::LoadHeapObject(Register result,
1222  Handle<HeapObject> object) {
1223  AllowDeferredHandleDereference using_raw_address;
1224  if (isolate()->heap()->InNewSpace(*object)) {
1225  Handle<Cell> cell = isolate()->factory()->NewCell(object);
1226  Mov(result, Operand(cell));
1227  Ldr(result, FieldMemOperand(result, Cell::kValueOffset));
1228  } else {
1229  Mov(result, Operand(object));
1230  }
1231 }
1232 
1233 
1234 void MacroAssembler::LoadInstanceDescriptors(Register map,
1235  Register descriptors) {
1236  Ldr(descriptors, FieldMemOperand(map, Map::kDescriptorsOffset));
1237 }
1238 
1239 
1240 void MacroAssembler::NumberOfOwnDescriptors(Register dst, Register map) {
1241  Ldrsw(dst, UntagSmiFieldMemOperand(map, Map::kBitField3Offset));
1242  DecodeField<Map::NumberOfOwnDescriptorsBits>(dst);
1243 }
1244 
1245 
1246 void MacroAssembler::EnumLengthUntagged(Register dst, Register map) {
1247  STATIC_ASSERT(Map::EnumLengthBits::kShift == 0);
1248  Ldrsw(dst, UntagSmiFieldMemOperand(map, Map::kBitField3Offset));
1249  And(dst, dst, Map::EnumLengthBits::kMask);
1250 }
1251 
1252 
1253 void MacroAssembler::EnumLengthSmi(Register dst, Register map) {
1254  STATIC_ASSERT(Map::EnumLengthBits::kShift == 0);
1255  Ldr(dst, FieldMemOperand(map, Map::kBitField3Offset));
1256  And(dst, dst, Smi::FromInt(Map::EnumLengthBits::kMask));
1257 }
1258 
1259 
1260 void MacroAssembler::CheckEnumCache(Register object,
1261  Register null_value,
1262  Register scratch0,
1263  Register scratch1,
1264  Register scratch2,
1265  Register scratch3,
1266  Label* call_runtime) {
1267  ASSERT(!AreAliased(object, null_value, scratch0, scratch1, scratch2,
1268  scratch3));
1269 
1270  Register empty_fixed_array_value = scratch0;
1271  Register current_object = scratch1;
1272 
1273  LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
1274  Label next, start;
1275 
1276  Mov(current_object, object);
1277 
1278  // Check if the enum length field is properly initialized, indicating that
1279  // there is an enum cache.
1280  Register map = scratch2;
1281  Register enum_length = scratch3;
1282  Ldr(map, FieldMemOperand(current_object, HeapObject::kMapOffset));
1283 
1284  EnumLengthUntagged(enum_length, map);
1285  Cmp(enum_length, kInvalidEnumCacheSentinel);
1286  B(eq, call_runtime);
1287 
1288  B(&start);
1289 
1290  Bind(&next);
1291  Ldr(map, FieldMemOperand(current_object, HeapObject::kMapOffset));
1292 
1293  // For all objects but the receiver, check that the cache is empty.
1294  EnumLengthUntagged(enum_length, map);
1295  Cbnz(enum_length, call_runtime);
1296 
1297  Bind(&start);
1298 
1299  // Check that there are no elements. Register current_object contains the
1300  // current JS object we've reached through the prototype chain.
1301  Label no_elements;
1302  Ldr(current_object, FieldMemOperand(current_object,
1303  JSObject::kElementsOffset));
1304  Cmp(current_object, empty_fixed_array_value);
1305  B(eq, &no_elements);
1306 
1307  // Second chance, the object may be using the empty slow element dictionary.
1308  CompareRoot(current_object, Heap::kEmptySlowElementDictionaryRootIndex);
1309  B(ne, call_runtime);
1310 
1311  Bind(&no_elements);
1312  Ldr(current_object, FieldMemOperand(map, Map::kPrototypeOffset));
1313  Cmp(current_object, null_value);
1314  B(ne, &next);
1315 }
1316 
1317 
1318 void MacroAssembler::TestJSArrayForAllocationMemento(Register receiver,
1319  Register scratch1,
1320  Register scratch2,
1321  Label* no_memento_found) {
1322  ExternalReference new_space_start =
1323  ExternalReference::new_space_start(isolate());
1324  ExternalReference new_space_allocation_top =
1325  ExternalReference::new_space_allocation_top_address(isolate());
1326 
1327  Add(scratch1, receiver,
1328  JSArray::kSize + AllocationMemento::kSize - kHeapObjectTag);
1329  Cmp(scratch1, new_space_start);
1330  B(lt, no_memento_found);
1331 
1332  Mov(scratch2, new_space_allocation_top);
1333  Ldr(scratch2, MemOperand(scratch2));
1334  Cmp(scratch1, scratch2);
1335  B(gt, no_memento_found);
1336 
1337  Ldr(scratch1, MemOperand(scratch1, -AllocationMemento::kSize));
1338  Cmp(scratch1,
1339  Operand(isolate()->factory()->allocation_memento_map()));
1340 }
1341 
1342 
1343 void MacroAssembler::JumpToHandlerEntry(Register exception,
1344  Register object,
1345  Register state,
1346  Register scratch1,
1347  Register scratch2) {
1348  // Handler expects argument in x0.
1349  ASSERT(exception.Is(x0));
1350 
1351  // Compute the handler entry address and jump to it. The handler table is
1352  // a fixed array of (smi-tagged) code offsets.
1353  Ldr(scratch1, FieldMemOperand(object, Code::kHandlerTableOffset));
1354  Add(scratch1, scratch1, FixedArray::kHeaderSize - kHeapObjectTag);
1355  STATIC_ASSERT(StackHandler::kKindWidth < kPointerSizeLog2);
1356  Lsr(scratch2, state, StackHandler::kKindWidth);
1357  Ldr(scratch2, MemOperand(scratch1, scratch2, LSL, kPointerSizeLog2));
1358  Add(scratch1, object, Code::kHeaderSize - kHeapObjectTag);
1359  Add(scratch1, scratch1, Operand::UntagSmi(scratch2));
1360  Br(scratch1);
1361 }
1362 
1363 
1364 void MacroAssembler::InNewSpace(Register object,
1365  Condition cond,
1366  Label* branch) {
1367  ASSERT(cond == eq || cond == ne);
1368  UseScratchRegisterScope temps(this);
1369  Register temp = temps.AcquireX();
1370  And(temp, object, ExternalReference::new_space_mask(isolate()));
1371  Cmp(temp, ExternalReference::new_space_start(isolate()));
1372  B(cond, branch);
1373 }
1374 
1375 
1376 void MacroAssembler::Throw(Register value,
1377  Register scratch1,
1378  Register scratch2,
1379  Register scratch3,
1380  Register scratch4) {
1381  // Adjust this code if not the case.
1382  STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
1383  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
1384  STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
1385  STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
1386  STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
1387  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
1388 
1389  // The handler expects the exception in x0.
1390  ASSERT(value.Is(x0));
1391 
1392  // Drop the stack pointer to the top of the top handler.
1393  ASSERT(jssp.Is(StackPointer()));
1394  Mov(scratch1, Operand(ExternalReference(Isolate::kHandlerAddress,
1395  isolate())));
1396  Ldr(jssp, MemOperand(scratch1));
1397  // Restore the next handler.
1398  Pop(scratch2);
1399  Str(scratch2, MemOperand(scratch1));
1400 
1401  // Get the code object and state. Restore the context and frame pointer.
1402  Register object = scratch1;
1403  Register state = scratch2;
1404  Pop(object, state, cp, fp);
1405 
1406  // If the handler is a JS frame, restore the context to the frame.
1407  // (kind == ENTRY) == (fp == 0) == (cp == 0), so we could test either fp
1408  // or cp.
1409  Label not_js_frame;
1410  Cbz(cp, &not_js_frame);
1411  Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
1412  Bind(&not_js_frame);
1413 
1414  JumpToHandlerEntry(value, object, state, scratch3, scratch4);
1415 }
1416 
1417 
1418 void MacroAssembler::ThrowUncatchable(Register value,
1419  Register scratch1,
1420  Register scratch2,
1421  Register scratch3,
1422  Register scratch4) {
1423  // Adjust this code if not the case.
1424  STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
1425  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
1426  STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
1427  STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
1428  STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
1429  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
1430 
1431  // The handler expects the exception in x0.
1432  ASSERT(value.Is(x0));
1433 
1434  // Drop the stack pointer to the top of the top stack handler.
1435  ASSERT(jssp.Is(StackPointer()));
1436  Mov(scratch1, Operand(ExternalReference(Isolate::kHandlerAddress,
1437  isolate())));
1438  Ldr(jssp, MemOperand(scratch1));
1439 
1440  // Unwind the handlers until the ENTRY handler is found.
1441  Label fetch_next, check_kind;
1442  B(&check_kind);
1443  Bind(&fetch_next);
1444  Peek(jssp, StackHandlerConstants::kNextOffset);
1445 
1446  Bind(&check_kind);
1447  STATIC_ASSERT(StackHandler::JS_ENTRY == 0);
1448  Peek(scratch2, StackHandlerConstants::kStateOffset);
1449  TestAndBranchIfAnySet(scratch2, StackHandler::KindField::kMask, &fetch_next);
1450 
1451  // Set the top handler address to next handler past the top ENTRY handler.
1452  Pop(scratch2);
1453  Str(scratch2, MemOperand(scratch1));
1454 
1455  // Get the code object and state. Clear the context and frame pointer (0 was
1456  // saved in the handler).
1457  Register object = scratch1;
1458  Register state = scratch2;
1459  Pop(object, state, cp, fp);
1460 
1461  JumpToHandlerEntry(value, object, state, scratch3, scratch4);
1462 }
1463 
1464 
1465 void MacroAssembler::Throw(BailoutReason reason) {
1466  Label throw_start;
1467  Bind(&throw_start);
1468 #ifdef DEBUG
1469  const char* msg = GetBailoutReason(reason);
1470  RecordComment("Throw message: ");
1471  RecordComment((msg != NULL) ? msg : "UNKNOWN");
1472 #endif
1473 
1474  Mov(x0, Smi::FromInt(reason));
1475  Push(x0);
1476 
1477  // Disable stub call restrictions to always allow calls to throw.
1478  if (!has_frame_) {
1479  // We don't actually want to generate a pile of code for this, so just
1480  // claim there is a stack frame, without generating one.
1481  FrameScope scope(this, StackFrame::NONE);
1482  CallRuntime(Runtime::kHiddenThrowMessage, 1);
1483  } else {
1484  CallRuntime(Runtime::kHiddenThrowMessage, 1);
1485  }
1486  // ThrowMessage should not return here.
1487  Unreachable();
1488 }
1489 
1490 
1491 void MacroAssembler::ThrowIf(Condition cc, BailoutReason reason) {
1492  Label ok;
1493  B(InvertCondition(cc), &ok);
1494  Throw(reason);
1495  Bind(&ok);
1496 }
1497 
1498 
1499 void MacroAssembler::ThrowIfSmi(const Register& value, BailoutReason reason) {
1500  Label ok;
1501  JumpIfNotSmi(value, &ok);
1502  Throw(reason);
1503  Bind(&ok);
1504 }
1505 
1506 
1507 void MacroAssembler::SmiAbs(const Register& smi, Label* slow) {
1508  ASSERT(smi.Is64Bits());
1509  Abs(smi, smi, slow);
1510 }
1511 
1512 
1513 void MacroAssembler::AssertSmi(Register object, BailoutReason reason) {
1514  if (emit_debug_code()) {
1515  STATIC_ASSERT(kSmiTag == 0);
1516  Tst(object, kSmiTagMask);
1517  Check(eq, reason);
1518  }
1519 }
1520 
1521 
1522 void MacroAssembler::AssertNotSmi(Register object, BailoutReason reason) {
1523  if (emit_debug_code()) {
1524  STATIC_ASSERT(kSmiTag == 0);
1525  Tst(object, kSmiTagMask);
1526  Check(ne, reason);
1527  }
1528 }
1529 
1530 
1531 void MacroAssembler::AssertName(Register object) {
1532  if (emit_debug_code()) {
1533  AssertNotSmi(object, kOperandIsASmiAndNotAName);
1534 
1535  UseScratchRegisterScope temps(this);
1536  Register temp = temps.AcquireX();
1537 
1538  Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset));
1539  CompareInstanceType(temp, temp, LAST_NAME_TYPE);
1540  Check(ls, kOperandIsNotAName);
1541  }
1542 }
1543 
1544 
1545 void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
1546  Register scratch) {
1547  if (emit_debug_code()) {
1548  Label done_checking;
1549  AssertNotSmi(object);
1550  JumpIfRoot(object, Heap::kUndefinedValueRootIndex, &done_checking);
1551  Ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
1552  CompareRoot(scratch, Heap::kAllocationSiteMapRootIndex);
1553  Assert(eq, kExpectedUndefinedOrCell);
1554  Bind(&done_checking);
1555  }
1556 }
1557 
1558 
1559 void MacroAssembler::AssertString(Register object) {
1560  if (emit_debug_code()) {
1561  UseScratchRegisterScope temps(this);
1562  Register temp = temps.AcquireX();
1563  STATIC_ASSERT(kSmiTag == 0);
1564  Tst(object, kSmiTagMask);
1565  Check(ne, kOperandIsASmiAndNotAString);
1566  Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset));
1567  CompareInstanceType(temp, temp, FIRST_NONSTRING_TYPE);
1568  Check(lo, kOperandIsNotAString);
1569  }
1570 }
1571 
1572 
1573 void MacroAssembler::CallStub(CodeStub* stub, TypeFeedbackId ast_id) {
1574  ASSERT(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs.
1575  Call(stub->GetCode(isolate()), RelocInfo::CODE_TARGET, ast_id);
1576 }
1577 
1578 
1579 void MacroAssembler::TailCallStub(CodeStub* stub) {
1580  Jump(stub->GetCode(isolate()), RelocInfo::CODE_TARGET);
1581 }
1582 
1583 
1584 void MacroAssembler::CallRuntime(const Runtime::Function* f,
1585  int num_arguments,
1586  SaveFPRegsMode save_doubles) {
1587  // All arguments must be on the stack before this function is called.
1588  // x0 holds the return value after the call.
1589 
1590  // Check that the number of arguments matches what the function expects.
1591  // If f->nargs is -1, the function can accept a variable number of arguments.
1592  if (f->nargs >= 0 && f->nargs != num_arguments) {
1593  // Illegal operation: drop the stack arguments and return undefined.
1594  if (num_arguments > 0) {
1595  Drop(num_arguments);
1596  }
1597  LoadRoot(x0, Heap::kUndefinedValueRootIndex);
1598  return;
1599  }
1600 
1601  // Place the necessary arguments.
1602  Mov(x0, num_arguments);
1603  Mov(x1, ExternalReference(f, isolate()));
1604 
1605  CEntryStub stub(1, save_doubles);
1606  CallStub(&stub);
1607 }
1608 
1609 
1610 static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
1611  return ref0.address() - ref1.address();
1612 }
1613 
1614 
1615 void MacroAssembler::CallApiFunctionAndReturn(
1616  Register function_address,
1617  ExternalReference thunk_ref,
1618  int stack_space,
1619  int spill_offset,
1620  MemOperand return_value_operand,
1621  MemOperand* context_restore_operand) {
1622  ASM_LOCATION("CallApiFunctionAndReturn");
1623  ExternalReference next_address =
1624  ExternalReference::handle_scope_next_address(isolate());
1625  const int kNextOffset = 0;
1626  const int kLimitOffset = AddressOffset(
1627  ExternalReference::handle_scope_limit_address(isolate()),
1628  next_address);
1629  const int kLevelOffset = AddressOffset(
1630  ExternalReference::handle_scope_level_address(isolate()),
1631  next_address);
1632 
1633  ASSERT(function_address.is(x1) || function_address.is(x2));
1634 
1635  Label profiler_disabled;
1636  Label end_profiler_check;
1637  bool* is_profiling_flag = isolate()->cpu_profiler()->is_profiling_address();
1638  STATIC_ASSERT(sizeof(*is_profiling_flag) == 1);
1639  Mov(x10, reinterpret_cast<uintptr_t>(is_profiling_flag));
1640  Ldrb(w10, MemOperand(x10));
1641  Cbz(w10, &profiler_disabled);
1642  Mov(x3, thunk_ref);
1643  B(&end_profiler_check);
1644 
1645  Bind(&profiler_disabled);
1646  Mov(x3, function_address);
1647  Bind(&end_profiler_check);
1648 
1649  // Save the callee-save registers we are going to use.
1650  // TODO(all): Is this necessary? ARM doesn't do it.
1652  Poke(x19, (spill_offset + 0) * kXRegSize);
1653  Poke(x20, (spill_offset + 1) * kXRegSize);
1654  Poke(x21, (spill_offset + 2) * kXRegSize);
1655  Poke(x22, (spill_offset + 3) * kXRegSize);
1656 
1657  // Allocate HandleScope in callee-save registers.
1658  // We will need to restore the HandleScope after the call to the API function,
1659  // so allocate it in callee-save registers, which are preserved across C calls.
1660  Register handle_scope_base = x22;
1661  Register next_address_reg = x19;
1662  Register limit_reg = x20;
1663  Register level_reg = w21;
1664 
1665  Mov(handle_scope_base, next_address);
1666  Ldr(next_address_reg, MemOperand(handle_scope_base, kNextOffset));
1667  Ldr(limit_reg, MemOperand(handle_scope_base, kLimitOffset));
1668  Ldr(level_reg, MemOperand(handle_scope_base, kLevelOffset));
1669  Add(level_reg, level_reg, 1);
1670  Str(level_reg, MemOperand(handle_scope_base, kLevelOffset));
1671 
1672  if (FLAG_log_timer_events) {
1673  FrameScope frame(this, StackFrame::MANUAL);
1674  PushSafepointRegisters();
1675  Mov(x0, ExternalReference::isolate_address(isolate()));
1676  CallCFunction(ExternalReference::log_enter_external_function(isolate()), 1);
1677  PopSafepointRegisters();
1678  }
1679 
1680  // Native call returns to the DirectCEntry stub which redirects to the
1681  // return address pushed on stack (could have moved after GC).
1682  // DirectCEntry stub itself is generated early and never moves.
1683  DirectCEntryStub stub;
1684  stub.GenerateCall(this, x3);
1685 
1686  if (FLAG_log_timer_events) {
1687  FrameScope frame(this, StackFrame::MANUAL);
1688  PushSafepointRegisters();
1689  Mov(x0, ExternalReference::isolate_address(isolate()));
1690  CallCFunction(ExternalReference::log_leave_external_function(isolate()), 1);
1691  PopSafepointRegisters();
1692  }
1693 
1694  Label promote_scheduled_exception;
1695  Label exception_handled;
1696  Label delete_allocated_handles;
1697  Label leave_exit_frame;
1698  Label return_value_loaded;
1699 
1700  // Load value from ReturnValue.
1701  Ldr(x0, return_value_operand);
1702  Bind(&return_value_loaded);
1703  // No more valid handles (the result handle was the last one). Restore
1704  // previous handle scope.
1705  Str(next_address_reg, MemOperand(handle_scope_base, kNextOffset));
1706  if (emit_debug_code()) {
1707  Ldr(w1, MemOperand(handle_scope_base, kLevelOffset));
1708  Cmp(w1, level_reg);
1709  Check(eq, kUnexpectedLevelAfterReturnFromApiCall);
1710  }
1711  Sub(level_reg, level_reg, 1);
1712  Str(level_reg, MemOperand(handle_scope_base, kLevelOffset));
1713  Ldr(x1, MemOperand(handle_scope_base, kLimitOffset));
1714  Cmp(limit_reg, x1);
1715  B(ne, &delete_allocated_handles);
1716 
1717  Bind(&leave_exit_frame);
1718  // Restore callee-saved registers.
1719  Peek(x19, (spill_offset + 0) * kXRegSize);
1720  Peek(x20, (spill_offset + 1) * kXRegSize);
1721  Peek(x21, (spill_offset + 2) * kXRegSize);
1722  Peek(x22, (spill_offset + 3) * kXRegSize);
1723 
1724  // Check if the function scheduled an exception.
1725  Mov(x5, ExternalReference::scheduled_exception_address(isolate()));
1726  Ldr(x5, MemOperand(x5));
1727  JumpIfNotRoot(x5, Heap::kTheHoleValueRootIndex, &promote_scheduled_exception);
1728  Bind(&exception_handled);
1729 
1730  bool restore_context = context_restore_operand != NULL;
1731  if (restore_context) {
1732  Ldr(cp, *context_restore_operand);
1733  }
1734 
1735  LeaveExitFrame(false, x1, !restore_context);
1736  Drop(stack_space);
1737  Ret();
1738 
1739  Bind(&promote_scheduled_exception);
1740  {
1741  FrameScope frame(this, StackFrame::INTERNAL);
1742  CallExternalReference(
1743  ExternalReference(
1744  Runtime::kHiddenPromoteScheduledException, isolate()), 0);
1745  }
1746  B(&exception_handled);
1747 
1748  // HandleScope limit has changed. Delete allocated extensions.
1749  Bind(&delete_allocated_handles);
1750  Str(limit_reg, MemOperand(handle_scope_base, kLimitOffset));
1751  // Save the return value in a callee-save register.
1752  Register saved_result = x19;
1753  Mov(saved_result, x0);
1754  Mov(x0, ExternalReference::isolate_address(isolate()));
1755  CallCFunction(
1756  ExternalReference::delete_handle_scope_extensions(isolate()), 1);
1757  Mov(x0, saved_result);
1758  B(&leave_exit_frame);
1759 }
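// For reference, the HandleScope bookkeeping above corresponds roughly to the
// following C++ sketch (an illustration only; DeleteExtensions stands for the
// delete_allocated_handles slow path above):
//
//   HandleScopeData* data = isolate->handle_scope_data();
//   prev_next = data->next;         // kept in next_address_reg (x19)
//   prev_limit = data->limit;       // kept in limit_reg (x20)
//   data->level++;                  // level_reg (w21)
//   result = CallApiFunction();
//   data->next = prev_next;
//   data->level--;
//   if (data->limit != prev_limit) DeleteExtensions(isolate);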
1760 
1761 
1762 void MacroAssembler::CallExternalReference(const ExternalReference& ext,
1763  int num_arguments) {
1764  Mov(x0, num_arguments);
1765  Mov(x1, ext);
1766 
1767  CEntryStub stub(1);
1768  CallStub(&stub);
1769 }
1770 
1771 
1772 void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin) {
1773  Mov(x1, builtin);
1774  CEntryStub stub(1);
1775  Jump(stub.GetCode(isolate()), RelocInfo::CODE_TARGET);
1776 }
1777 
1778 
1779 void MacroAssembler::GetBuiltinFunction(Register target,
1780  Builtins::JavaScript id) {
1781  // Load the builtins object into target register.
1782  Ldr(target, GlobalObjectMemOperand());
1783  Ldr(target, FieldMemOperand(target, GlobalObject::kBuiltinsOffset));
1784  // Load the JavaScript builtin function from the builtins object.
1785  Ldr(target, FieldMemOperand(target,
1786  JSBuiltinsObject::OffsetOfFunctionWithId(id)));
1787 }
1788 
1789 
1790 void MacroAssembler::GetBuiltinEntry(Register target,
1791  Register function,
1792  Builtins::JavaScript id) {
1793  ASSERT(!AreAliased(target, function));
1794  GetBuiltinFunction(function, id);
1795  // Load the code entry point from the builtins object.
1796  Ldr(target, FieldMemOperand(function, JSFunction::kCodeEntryOffset));
1797 }
1798 
1799 
1800 void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
1801  InvokeFlag flag,
1802  const CallWrapper& call_wrapper) {
1803  ASM_LOCATION("MacroAssembler::InvokeBuiltin");
1804  // You can't call a builtin without a valid frame.
1805  ASSERT(flag == JUMP_FUNCTION || has_frame());
1806 
1807  // Get the builtin entry in x2 and setup the function object in x1.
1808  GetBuiltinEntry(x2, x1, id);
1809  if (flag == CALL_FUNCTION) {
1810  call_wrapper.BeforeCall(CallSize(x2));
1811  Call(x2);
1812  call_wrapper.AfterCall();
1813  } else {
1814  ASSERT(flag == JUMP_FUNCTION);
1815  Jump(x2);
1816  }
1817 }
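// Illustrative usage sketch (the builtin id and call wrapper are placeholders):
//
//   masm->InvokeBuiltin(Builtins::ADD, CALL_FUNCTION, NullCallWrapper());
//
// GetBuiltinEntry() above puts the entry point in x2 and the function object
// in x1, which is the register contract assumed by the builtins.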
1818 
1819 
1820 void MacroAssembler::TailCallExternalReference(const ExternalReference& ext,
1821  int num_arguments,
1822  int result_size) {
1823  // TODO(1236192): Most runtime routines don't need the number of
1824  // arguments passed in because it is constant. At some point we
1825  // should remove this need and make the runtime routine entry code
1826  // smarter.
1827  Mov(x0, num_arguments);
1828  JumpToExternalReference(ext);
1829 }
1830 
1831 
1832 void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
1833  int num_arguments,
1834  int result_size) {
1835  TailCallExternalReference(ExternalReference(fid, isolate()),
1836  num_arguments,
1837  result_size);
1838 }
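// Illustrative sketch: a runtime tail call therefore boils down to loading the
// argument count into x0 and the ExternalReference into x1 before jumping to
// the CEntry stub, e.g. (argument/result counts chosen for illustration):
//
//   masm->TailCallRuntime(Runtime::kHiddenPromoteScheduledException, 0, 1);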
1839 
1840 
1841 void MacroAssembler::InitializeNewString(Register string,
1842  Register length,
1843  Heap::RootListIndex map_index,
1844  Register scratch1,
1845  Register scratch2) {
1846  ASSERT(!AreAliased(string, length, scratch1, scratch2));
1847  LoadRoot(scratch2, map_index);
1848  SmiTag(scratch1, length);
1849  Str(scratch2, FieldMemOperand(string, HeapObject::kMapOffset));
1850 
1851  Mov(scratch2, String::kEmptyHashField);
1852  Str(scratch1, FieldMemOperand(string, String::kLengthOffset));
1853  Str(scratch2, FieldMemOperand(string, String::kHashFieldOffset));
1854 }
1855 
1856 
1857 int MacroAssembler::ActivationFrameAlignment() {
1858 #if V8_HOST_ARCH_ARM64
1859  // Running on the real platform. Use the alignment as mandated by the local
1860  // environment.
1861  // Note: This will break if we ever start generating snapshots on one ARM
1862  // platform for another ARM platform with a different alignment.
1863  return OS::ActivationFrameAlignment();
1864 #else // V8_HOST_ARCH_ARM64
1865  // If we are using the simulator then we should always align to the expected
1866  // alignment. As the simulator is used to generate snapshots we do not know
1867  // if the target platform will need alignment, so this is controlled from a
1868  // flag.
1869  return FLAG_sim_stack_alignment;
1870 #endif // V8_HOST_ARCH_ARM64
1871 }
1872 
1873 
1874 void MacroAssembler::CallCFunction(ExternalReference function,
1875  int num_of_reg_args) {
1876  CallCFunction(function, num_of_reg_args, 0);
1877 }
1878 
1879 
1880 void MacroAssembler::CallCFunction(ExternalReference function,
1881  int num_of_reg_args,
1882  int num_of_double_args) {
1883  UseScratchRegisterScope temps(this);
1884  Register temp = temps.AcquireX();
1885  Mov(temp, function);
1886  CallCFunction(temp, num_of_reg_args, num_of_double_args);
1887 }
1888 
1889 
1890 void MacroAssembler::CallCFunction(Register function,
1891  int num_of_reg_args,
1892  int num_of_double_args) {
1893  ASSERT(has_frame());
1894  // We can pass 8 integer arguments in registers. If we need to pass more than
1895  // that, we'll need to implement support for passing them on the stack.
1896  ASSERT(num_of_reg_args <= 8);
1897 
1898  // If we're passing doubles, we're limited to the following prototypes
1899  // (defined by ExternalReference::Type):
1900  // BUILTIN_COMPARE_CALL: int f(double, double)
1901  // BUILTIN_FP_FP_CALL: double f(double, double)
1902  // BUILTIN_FP_CALL: double f(double)
1903  // BUILTIN_FP_INT_CALL: double f(double, int)
1904  if (num_of_double_args > 0) {
1905  ASSERT(num_of_reg_args <= 1);
1906  ASSERT((num_of_double_args + num_of_reg_args) <= 2);
1907  }
1908 
1909 
1910  // If the stack pointer is not csp, we need to derive an aligned csp from the
1911  // current stack pointer.
1912  const Register old_stack_pointer = StackPointer();
1913  if (!csp.Is(old_stack_pointer)) {
1915 
1916  int sp_alignment = ActivationFrameAlignment();
1917  // The ABI mandates at least 16-byte alignment.
1918  ASSERT(sp_alignment >= 16);
1919  ASSERT(IsPowerOf2(sp_alignment));
1920 
1921  // The current stack pointer is a callee saved register, and is preserved
1922  // across the call.
1923  ASSERT(kCalleeSaved.IncludesAliasOf(old_stack_pointer));
1924 
1925  // Align and synchronize the system stack pointer with jssp.
1926  Bic(csp, old_stack_pointer, sp_alignment - 1);
1927  SetStackPointer(csp);
1928  }
1929 
1930  // Call directly. The function called cannot cause a GC, or allow preemption,
1931  // so the return address in the link register stays correct.
1932  Call(function);
1933 
1934  if (!csp.Is(old_stack_pointer)) {
1935  if (emit_debug_code()) {
1936  // Because the stack pointer must be aligned on a 16-byte boundary, the
1937  // aligned csp can be up to 12 bytes below the jssp. This is the case
1938  // where we only pushed one W register on top of an aligned jssp.
1939  UseScratchRegisterScope temps(this);
1940  Register temp = temps.AcquireX();
1942  Sub(temp, csp, old_stack_pointer);
1943  // We want temp <= 0 && temp >= -12.
1944  Cmp(temp, 0);
1945  Ccmp(temp, -12, NFlag, le);
1946  Check(ge, kTheStackWasCorruptedByMacroAssemblerCall);
1947  }
1948  SetStackPointer(old_stack_pointer);
1949  }
1950 }
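// Illustrative sketch of a BUILTIN_FP_FP_CALL-style call site (the operand
// registers are placeholders):
//
//   masm->Fmov(d0, lhs);
//   masm->Fmov(d1, rhs);
//   masm->CallCFunction(
//       ExternalReference::power_double_double_function(masm->isolate()),
//       0, 2);
//
// When jssp is the current stack pointer, the code above derives an aligned
// csp for the call and restores jssp afterwards.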
1951 
1952 
1953 void MacroAssembler::Jump(Register target) {
1954  Br(target);
1955 }
1956 
1957 
1958 void MacroAssembler::Jump(intptr_t target, RelocInfo::Mode rmode) {
1959  UseScratchRegisterScope temps(this);
1960  Register temp = temps.AcquireX();
1961  Mov(temp, Operand(target, rmode));
1962  Br(temp);
1963 }
1964 
1965 
1966 void MacroAssembler::Jump(Address target, RelocInfo::Mode rmode) {
1967  ASSERT(!RelocInfo::IsCodeTarget(rmode));
1968  Jump(reinterpret_cast<intptr_t>(target), rmode);
1969 }
1970 
1971 
1972 void MacroAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode) {
1973  ASSERT(RelocInfo::IsCodeTarget(rmode));
1974  AllowDeferredHandleDereference embedding_raw_address;
1975  Jump(reinterpret_cast<intptr_t>(code.location()), rmode);
1976 }
1977 
1978 
1979 void MacroAssembler::Call(Register target) {
1980  BlockPoolsScope scope(this);
1981 #ifdef DEBUG
1982  Label start_call;
1983  Bind(&start_call);
1984 #endif
1985 
1986  Blr(target);
1987 
1988 #ifdef DEBUG
1989  AssertSizeOfCodeGeneratedSince(&start_call, CallSize(target));
1990 #endif
1991 }
1992 
1993 
1994 void MacroAssembler::Call(Label* target) {
1995  BlockPoolsScope scope(this);
1996 #ifdef DEBUG
1997  Label start_call;
1998  Bind(&start_call);
1999 #endif
2000 
2001  Bl(target);
2002 
2003 #ifdef DEBUG
2004  AssertSizeOfCodeGeneratedSince(&start_call, CallSize(target));
2005 #endif
2006 }
2007 
2008 
2009 // MacroAssembler::CallSize is sensitive to changes in this function, as it
2010 // requires to know how many instructions are used to branch to the target.
2011 void MacroAssembler::Call(Address target, RelocInfo::Mode rmode) {
2012  BlockPoolsScope scope(this);
2013 #ifdef DEBUG
2014  Label start_call;
2015  Bind(&start_call);
2016 #endif
2017  // Statement positions are expected to be recorded when the target
2018  // address is loaded.
2019  positions_recorder()->WriteRecordedPositions();
2020 
2021  // Addresses always have 64 bits, so we shouldn't encounter NONE32.
2022  ASSERT(rmode != RelocInfo::NONE32);
2023 
2024  UseScratchRegisterScope temps(this);
2025  Register temp = temps.AcquireX();
2026 
2027  if (rmode == RelocInfo::NONE64) {
2028  // Addresses are 48 bits so we never need to load the upper 16 bits.
2029  uint64_t imm = reinterpret_cast<uint64_t>(target);
2030  // If we don't use ARM tagged addresses, the 16 higher bits must be 0.
2031  ASSERT(((imm >> 48) & 0xffff) == 0);
2032  movz(temp, (imm >> 0) & 0xffff, 0);
2033  movk(temp, (imm >> 16) & 0xffff, 16);
2034  movk(temp, (imm >> 32) & 0xffff, 32);
2035  } else {
2036  LoadRelocated(temp, Operand(reinterpret_cast<intptr_t>(target), rmode));
2037  }
2038  Blr(temp);
2039 #ifdef DEBUG
2040  AssertSizeOfCodeGeneratedSince(&start_call, CallSize(target, rmode));
2041 #endif
2042 }
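// Worked example: with rmode == RelocInfo::NONE64 and
// target == 0x0000123456789abc, the sequence emitted above is
//
//   movz temp, #0x9abc              // bits [15:0]
//   movk temp, #0x5678, lsl #16     // bits [31:16]
//   movk temp, #0x1234, lsl #32     // bits [47:32]
//   blr  temp
//
// i.e. four instructions, matching the no-relocation size reported by
// CallSize() below.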
2043 
2044 
2045 void MacroAssembler::Call(Handle<Code> code,
2046  RelocInfo::Mode rmode,
2047  TypeFeedbackId ast_id) {
2048 #ifdef DEBUG
2049  Label start_call;
2050  Bind(&start_call);
2051 #endif
2052 
2053  if ((rmode == RelocInfo::CODE_TARGET) && (!ast_id.IsNone())) {
2054  SetRecordedAstId(ast_id);
2055  rmode = RelocInfo::CODE_TARGET_WITH_ID;
2056  }
2057 
2058  AllowDeferredHandleDereference embedding_raw_address;
2059  Call(reinterpret_cast<Address>(code.location()), rmode);
2060 
2061 #ifdef DEBUG
2062  // Check the size of the code generated.
2063  AssertSizeOfCodeGeneratedSince(&start_call, CallSize(code, rmode, ast_id));
2064 #endif
2065 }
2066 
2067 
2068 int MacroAssembler::CallSize(Register target) {
2069  USE(target);
2070  return kInstructionSize;
2071 }
2072 
2073 
2074 int MacroAssembler::CallSize(Label* target) {
2075  USE(target);
2076  return kInstructionSize;
2077 }
2078 
2079 
2080 int MacroAssembler::CallSize(Address target, RelocInfo::Mode rmode) {
2081  USE(target);
2082 
2083  // Addresses always have 64 bits, so we shouldn't encounter NONE32.
2084  ASSERT(rmode != RelocInfo::NONE32);
2085 
2086  if (rmode == RelocInfo::NONE64) {
2087  return kCallSizeWithoutRelocation;
2088  } else {
2089  return kCallSizeWithRelocation;
2090  }
2091 }
2092 
2093 
2094 int MacroAssembler::CallSize(Handle<Code> code,
2095  RelocInfo::Mode rmode,
2096  TypeFeedbackId ast_id) {
2097  USE(code);
2098  USE(ast_id);
2099 
2100  // Addresses always have 64 bits, so we shouldn't encounter NONE32.
2101  ASSERT(rmode != RelocInfo::NONE32);
2102 
2103  if (rmode == RelocInfo::NONE64) {
2104  return kCallSizeWithoutRelocation;
2105  } else {
2106  return kCallSizeWithRelocation;
2107  }
2108 }
2109 
2110 
2111 
2112 
2113 
2114 void MacroAssembler::JumpForHeapNumber(Register object,
2115  Register heap_number_map,
2116  Label* on_heap_number,
2117  Label* on_not_heap_number) {
2118  ASSERT(on_heap_number || on_not_heap_number);
2119  AssertNotSmi(object);
2120 
2121  UseScratchRegisterScope temps(this);
2122  Register temp = temps.AcquireX();
2123 
2124  // Load the HeapNumber map if it is not passed.
2125  if (heap_number_map.Is(NoReg)) {
2126  heap_number_map = temps.AcquireX();
2127  LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
2128  } else {
2129  AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
2130  }
2131 
2132  ASSERT(!AreAliased(temp, heap_number_map));
2133 
2134  Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset));
2135  Cmp(temp, heap_number_map);
2136 
2137  if (on_heap_number) {
2138  B(eq, on_heap_number);
2139  }
2140  if (on_not_heap_number) {
2141  B(ne, on_not_heap_number);
2142  }
2143 }
2144 
2145 
2146 void MacroAssembler::JumpIfHeapNumber(Register object,
2147  Label* on_heap_number,
2148  Register heap_number_map) {
2149  JumpForHeapNumber(object,
2150  heap_number_map,
2151  on_heap_number,
2152  NULL);
2153 }
2154 
2155 
2156 void MacroAssembler::JumpIfNotHeapNumber(Register object,
2157  Label* on_not_heap_number,
2158  Register heap_number_map) {
2159  JumpForHeapNumber(object,
2160  heap_number_map,
2161  NULL,
2162  on_not_heap_number);
2163 }
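// Illustrative usage (label and registers are placeholders):
//
//   Label not_a_number;
//   masm->JumpIfNotHeapNumber(x0, &not_a_number, NoReg);
//
// Passing NoReg makes JumpForHeapNumber load the HeapNumber map root into a
// scratch register instead of using a caller-provided map.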
2164 
2165 
2166 void MacroAssembler::LookupNumberStringCache(Register object,
2167  Register result,
2168  Register scratch1,
2169  Register scratch2,
2170  Register scratch3,
2171  Label* not_found) {
2172  ASSERT(!AreAliased(object, result, scratch1, scratch2, scratch3));
2173 
2174  // Use of registers. Register result is used as a temporary.
2175  Register number_string_cache = result;
2176  Register mask = scratch3;
2177 
2178  // Load the number string cache.
2179  LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);
2180 
2181  // Make the hash mask from the length of the number string cache. It
2182  // contains two elements (number and string) for each cache entry.
2183  Ldrsw(mask, UntagSmiFieldMemOperand(number_string_cache,
2184  FixedArray::kLengthOffset));
2185  Asr(mask, mask, 1); // Divide length by two.
2186  Sub(mask, mask, 1); // Make mask.
2187 
2188  // Calculate the entry in the number string cache. The hash value in the
2189  // number string cache for smis is just the smi value, and the hash for
2190  // doubles is the xor of the upper and lower words. See
2191  // Heap::GetNumberStringCache.
2192  Label is_smi;
2193  Label load_result_from_cache;
2194 
2195  JumpIfSmi(object, &is_smi);
2196  CheckMap(object, scratch1, Heap::kHeapNumberMapRootIndex, not_found,
2197  DONT_DO_SMI_CHECK);
2198 
2200  Add(scratch1, object, HeapNumber::kValueOffset - kHeapObjectTag);
2201  Ldp(scratch1.W(), scratch2.W(), MemOperand(scratch1));
2202  Eor(scratch1, scratch1, scratch2);
2203  And(scratch1, scratch1, mask);
2204 
2205  // Calculate address of entry in string cache: each entry consists of two
2206  // pointer sized fields.
2207  Add(scratch1, number_string_cache,
2208  Operand(scratch1, LSL, kPointerSizeLog2 + 1));
2209 
2210  Register probe = mask;
2211  Ldr(probe, FieldMemOperand(scratch1, FixedArray::kHeaderSize));
2212  JumpIfSmi(probe, not_found);
2213  Ldr(d0, FieldMemOperand(object, HeapNumber::kValueOffset));
2214  Ldr(d1, FieldMemOperand(probe, HeapNumber::kValueOffset));
2215  Fcmp(d0, d1);
2216  B(ne, not_found);
2217  B(&load_result_from_cache);
2218 
2219  Bind(&is_smi);
2220  Register scratch = scratch1;
2221  And(scratch, mask, Operand::UntagSmi(object));
2222  // Calculate address of entry in string cache: each entry consists
2223  // of two pointer sized fields.
2224  Add(scratch, number_string_cache,
2225  Operand(scratch, LSL, kPointerSizeLog2 + 1));
2226 
2227  // Check if the entry is the smi we are looking for.
2228  Ldr(probe, FieldMemOperand(scratch, FixedArray::kHeaderSize));
2229  Cmp(object, probe);
2230  B(ne, not_found);
2231 
2232  // Get the result from the cache.
2233  Bind(&load_result_from_cache);
2234  Ldr(result, FieldMemOperand(scratch1, FixedArray::kHeaderSize + kPointerSize));
2235  IncrementCounter(isolate()->counters()->number_to_string_native(), 1,
2236  scratch1, scratch2);
2237 }
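// Worked example: for a cache with N entries the mask is N - 1. A heap number
// whose 64-bit payload splits into 32-bit halves hi:lo probes
//
//   index = (hi ^ lo) & mask
//   entry = number_string_cache + FixedArray::kHeaderSize
//           + index * 2 * kPointerSize   // key field, then cached string
//
// (ignoring the heap object tag), while a smi probes index = value & mask.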
2238 
2239 
2240 void MacroAssembler::TryConvertDoubleToInt(Register as_int,
2241  FPRegister value,
2242  FPRegister scratch_d,
2243  Label* on_successful_conversion,
2244  Label* on_failed_conversion) {
2245  // Convert to an int and back again, then compare with the original value.
2246  Fcvtzs(as_int, value);
2247  Scvtf(scratch_d, as_int);
2248  Fcmp(value, scratch_d);
2249 
2250  if (on_successful_conversion) {
2251  B(on_successful_conversion, eq);
2252  }
2253  if (on_failed_conversion) {
2254  B(on_failed_conversion, ne);
2255  }
2256 }
2257 
2258 
2259 void MacroAssembler::TestForMinusZero(DoubleRegister input) {
2260  UseScratchRegisterScope temps(this);
2261  Register temp = temps.AcquireX();
2262  // Floating point -0.0 is kMinInt as an integer, so subtracting 1 (cmp) will
2263  // cause overflow.
2264  Fmov(temp, input);
2265  Cmp(temp, 1);
2266 }
2267 
2268 
2269 void MacroAssembler::JumpIfMinusZero(DoubleRegister input,
2270  Label* on_negative_zero) {
2271  TestForMinusZero(input);
2272  B(vs, on_negative_zero);
2273 }
2274 
2275 
2276 void MacroAssembler::ClampInt32ToUint8(Register output, Register input) {
2277  // Clamp the value to [0..255].
2278  Cmp(input.W(), Operand(input.W(), UXTB));
2279  // If input < input & 0xff, it must be < 0, so saturate to 0.
2280  Csel(output.W(), wzr, input.W(), lt);
2281  // If input <= input & 0xff, it must be <= 255. Otherwise, saturate to 255.
2282  Csel(output.W(), output.W(), 255, le);
2283 }
2284 
2285 
2286 void MacroAssembler::ClampInt32ToUint8(Register in_out) {
2287  ClampInt32ToUint8(in_out, in_out);
2288 }
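// Worked example of the branchless clamp above:
//   input = -5  : (input & 0xff) = 251 > -5,  so 'lt' holds and 0 is selected;
//   input = 300 : (input & 0xff) = 44 < 300,  so 'le' fails and 255 is selected;
//   input = 42  : input equals its low byte, so the value is left unchanged.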
2289 
2290 
2291 void MacroAssembler::ClampDoubleToUint8(Register output,
2292  DoubleRegister input,
2293  DoubleRegister dbl_scratch) {
2294  // This conversion follows the WebIDL "[Clamp]" rules for PIXEL types:
2295  // - Inputs lower than 0 (including -infinity) produce 0.
2296  // - Inputs higher than 255 (including +infinity) produce 255.
2297  // Also, it seems that PIXEL types use round-to-nearest rather than
2298  // round-towards-zero.
2299 
2300  // Squash +infinity before the conversion, since Fcvtnu will normally
2301  // convert it to 0.
2302  Fmov(dbl_scratch, 255);
2303  Fmin(dbl_scratch, dbl_scratch, input);
2304 
2305  // Convert double to unsigned integer. Values less than zero become zero.
2306  // Values greater than 255 have already been clamped to 255.
2307  Fcvtnu(output, dbl_scratch);
2308 }
2309 
2310 
2311 void MacroAssembler::CopyFieldsLoopPairsHelper(Register dst,
2312  Register src,
2313  unsigned count,
2314  Register scratch1,
2315  Register scratch2,
2316  Register scratch3,
2317  Register scratch4,
2318  Register scratch5) {
2319  // Untag src and dst into scratch registers.
2320  // Copy src->dst in a tight loop.
2321  ASSERT(!AreAliased(dst, src,
2322  scratch1, scratch2, scratch3, scratch4, scratch5));
2323  ASSERT(count >= 2);
2324 
2325  const Register& remaining = scratch3;
2326  Mov(remaining, count / 2);
2327 
2328  const Register& dst_untagged = scratch1;
2329  const Register& src_untagged = scratch2;
2330  Sub(dst_untagged, dst, kHeapObjectTag);
2331  Sub(src_untagged, src, kHeapObjectTag);
2332 
2333  // Copy fields in pairs.
2334  Label loop;
2335  Bind(&loop);
2336  Ldp(scratch4, scratch5,
2337  MemOperand(src_untagged, kXRegSize * 2, PostIndex));
2338  Stp(scratch4, scratch5,
2339  MemOperand(dst_untagged, kXRegSize * 2, PostIndex));
2340  Sub(remaining, remaining, 1);
2341  Cbnz(remaining, &loop);
2342 
2343  // Handle the leftovers.
2344  if (count & 1) {
2345  Ldr(scratch4, MemOperand(src_untagged));
2346  Str(scratch4, MemOperand(dst_untagged));
2347  }
2348 }
2349 
2350 
2351 void MacroAssembler::CopyFieldsUnrolledPairsHelper(Register dst,
2352  Register src,
2353  unsigned count,
2354  Register scratch1,
2355  Register scratch2,
2356  Register scratch3,
2357  Register scratch4) {
2358  // Untag src and dst into scratch registers.
2359  // Copy src->dst in an unrolled loop.
2360  ASSERT(!AreAliased(dst, src, scratch1, scratch2, scratch3, scratch4));
2361 
2362  const Register& dst_untagged = scratch1;
2363  const Register& src_untagged = scratch2;
2364  sub(dst_untagged, dst, kHeapObjectTag);
2365  sub(src_untagged, src, kHeapObjectTag);
2366 
2367  // Copy fields in pairs.
2368  for (unsigned i = 0; i < count / 2; i++) {
2369  Ldp(scratch3, scratch4, MemOperand(src_untagged, kXRegSize * 2, PostIndex));
2370  Stp(scratch3, scratch4, MemOperand(dst_untagged, kXRegSize * 2, PostIndex));
2371  }
2372 
2373  // Handle the leftovers.
2374  if (count & 1) {
2375  Ldr(scratch3, MemOperand(src_untagged));
2376  Str(scratch3, MemOperand(dst_untagged));
2377  }
2378 }
2379 
2380 
2381 void MacroAssembler::CopyFieldsUnrolledHelper(Register dst,
2382  Register src,
2383  unsigned count,
2384  Register scratch1,
2385  Register scratch2,
2386  Register scratch3) {
2387  // Untag src and dst into scratch registers.
2388  // Copy src->dst in an unrolled loop.
2389  ASSERT(!AreAliased(dst, src, scratch1, scratch2, scratch3));
2390 
2391  const Register& dst_untagged = scratch1;
2392  const Register& src_untagged = scratch2;
2393  Sub(dst_untagged, dst, kHeapObjectTag);
2394  Sub(src_untagged, src, kHeapObjectTag);
2395 
2396  // Copy fields one by one.
2397  for (unsigned i = 0; i < count; i++) {
2398  Ldr(scratch3, MemOperand(src_untagged, kXRegSize, PostIndex));
2399  Str(scratch3, MemOperand(dst_untagged, kXRegSize, PostIndex));
2400  }
2401 }
2402 
2403 
2404 void MacroAssembler::CopyFields(Register dst, Register src, CPURegList temps,
2405  unsigned count) {
2406  // One of two methods is used:
2407  //
2408  // For high 'count' values where many scratch registers are available:
2409  // Untag src and dst into scratch registers.
2410  // Copy src->dst in a tight loop.
2411  //
2412  // For low 'count' values or where few scratch registers are available:
2413  // Untag src and dst into scratch registers.
2414  // Copy src->dst in an unrolled loop.
2415  //
2416  // In both cases, fields are copied in pairs if possible, and left-overs are
2417  // handled separately.
2418  ASSERT(!AreAliased(dst, src));
2419  ASSERT(!temps.IncludesAliasOf(dst));
2420  ASSERT(!temps.IncludesAliasOf(src));
2421  ASSERT(!temps.IncludesAliasOf(xzr));
2422 
2423  if (emit_debug_code()) {
2424  Cmp(dst, src);
2425  Check(ne, kTheSourceAndDestinationAreTheSame);
2426  }
2427 
2428  // The value of 'count' at which a loop will be generated (if there are
2429  // enough scratch registers).
2430  static const unsigned kLoopThreshold = 8;
2431 
2432  UseScratchRegisterScope masm_temps(this);
2433  if ((temps.Count() >= 3) && (count >= kLoopThreshold)) {
2434  CopyFieldsLoopPairsHelper(dst, src, count,
2435  Register(temps.PopLowestIndex()),
2436  Register(temps.PopLowestIndex()),
2437  Register(temps.PopLowestIndex()),
2438  masm_temps.AcquireX(),
2439  masm_temps.AcquireX());
2440  } else if (temps.Count() >= 2) {
2441  CopyFieldsUnrolledPairsHelper(dst, src, count,
2442  Register(temps.PopLowestIndex()),
2443  Register(temps.PopLowestIndex()),
2444  masm_temps.AcquireX(),
2445  masm_temps.AcquireX());
2446  } else if (temps.Count() == 1) {
2447  CopyFieldsUnrolledHelper(dst, src, count,
2448  Register(temps.PopLowestIndex()),
2449  masm_temps.AcquireX(),
2450  masm_temps.AcquireX());
2451  } else {
2452  UNREACHABLE();
2453  }
2454 }
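// Illustrative usage sketch (registers chosen arbitrarily): callers pass a
// list of registers that CopyFields may clobber, e.g.
//
//   masm->CopyFields(dst_obj, src_obj, CPURegList(x10, x11, x12), 7);
//
// With at least three temps and count >= kLoopThreshold the tight-loop helper
// is used; otherwise one of the unrolled variants above is selected.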
2455 
2456 
2457 void MacroAssembler::CopyBytes(Register dst,
2458  Register src,
2459  Register length,
2460  Register scratch,
2461  CopyHint hint) {
2462  UseScratchRegisterScope temps(this);
2463  Register tmp1 = temps.AcquireX();
2464  Register tmp2 = temps.AcquireX();
2465  ASSERT(!AreAliased(src, dst, length, scratch, tmp1, tmp2));
2466  ASSERT(!AreAliased(src, dst, csp));
2467 
2468  if (emit_debug_code()) {
2469  // Check copy length.
2470  Cmp(length, 0);
2471  Assert(ge, kUnexpectedNegativeValue);
2472 
2473  // Check src and dst buffers don't overlap.
2474  Add(scratch, src, length); // Calculate end of src buffer.
2475  Cmp(scratch, dst);
2476  Add(scratch, dst, length); // Calculate end of dst buffer.
2477  Ccmp(scratch, src, ZFlag, gt);
2478  Assert(le, kCopyBuffersOverlap);
2479  }
2480 
2481  Label short_copy, short_loop, bulk_loop, done;
2482 
2483  if ((hint == kCopyLong || hint == kCopyUnknown) && !FLAG_optimize_for_size) {
2484  Register bulk_length = scratch;
2485  int pair_size = 2 * kXRegSize;
2486  int pair_mask = pair_size - 1;
2487 
2488  Bic(bulk_length, length, pair_mask);
2489  Cbz(bulk_length, &short_copy);
2490  Bind(&bulk_loop);
2491  Sub(bulk_length, bulk_length, pair_size);
2492  Ldp(tmp1, tmp2, MemOperand(src, pair_size, PostIndex));
2493  Stp(tmp1, tmp2, MemOperand(dst, pair_size, PostIndex));
2494  Cbnz(bulk_length, &bulk_loop);
2495 
2496  And(length, length, pair_mask);
2497  }
2498 
2499  Bind(&short_copy);
2500  Cbz(length, &done);
2501  Bind(&short_loop);
2502  Sub(length, length, 1);
2503  Ldrb(tmp1, MemOperand(src, 1, PostIndex));
2504  Strb(tmp1, MemOperand(dst, 1, PostIndex));
2505  Cbnz(length, &short_loop);
2506 
2507 
2508  Bind(&done);
2509 }
2510 
2511 
2512 void MacroAssembler::FillFields(Register dst,
2513  Register field_count,
2514  Register filler) {
2515  ASSERT(!dst.Is(csp));
2516  UseScratchRegisterScope temps(this);
2517  Register field_ptr = temps.AcquireX();
2518  Register counter = temps.AcquireX();
2519  Label done;
2520 
2521  // Decrement count. If the result < zero, count was zero, and there's nothing
2522  // to do. If count was one, flags are set to fail the gt condition at the end
2523  // of the pairs loop.
2524  Subs(counter, field_count, 1);
2525  B(lt, &done);
2526 
2527  // There's at least one field to fill, so do this unconditionally.
2528  Str(filler, MemOperand(dst, kPointerSize, PostIndex));
2529 
2530  // If the bottom bit of counter is set, there are an even number of fields to
2531  // fill, so pull the start pointer back by one field, allowing the pairs loop
2532  // to overwrite the field that was stored above.
2533  And(field_ptr, counter, 1);
2534  Sub(field_ptr, dst, Operand(field_ptr, LSL, kPointerSizeLog2));
2535 
2536  // Store filler to memory in pairs.
2537  Label entry, loop;
2538  B(&entry);
2539  Bind(&loop);
2540  Stp(filler, filler, MemOperand(field_ptr, 2 * kPointerSize, PostIndex));
2541  Subs(counter, counter, 2);
2542  Bind(&entry);
2543  B(gt, &loop);
2544 
2545  Bind(&done);
2546 }
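// Worked example: with field_count == 4, counter becomes 3. One filler is
// stored (advancing dst), and because counter is odd the pair pointer is
// pulled back onto that first field. The Stp loop then runs twice
// (counter 3 -> 1 -> -1), covering all four fields; the first field is simply
// written twice.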
2547 
2548 
2549 void MacroAssembler::JumpIfEitherIsNotSequentialAsciiStrings(
2550  Register first,
2551  Register second,
2552  Register scratch1,
2553  Register scratch2,
2554  Label* failure,
2555  SmiCheckType smi_check) {
2556 
2557  if (smi_check == DO_SMI_CHECK) {
2558  JumpIfEitherSmi(first, second, failure);
2559  } else if (emit_debug_code()) {
2560  ASSERT(smi_check == DONT_DO_SMI_CHECK);
2561  Label not_smi;
2562  JumpIfEitherSmi(first, second, NULL, &not_smi);
2563 
2564  // At least one input is a smi, but the flags indicated a smi check wasn't
2565  // needed.
2566  Abort(kUnexpectedSmi);
2567 
2568  Bind(&not_smi);
2569  }
2570 
2571  // Test that both first and second are sequential ASCII strings.
2572  Ldr(scratch1, FieldMemOperand(first, HeapObject::kMapOffset));
2573  Ldr(scratch2, FieldMemOperand(second, HeapObject::kMapOffset));
2574  Ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
2575  Ldrb(scratch2, FieldMemOperand(scratch2, Map::kInstanceTypeOffset));
2576 
2577  JumpIfEitherInstanceTypeIsNotSequentialAscii(scratch1,
2578  scratch2,
2579  scratch1,
2580  scratch2,
2581  failure);
2582 }
2583 
2584 
2585 void MacroAssembler::JumpIfEitherInstanceTypeIsNotSequentialAscii(
2586  Register first,
2587  Register second,
2588  Register scratch1,
2589  Register scratch2,
2590  Label* failure) {
2591  ASSERT(!AreAliased(scratch1, second));
2592  ASSERT(!AreAliased(scratch1, scratch2));
2593  static const int kFlatAsciiStringMask =
2594  kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
2595  static const int kFlatAsciiStringTag = ASCII_STRING_TYPE;
2596  And(scratch1, first, kFlatAsciiStringMask);
2597  And(scratch2, second, kFlatAsciiStringMask);
2598  Cmp(scratch1, kFlatAsciiStringTag);
2599  Ccmp(scratch2, kFlatAsciiStringTag, NoFlag, eq);
2600  B(ne, failure);
2601 }
2602 
2603 
2604 void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii(Register type,
2605  Register scratch,
2606  Label* failure) {
2607  const int kFlatAsciiStringMask =
2608  kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
2609  const int kFlatAsciiStringTag =
2610  kStringTag | kSeqStringTag | kOneByteStringTag;
2611  And(scratch, type, kFlatAsciiStringMask);
2612  Cmp(scratch, kFlatAsciiStringTag);
2613  B(ne, failure);
2614 }
2615 
2616 
2617 void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialAscii(
2618  Register first,
2619  Register second,
2620  Register scratch1,
2621  Register scratch2,
2622  Label* failure) {
2623  ASSERT(!AreAliased(first, second, scratch1, scratch2));
2624  const int kFlatAsciiStringMask =
2626  const int kFlatAsciiStringTag =
2628  And(scratch1, first, kFlatAsciiStringMask);
2629  And(scratch2, second, kFlatAsciiStringMask);
2630  Cmp(scratch1, kFlatAsciiStringTag);
2631  Ccmp(scratch2, kFlatAsciiStringTag, NoFlag, eq);
2632  B(ne, failure);
2633 }
2634 
2635 
2636 void MacroAssembler::JumpIfNotUniqueName(Register type,
2637  Label* not_unique_name) {
2638  STATIC_ASSERT((kInternalizedTag == 0) && (kStringTag == 0));
2639  // if ((type is string && type is internalized) || type == SYMBOL_TYPE) {
2640  // continue
2641  // } else {
2642  // goto not_unique_name
2643  // }
2644  Tst(type, kIsNotStringMask | kIsNotInternalizedMask);
2645  Ccmp(type, SYMBOL_TYPE, ZFlag, ne);
2646  B(ne, not_unique_name);
2647 }
2648 
2649 
2650 void MacroAssembler::InvokePrologue(const ParameterCount& expected,
2651  const ParameterCount& actual,
2652  Handle<Code> code_constant,
2653  Register code_reg,
2654  Label* done,
2655  InvokeFlag flag,
2656  bool* definitely_mismatches,
2657  const CallWrapper& call_wrapper) {
2658  bool definitely_matches = false;
2659  *definitely_mismatches = false;
2660  Label regular_invoke;
2661 
2662  // Check whether the expected and actual arguments count match. If not,
2663  // setup registers according to contract with ArgumentsAdaptorTrampoline:
2664  // x0: actual arguments count.
2665  // x1: function (passed through to callee).
2666  // x2: expected arguments count.
2667 
2668  // The code below is made a lot easier because the calling code already sets
2669  // up actual and expected registers according to the contract if values are
2670  // passed in registers.
2671  ASSERT(actual.is_immediate() || actual.reg().is(x0));
2672  ASSERT(expected.is_immediate() || expected.reg().is(x2));
2673  ASSERT((!code_constant.is_null() && code_reg.is(no_reg)) || code_reg.is(x3));
2674 
2675  if (expected.is_immediate()) {
2676  ASSERT(actual.is_immediate());
2677  if (expected.immediate() == actual.immediate()) {
2678  definitely_matches = true;
2679 
2680  } else {
2681  Mov(x0, actual.immediate());
2682  if (expected.immediate() ==
2683  SharedFunctionInfo::kDontAdaptArgumentsSentinel) {
2684  // Don't worry about adapting arguments for builtins that
2685  // don't want that done. Skip adaption code by making it look
2686  // like we have a match between expected and actual number of
2687  // arguments.
2688  definitely_matches = true;
2689  } else {
2690  *definitely_mismatches = true;
2691  // Set up x2 for the argument adaptor.
2692  Mov(x2, expected.immediate());
2693  }
2694  }
2695 
2696  } else { // expected is a register.
2697  Operand actual_op = actual.is_immediate() ? Operand(actual.immediate())
2698  : Operand(actual.reg());
2699  // If actual == expected perform a regular invocation.
2700  Cmp(expected.reg(), actual_op);
2701  B(eq, &regular_invoke);
2702  // Otherwise set up x0 for the argument adaptor.
2703  Mov(x0, actual_op);
2704  }
2705 
2706  // If the argument counts may mismatch, generate a call to the argument
2707  // adaptor.
2708  if (!definitely_matches) {
2709  if (!code_constant.is_null()) {
2710  Mov(x3, Operand(code_constant));
2712  }
2713 
2714  Handle<Code> adaptor =
2715  isolate()->builtins()->ArgumentsAdaptorTrampoline();
2716  if (flag == CALL_FUNCTION) {
2717  call_wrapper.BeforeCall(CallSize(adaptor));
2718  Call(adaptor);
2719  call_wrapper.AfterCall();
2720  if (!*definitely_mismatches) {
2721  // If the arg counts don't match, no extra code is emitted by
2722  // MAsm::InvokeCode and we can just fall through.
2723  B(done);
2724  }
2725  } else {
2726  Jump(adaptor, RelocInfo::CODE_TARGET);
2727  }
2728  }
2729  Bind(&regular_invoke);
2730 }
2731 
2732 
2733 void MacroAssembler::InvokeCode(Register code,
2734  const ParameterCount& expected,
2735  const ParameterCount& actual,
2736  InvokeFlag flag,
2737  const CallWrapper& call_wrapper) {
2738  // You can't call a function without a valid frame.
2739  ASSERT(flag == JUMP_FUNCTION || has_frame());
2740 
2741  Label done;
2742 
2743  bool definitely_mismatches = false;
2744  InvokePrologue(expected, actual, Handle<Code>::null(), code, &done, flag,
2745  &definitely_mismatches, call_wrapper);
2746 
2747  // If we are certain that actual != expected, then we know InvokePrologue will
2748  // have handled the call through the argument adaptor mechanism.
2749  // The called function expects the call kind in x5.
2750  if (!definitely_mismatches) {
2751  if (flag == CALL_FUNCTION) {
2752  call_wrapper.BeforeCall(CallSize(code));
2753  Call(code);
2754  call_wrapper.AfterCall();
2755  } else {
2756  ASSERT(flag == JUMP_FUNCTION);
2757  Jump(code);
2758  }
2759  }
2760 
2761  // Continue here if InvokePrologue does handle the invocation due to
2762  // mismatched parameter counts.
2763  Bind(&done);
2764 }
2765 
2766 
2767 void MacroAssembler::InvokeFunction(Register function,
2768  const ParameterCount& actual,
2769  InvokeFlag flag,
2770  const CallWrapper& call_wrapper) {
2771  // You can't call a function without a valid frame.
2772  ASSERT(flag == JUMP_FUNCTION || has_frame());
2773 
2774  // Contract with called JS functions requires that function is passed in x1.
2775  // (See FullCodeGenerator::Generate().)
2776  ASSERT(function.is(x1));
2777 
2778  Register expected_reg = x2;
2779  Register code_reg = x3;
2780 
2782  // The number of arguments is stored as an int32_t, and -1 is a marker
2783  // (SharedFunctionInfo::kDontAdaptArgumentsSentinel), so we need sign
2784  // extension to correctly handle it.
2785  Ldr(expected_reg, FieldMemOperand(function,
2786  JSFunction::kSharedFunctionInfoOffset));
2787  Ldrsw(expected_reg,
2788  FieldMemOperand(expected_reg,
2789  SharedFunctionInfo::kFormalParameterCountOffset));
2790  Ldr(code_reg,
2791  FieldMemOperand(function, JSFunction::kCodeEntryOffset));
2792 
2793  ParameterCount expected(expected_reg);
2794  InvokeCode(code_reg, expected, actual, flag, call_wrapper);
2795 }
2796 
2797 
2798 void MacroAssembler::InvokeFunction(Register function,
2799  const ParameterCount& expected,
2800  const ParameterCount& actual,
2801  InvokeFlag flag,
2802  const CallWrapper& call_wrapper) {
2803  // You can't call a function without a valid frame.
2804  ASSERT(flag == JUMP_FUNCTION || has_frame());
2805 
2806  // Contract with called JS functions requires that function is passed in x1.
2807  // (See FullCodeGenerator::Generate().)
2808  ASSERT(function.Is(x1));
2809 
2810  Register code_reg = x3;
2811 
2812  // Set up the context.
2813  Ldr(cp, FieldMemOperand(function, JSFunction::kContextOffset));
2814 
2815  // We call indirectly through the code field in the function to
2816  // allow recompilation to take effect without changing any of the
2817  // call sites.
2818  Ldr(code_reg, FieldMemOperand(function, JSFunction::kCodeEntryOffset));
2819  InvokeCode(code_reg, expected, actual, flag, call_wrapper);
2820 }
2821 
2822 
2823 void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
2824  const ParameterCount& expected,
2825  const ParameterCount& actual,
2826  InvokeFlag flag,
2827  const CallWrapper& call_wrapper) {
2828  // Contract with called JS functions requires that function is passed in x1.
2829  // (See FullCodeGenerator::Generate().)
2830  __ LoadObject(x1, function);
2831  InvokeFunction(x1, expected, actual, flag, call_wrapper);
2832 }
2833 
2834 
2835 void MacroAssembler::TryConvertDoubleToInt64(Register result,
2836  DoubleRegister double_input,
2837  Label* done) {
2838  // Try to convert with an FPU convert instruction. It's trivial to compute
2839  // the modulo operation on an integer register so we convert to a 64-bit
2840  // integer.
2841  //
2842  // Fcvtzs will saturate to INT64_MIN (0x800...00) or INT64_MAX (0x7ff...ff)
2843  // when the double is out of range. NaNs and infinities will be converted to 0
2844  // (as ECMA-262 requires).
2845  Fcvtzs(result.X(), double_input);
2846 
2847  // The values INT64_MIN (0x800...00) or INT64_MAX (0x7ff...ff) are not
2848  // representable using a double, so if the result is one of those then we know
2849  // that saturation occurred, and we need to manually handle the conversion.
2850  //
2851  // It is easy to detect INT64_MIN and INT64_MAX because adding or subtracting
2852  // 1 will cause signed overflow.
2853  Cmp(result.X(), 1);
2854  Ccmp(result.X(), -1, VFlag, vc);
2855 
2856  B(vc, done);
2857 }
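// The Cmp/Ccmp pair above detects saturation without a branch:
//   Cmp(result, 1)   computes result - 1, which overflows only for INT64_MIN;
//   Ccmp(result, -1) (evaluated when V was clear) computes result + 1, which
//                    overflows only for INT64_MAX.
// So B(vc, done) is taken exactly when the Fcvtzs result did not saturate.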
2858 
2859 
2860 void MacroAssembler::TruncateDoubleToI(Register result,
2861  DoubleRegister double_input) {
2862  Label done;
2863  ASSERT(jssp.Is(StackPointer()));
2864 
2865  // Try to convert the double to an int64. If successful, the bottom 32 bits
2866  // contain our truncated int32 result.
2867  TryConvertDoubleToInt64(result, double_input, &done);
2868 
2869  // If we fell through then inline version didn't succeed - call stub instead.
2870  Push(lr);
2871  Push(double_input); // Put input on stack.
2872 
2873  DoubleToIStub stub(jssp,
2874  result,
2875  0,
2876  true, // is_truncating
2877  true); // skip_fastpath
2878  CallStub(&stub); // DoubleToIStub preserves any registers it needs to clobber
2879 
2880  Drop(1, kDoubleSize); // Drop the double input on the stack.
2881  Pop(lr);
2882 
2883  Bind(&done);
2884 }
2885 
2886 
2887 void MacroAssembler::TruncateHeapNumberToI(Register result,
2888  Register object) {
2889  Label done;
2890  ASSERT(!result.is(object));
2891  ASSERT(jssp.Is(StackPointer()));
2892 
2893  Ldr(fp_scratch, FieldMemOperand(object, HeapNumber::kValueOffset));
2894 
2895  // Try to convert the double to an int64. If successful, the bottom 32 bits
2896  // contain our truncated int32 result.
2897  TryConvertDoubleToInt64(result, fp_scratch, &done);
2898 
2899  // If we fell through then inline version didn't succeed - call stub instead.
2900  Push(lr);
2901  DoubleToIStub stub(object,
2902  result,
2903  HeapNumber::kValueOffset - kHeapObjectTag,
2904  true, // is_truncating
2905  true); // skip_fastpath
2906  CallStub(&stub); // DoubleToIStub preserves any registers it needs to clobber
2907  Pop(lr);
2908 
2909  Bind(&done);
2910 }
2911 
2912 
2913 void MacroAssembler::Prologue(PrologueFrameMode frame_mode) {
2914  if (frame_mode == BUILD_STUB_FRAME) {
2915  ASSERT(StackPointer().Is(jssp));
2916  UseScratchRegisterScope temps(this);
2917  Register temp = temps.AcquireX();
2918  __ Mov(temp, Smi::FromInt(StackFrame::STUB));
2919  // Compiled stubs don't age, and so they don't need the predictable code
2920  // ageing sequence.
2921  __ Push(lr, fp, cp, temp);
2923  } else {
2924  if (isolate()->IsCodePreAgingActive()) {
2925  Code* stub = Code::GetPreAgedCodeAgeStub(isolate());
2926  __ EmitCodeAgeSequence(stub);
2927  } else {
2928  __ EmitFrameSetupForCodeAgePatching();
2929  }
2930  }
2931 }
2932 
2933 
2934 void MacroAssembler::EnterFrame(StackFrame::Type type) {
2935  ASSERT(jssp.Is(StackPointer()));
2936  UseScratchRegisterScope temps(this);
2937  Register type_reg = temps.AcquireX();
2938  Register code_reg = temps.AcquireX();
2939 
2940  Push(lr, fp, cp);
2941  Mov(type_reg, Smi::FromInt(type));
2942  Mov(code_reg, Operand(CodeObject()));
2943  Push(type_reg, code_reg);
2944  // jssp[4] : lr
2945  // jssp[3] : fp
2946  // jssp[2] : cp
2947  // jssp[1] : type
2948  // jssp[0] : code object
2949 
2950  // Adjust FP to point to saved FP.
2952 }
2953 
2954 
2955 void MacroAssembler::LeaveFrame(StackFrame::Type type) {
2956  ASSERT(jssp.Is(StackPointer()));
2957  // Drop the execution stack down to the frame pointer and restore
2958  // the caller frame pointer and return address.
2959  Mov(jssp, fp);
2961  Pop(fp, lr);
2962 }
2963 
2964 
2965 void MacroAssembler::ExitFramePreserveFPRegs() {
2966  PushCPURegList(kCallerSavedFP);
2967 }
2968 
2969 
2970 void MacroAssembler::ExitFrameRestoreFPRegs() {
2971  // Read the registers from the stack without popping them. The stack pointer
2972  // will be reset as part of the unwinding process.
2973  CPURegList saved_fp_regs = kCallerSavedFP;
2974  ASSERT(saved_fp_regs.Count() % 2 == 0);
2975 
2976  int offset = ExitFrameConstants::kLastExitFrameField;
2977  while (!saved_fp_regs.IsEmpty()) {
2978  const CPURegister& dst0 = saved_fp_regs.PopHighestIndex();
2979  const CPURegister& dst1 = saved_fp_regs.PopHighestIndex();
2980  offset -= 2 * kDRegSize;
2981  Ldp(dst1, dst0, MemOperand(fp, offset));
2982  }
2983 }
2984 
2985 
2986 void MacroAssembler::EnterExitFrame(bool save_doubles,
2987  const Register& scratch,
2988  int extra_space) {
2989  ASSERT(jssp.Is(StackPointer()));
2990 
2991  // Set up the new stack frame.
2992  Mov(scratch, Operand(CodeObject()));
2993  Push(lr, fp);
2994  Mov(fp, StackPointer());
2995  Push(xzr, scratch);
2996  // fp[8]: CallerPC (lr)
2997  // fp -> fp[0]: CallerFP (old fp)
2998  // fp[-8]: Space reserved for SPOffset.
2999  // jssp -> fp[-16]: CodeObject()
3000  STATIC_ASSERT((2 * kPointerSize) ==
3001  ExitFrameConstants::kCallerSPDisplacement);
3006 
3007  // Save the frame pointer and context pointer in the top frame.
3008  Mov(scratch, Operand(ExternalReference(Isolate::kCEntryFPAddress,
3009  isolate())));
3010  Str(fp, MemOperand(scratch));
3011  Mov(scratch, Operand(ExternalReference(Isolate::kContextAddress,
3012  isolate())));
3013  Str(cp, MemOperand(scratch));
3014 
3015  STATIC_ASSERT((-2 * kPointerSize) ==
3016  ExitFrameConstants::kLastExitFrameField);
3017  if (save_doubles) {
3018  ExitFramePreserveFPRegs();
3019  }
3020 
3021  // Reserve space for the return address and for user requested memory.
3022  // We do this before aligning to make sure that we end up correctly
3023  // aligned with the minimum of wasted space.
3024  Claim(extra_space + 1, kXRegSize);
3025  // fp[8]: CallerPC (lr)
3026  // fp -> fp[0]: CallerFP (old fp)
3027  // fp[-8]: Space reserved for SPOffset.
3028  // fp[-16]: CodeObject()
3029  // fp[-16 - fp_size]: Saved doubles (if save_doubles is true).
3030  // jssp[8]: Extra space reserved for caller (if extra_space != 0).
3031  // jssp -> jssp[0]: Space reserved for the return address.
3032 
3033  // Align and synchronize the system stack pointer with jssp.
3034  AlignAndSetCSPForFrame();
3035  ASSERT(csp.Is(StackPointer()));
3036 
3037  // fp[8]: CallerPC (lr)
3038  // fp -> fp[0]: CallerFP (old fp)
3039  // fp[-8]: Space reserved for SPOffset.
3040  // fp[-16]: CodeObject()
3041  // fp[-16 - fp_size]: Saved doubles (if save_doubles is true).
3042  // csp[8]: Memory reserved for the caller if extra_space != 0.
3043  // Alignment padding, if necessary.
3044  // csp -> csp[0]: Space reserved for the return address.
3045 
3046  // ExitFrame::GetStateForFramePointer expects to find the return address at
3047  // the memory address immediately below the pointer stored in SPOffset.
3048  // It is not safe to derive much else from SPOffset, because the size of the
3049  // padding can vary.
3050  Add(scratch, csp, kXRegSize);
3051  Str(scratch, MemOperand(fp, ExitFrameConstants::kSPOffset));
3052 }
3053 
3054 
3055 // Leave the current exit frame.
3056 void MacroAssembler::LeaveExitFrame(bool restore_doubles,
3057  const Register& scratch,
3058  bool restore_context) {
3059  ASSERT(csp.Is(StackPointer()));
3060 
3061  if (restore_doubles) {
3062  ExitFrameRestoreFPRegs();
3063  }
3064 
3065  // Restore the context pointer from the top frame.
3066  if (restore_context) {
3067  Mov(scratch, Operand(ExternalReference(Isolate::kContextAddress,
3068  isolate())));
3069  Ldr(cp, MemOperand(scratch));
3070  }
3071 
3072  if (emit_debug_code()) {
3073  // Also emit debug code to clear the cp in the top frame.
3074  Mov(scratch, Operand(ExternalReference(Isolate::kContextAddress,
3075  isolate())));
3076  Str(xzr, MemOperand(scratch));
3077  }
3078  // Clear the frame pointer from the top frame.
3079  Mov(scratch, Operand(ExternalReference(Isolate::kCEntryFPAddress,
3080  isolate())));
3081  Str(xzr, MemOperand(scratch));
3082 
3083  // Pop the exit frame.
3084  // fp[8]: CallerPC (lr)
3085  // fp -> fp[0]: CallerFP (old fp)
3086  // fp[...]: The rest of the frame.
3087  Mov(jssp, fp);
3088  SetStackPointer(jssp);
3090  Pop(fp, lr);
3091 }
3092 
3093 
3094 void MacroAssembler::SetCounter(StatsCounter* counter, int value,
3095  Register scratch1, Register scratch2) {
3096  if (FLAG_native_code_counters && counter->Enabled()) {
3097  Mov(scratch1, value);
3098  Mov(scratch2, ExternalReference(counter));
3099  Str(scratch1, MemOperand(scratch2));
3100  }
3101 }
3102 
3103 
3104 void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
3105  Register scratch1, Register scratch2) {
3106  ASSERT(value != 0);
3107  if (FLAG_native_code_counters && counter->Enabled()) {
3108  Mov(scratch2, ExternalReference(counter));
3109  Ldr(scratch1, MemOperand(scratch2));
3110  Add(scratch1, scratch1, value);
3111  Str(scratch1, MemOperand(scratch2));
3112  }
3113 }
3114 
3115 
3116 void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
3117  Register scratch1, Register scratch2) {
3118  IncrementCounter(counter, -value, scratch1, scratch2);
3119 }
3120 
3121 
3122 void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
3123  if (context_chain_length > 0) {
3124  // Move up the chain of contexts to the context containing the slot.
3125  Ldr(dst, MemOperand(cp, Context::SlotOffset(Context::PREVIOUS_INDEX)));
3126  for (int i = 1; i < context_chain_length; i++) {
3127  Ldr(dst, MemOperand(dst, Context::SlotOffset(Context::PREVIOUS_INDEX)));
3128  }
3129  } else {
3130  // Slot is in the current function context. Move it into the
3131  // destination register in case we store into it (the write barrier
3132  // cannot be allowed to destroy the context in cp).
3133  Mov(dst, cp);
3134  }
3135 }
3136 
3137 
3138 #ifdef ENABLE_DEBUGGER_SUPPORT
3139 void MacroAssembler::DebugBreak() {
3140  Mov(x0, 0);
3141  Mov(x1, ExternalReference(Runtime::kDebugBreak, isolate()));
3142  CEntryStub ces(1);
3143  ASSERT(AllowThisStubCall(&ces));
3144  Call(ces.GetCode(isolate()), RelocInfo::DEBUG_BREAK);
3145 }
3146 #endif
3147 
3148 
3149 void MacroAssembler::PushTryHandler(StackHandler::Kind kind,
3150  int handler_index) {
3151  ASSERT(jssp.Is(StackPointer()));
3152  // Adjust this code if the asserts don't hold.
3159 
3160  // For the JSEntry handler, we must preserve the live registers x0-x4.
3161  // (See JSEntryStub::GenerateBody().)
3162 
3163  unsigned state =
3164  StackHandler::IndexField::encode(handler_index) |
3165  StackHandler::KindField::encode(kind);
3166 
3167  // Set up the code object and the state for pushing.
3168  Mov(x10, Operand(CodeObject()));
3169  Mov(x11, state);
3170 
3171  // Push the frame pointer, context, state, and code object.
3172  if (kind == StackHandler::JS_ENTRY) {
3173  ASSERT(Smi::FromInt(0) == 0);
3174  Push(xzr, xzr, x11, x10);
3175  } else {
3176  Push(fp, cp, x11, x10);
3177  }
3178 
3179  // Link the current handler as the next handler.
3180  Mov(x11, ExternalReference(Isolate::kHandlerAddress, isolate()));
3181  Ldr(x10, MemOperand(x11));
3182  Push(x10);
3183  // Set this new handler as the current one.
3184  Str(jssp, MemOperand(x11));
3185 }
3186 
3187 
3188 void MacroAssembler::PopTryHandler() {
3190  Pop(x10);
3191  Mov(x11, ExternalReference(Isolate::kHandlerAddress, isolate()));
3193  Str(x10, MemOperand(x11));
3194 }
3195 
3196 
3197 void MacroAssembler::Allocate(int object_size,
3198  Register result,
3199  Register scratch1,
3200  Register scratch2,
3201  Label* gc_required,
3202  AllocationFlags flags) {
3203  ASSERT(object_size <= Page::kMaxRegularHeapObjectSize);
3204  if (!FLAG_inline_new) {
3205  if (emit_debug_code()) {
3206  // Trash the registers to simulate an allocation failure.
3207  // We apply salt to the original zap value to easily spot the values.
3208  Mov(result, (kDebugZapValue & ~0xffL) | 0x11L);
3209  Mov(scratch1, (kDebugZapValue & ~0xffL) | 0x21L);
3210  Mov(scratch2, (kDebugZapValue & ~0xffL) | 0x21L);
3211  }
3212  B(gc_required);
3213  return;
3214  }
3215 
3216  UseScratchRegisterScope temps(this);
3217  Register scratch3 = temps.AcquireX();
3218 
3219  ASSERT(!AreAliased(result, scratch1, scratch2, scratch3));
3220  ASSERT(result.Is64Bits() && scratch1.Is64Bits() && scratch2.Is64Bits());
3221 
3222  // Make object size into bytes.
3223  if ((flags & SIZE_IN_WORDS) != 0) {
3224  object_size *= kPointerSize;
3225  }
3226  ASSERT(0 == (object_size & kObjectAlignmentMask));
3227 
3228  // Check relative positions of allocation top and limit addresses.
3229  // The values must be adjacent in memory to allow the use of LDP.
3230  ExternalReference heap_allocation_top =
3231  AllocationUtils::GetAllocationTopReference(isolate(), flags);
3232  ExternalReference heap_allocation_limit =
3233  AllocationUtils::GetAllocationLimitReference(isolate(), flags);
3234  intptr_t top = reinterpret_cast<intptr_t>(heap_allocation_top.address());
3235  intptr_t limit = reinterpret_cast<intptr_t>(heap_allocation_limit.address());
3236  ASSERT((limit - top) == kPointerSize);
3237 
3238  // Set up allocation top address and object size registers.
3239  Register top_address = scratch1;
3240  Register allocation_limit = scratch2;
3241  Mov(top_address, Operand(heap_allocation_top));
3242 
3243  if ((flags & RESULT_CONTAINS_TOP) == 0) {
3244  // Load allocation top into result and the allocation limit.
3245  Ldp(result, allocation_limit, MemOperand(top_address));
3246  } else {
3247  if (emit_debug_code()) {
3248  // Assert that result actually contains top on entry.
3249  Ldr(scratch3, MemOperand(top_address));
3250  Cmp(result, scratch3);
3251  Check(eq, kUnexpectedAllocationTop);
3252  }
3253  // Load the allocation limit. 'result' already contains the allocation top.
3254  Ldr(allocation_limit, MemOperand(top_address, limit - top));
3255  }
3256 
3257  // We can ignore DOUBLE_ALIGNMENT flags here because doubles and pointers have
3258  // the same alignment on ARM64.
3260 
3261  // Calculate new top and bail out if new space is exhausted.
3262  Adds(scratch3, result, object_size);
3263  B(vs, gc_required);
3264  Cmp(scratch3, allocation_limit);
3265  B(hi, gc_required);
3266  Str(scratch3, MemOperand(top_address));
3267 
3268  // Tag the object if requested.
3269  if ((flags & TAG_OBJECT) != 0) {
3270  Orr(result, result, kHeapObjectTag);
3271  }
3272 }
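// Illustrative usage (label and registers are placeholders):
//
//   Label gc_required;
//   masm->Allocate(HeapNumber::kSize, x0, x1, x2, &gc_required, TAG_OBJECT);
//
// On success x0 holds a tagged pointer into new space; on failure control
// reaches gc_required. AllocateHeapNumber() below uses exactly this pattern.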
3273 
3274 
3275 void MacroAssembler::Allocate(Register object_size,
3276  Register result,
3277  Register scratch1,
3278  Register scratch2,
3279  Label* gc_required,
3280  AllocationFlags flags) {
3281  if (!FLAG_inline_new) {
3282  if (emit_debug_code()) {
3283  // Trash the registers to simulate an allocation failure.
3284  // We apply salt to the original zap value to easily spot the values.
3285  Mov(result, (kDebugZapValue & ~0xffL) | 0x11L);
3286  Mov(scratch1, (kDebugZapValue & ~0xffL) | 0x21L);
3287  Mov(scratch2, (kDebugZapValue & ~0xffL) | 0x21L);
3288  }
3289  B(gc_required);
3290  return;
3291  }
3292 
3293  UseScratchRegisterScope temps(this);
3294  Register scratch3 = temps.AcquireX();
3295 
3296  ASSERT(!AreAliased(object_size, result, scratch1, scratch2, scratch3));
3297  ASSERT(object_size.Is64Bits() && result.Is64Bits() &&
3298  scratch1.Is64Bits() && scratch2.Is64Bits());
3299 
3300  // Check relative positions of allocation top and limit addresses.
3301  // The values must be adjacent in memory to allow the use of LDP.
3302  ExternalReference heap_allocation_top =
3303  AllocationUtils::GetAllocationTopReference(isolate(), flags);
3304  ExternalReference heap_allocation_limit =
3305  AllocationUtils::GetAllocationLimitReference(isolate(), flags);
3306  intptr_t top = reinterpret_cast<intptr_t>(heap_allocation_top.address());
3307  intptr_t limit = reinterpret_cast<intptr_t>(heap_allocation_limit.address());
3308  ASSERT((limit - top) == kPointerSize);
3309 
3310  // Set up allocation top address and object size registers.
3311  Register top_address = scratch1;
3312  Register allocation_limit = scratch2;
3313  Mov(top_address, heap_allocation_top);
3314 
3315  if ((flags & RESULT_CONTAINS_TOP) == 0) {
3316  // Load allocation top into result and the allocation limit.
3317  Ldp(result, allocation_limit, MemOperand(top_address));
3318  } else {
3319  if (emit_debug_code()) {
3320  // Assert that result actually contains top on entry.
3321  Ldr(scratch3, MemOperand(top_address));
3322  Cmp(result, scratch3);
3323  Check(eq, kUnexpectedAllocationTop);
3324  }
3325  // Load the allocation limit. 'result' already contains the allocation top.
3326  Ldr(allocation_limit, MemOperand(top_address, limit - top));
3327  }
3328 
3329  // We can ignore DOUBLE_ALIGNMENT flags here because doubles and pointers have
3330  // the same alignment on ARM64.
3332 
3333  // Calculate new top and bail out if new space is exhausted
3334  if ((flags & SIZE_IN_WORDS) != 0) {
3335  Adds(scratch3, result, Operand(object_size, LSL, kPointerSizeLog2));
3336  } else {
3337  Adds(scratch3, result, object_size);
3338  }
3339 
3340  if (emit_debug_code()) {
3341  Tst(scratch3, kObjectAlignmentMask);
3342  Check(eq, kUnalignedAllocationInNewSpace);
3343  }
3344 
3345  B(vs, gc_required);
3346  Cmp(scratch3, allocation_limit);
3347  B(hi, gc_required);
3348  Str(scratch3, MemOperand(top_address));
3349 
3350  // Tag the object if requested.
3351  if ((flags & TAG_OBJECT) != 0) {
3352  Orr(result, result, kHeapObjectTag);
3353  }
3354 }
3355 
3356 
3357 void MacroAssembler::UndoAllocationInNewSpace(Register object,
3358  Register scratch) {
3359  ExternalReference new_space_allocation_top =
3360  ExternalReference::new_space_allocation_top_address(isolate());
3361 
3362  // Make sure the object has no tag before resetting top.
3363  Bic(object, object, kHeapObjectTagMask);
3364 #ifdef DEBUG
3365  // Check that the object un-allocated is below the current top.
3366  Mov(scratch, new_space_allocation_top);
3367  Ldr(scratch, MemOperand(scratch));
3368  Cmp(object, scratch);
3369  Check(lt, kUndoAllocationOfNonAllocatedMemory);
3370 #endif
3371  // Write the address of the object to un-allocate as the current top.
3372  Mov(scratch, new_space_allocation_top);
3373  Str(object, MemOperand(scratch));
3374 }
3375 
3376 
3377 void MacroAssembler::AllocateTwoByteString(Register result,
3378  Register length,
3379  Register scratch1,
3380  Register scratch2,
3381  Register scratch3,
3382  Label* gc_required) {
3383  ASSERT(!AreAliased(result, length, scratch1, scratch2, scratch3));
3384  // Calculate the number of bytes needed for the characters in the string while
3385  // observing object alignment.
3387  Add(scratch1, length, length); // Length in bytes, not chars.
3389  Bic(scratch1, scratch1, kObjectAlignmentMask);
3390 
3391  // Allocate two-byte string in new space.
3392  Allocate(scratch1,
3393  result,
3394  scratch2,
3395  scratch3,
3396  gc_required,
3397  TAG_OBJECT);
3398 
3399  // Set the map, length and hash field.
3400  InitializeNewString(result,
3401  length,
3402  Heap::kStringMapRootIndex,
3403  scratch1,
3404  scratch2);
3405 }
3406 
3407 
3408 void MacroAssembler::AllocateAsciiString(Register result,
3409  Register length,
3410  Register scratch1,
3411  Register scratch2,
3412  Register scratch3,
3413  Label* gc_required) {
3414  ASSERT(!AreAliased(result, length, scratch1, scratch2, scratch3));
3415  // Calculate the number of bytes needed for the characters in the string while
3416  // observing object alignment.
3418  STATIC_ASSERT(kCharSize == 1);
3420  Bic(scratch1, scratch1, kObjectAlignmentMask);
3421 
3422  // Allocate ASCII string in new space.
3423  Allocate(scratch1,
3424  result,
3425  scratch2,
3426  scratch3,
3427  gc_required,
3428  TAG_OBJECT);
3429 
3430  // Set the map, length and hash field.
3431  InitializeNewString(result,
3432  length,
3433  Heap::kAsciiStringMapRootIndex,
3434  scratch1,
3435  scratch2);
3436 }
3437 
3438 
3439 void MacroAssembler::AllocateTwoByteConsString(Register result,
3440  Register length,
3441  Register scratch1,
3442  Register scratch2,
3443  Label* gc_required) {
3444  Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
3445  TAG_OBJECT);
3446 
3447  InitializeNewString(result,
3448  length,
3449  Heap::kConsStringMapRootIndex,
3450  scratch1,
3451  scratch2);
3452 }
3453 
3454 
3455 void MacroAssembler::AllocateAsciiConsString(Register result,
3456  Register length,
3457  Register scratch1,
3458  Register scratch2,
3459  Label* gc_required) {
3460  Label allocate_new_space, install_map;
3461  AllocationFlags flags = TAG_OBJECT;
3462 
3463  ExternalReference high_promotion_mode = ExternalReference::
3464  new_space_high_promotion_mode_active_address(isolate());
3465  Mov(scratch1, high_promotion_mode);
3466  Ldr(scratch1, MemOperand(scratch1));
3467  Cbz(scratch1, &allocate_new_space);
3468 
3469  Allocate(ConsString::kSize,
3470  result,
3471  scratch1,
3472  scratch2,
3473  gc_required,
3474  static_cast<AllocationFlags>(flags | PRETENURE_OLD_POINTER_SPACE));
3475 
3476  B(&install_map);
3477 
3478  Bind(&allocate_new_space);
3479  Allocate(ConsString::kSize,
3480  result,
3481  scratch1,
3482  scratch2,
3483  gc_required,
3484  flags);
3485 
3486  Bind(&install_map);
3487 
3488  InitializeNewString(result,
3489  length,
3490  Heap::kConsAsciiStringMapRootIndex,
3491  scratch1,
3492  scratch2);
3493 }
3494 
3495 
3496 void MacroAssembler::AllocateTwoByteSlicedString(Register result,
3497  Register length,
3498  Register scratch1,
3499  Register scratch2,
3500  Label* gc_required) {
3501  ASSERT(!AreAliased(result, length, scratch1, scratch2));
3502  Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
3503  TAG_OBJECT);
3504 
3505  InitializeNewString(result,
3506  length,
3507  Heap::kSlicedStringMapRootIndex,
3508  scratch1,
3509  scratch2);
3510 }
3511 
3512 
3513 void MacroAssembler::AllocateAsciiSlicedString(Register result,
3514  Register length,
3515  Register scratch1,
3516  Register scratch2,
3517  Label* gc_required) {
3518  ASSERT(!AreAliased(result, length, scratch1, scratch2));
3519  Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
3520  TAG_OBJECT);
3521 
3522  InitializeNewString(result,
3523  length,
3524  Heap::kSlicedAsciiStringMapRootIndex,
3525  scratch1,
3526  scratch2);
3527 }
3528 
3529 
3530 // Allocates a heap number or jumps to the need_gc label if the young space
3531 // is full and a scavenge is needed.
3532 void MacroAssembler::AllocateHeapNumber(Register result,
3533  Label* gc_required,
3534  Register scratch1,
3535  Register scratch2,
3536  Register heap_number_map) {
3537  // Allocate an object in the heap for the heap number and tag it as a heap
3538  // object.
3539  Allocate(HeapNumber::kSize, result, scratch1, scratch2, gc_required,
3540  TAG_OBJECT);
3541 
3542  // Store heap number map in the allocated object.
3543  if (heap_number_map.Is(NoReg)) {
3544  heap_number_map = scratch1;
3545  LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
3546  }
3547  AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
3548  Str(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset));
3549 }
3550 
3551 
3552 void MacroAssembler::AllocateHeapNumberWithValue(Register result,
3553  DoubleRegister value,
3554  Label* gc_required,
3555  Register scratch1,
3556  Register scratch2,
3557  Register heap_number_map) {
3558  // TODO(all): Check if it would be more efficient to use STP to store both
3559  // the map and the value.
3560  AllocateHeapNumber(result, gc_required, scratch1, scratch2, heap_number_map);
3561  Str(value, FieldMemOperand(result, HeapNumber::kValueOffset));
3562 }
3563 
3564 
3565 void MacroAssembler::JumpIfObjectType(Register object,
3566  Register map,
3567  Register type_reg,
3568  InstanceType type,
3569  Label* if_cond_pass,
3570  Condition cond) {
3571  CompareObjectType(object, map, type_reg, type);
3572  B(cond, if_cond_pass);
3573 }
3574 
3575 
3576 void MacroAssembler::JumpIfNotObjectType(Register object,
3577  Register map,
3578  Register type_reg,
3579  InstanceType type,
3580  Label* if_not_object) {
3581  JumpIfObjectType(object, map, type_reg, type, if_not_object, ne);
3582 }
3583 
3584 
3585 // Sets condition flags based on comparison, and returns type in type_reg.
3586 void MacroAssembler::CompareObjectType(Register object,
3587  Register map,
3588  Register type_reg,
3589  InstanceType type) {
3591  CompareInstanceType(map, type_reg, type);
3592 }
3593 
3594 
3595 // Sets condition flags based on comparison, and returns type in type_reg.
3596 void MacroAssembler::CompareInstanceType(Register map,
3597  Register type_reg,
3598  InstanceType type) {
3599  Ldrb(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
3600  Cmp(type_reg, type);
3601 }
3602 
3603 
3604 void MacroAssembler::CompareMap(Register obj,
3605  Register scratch,
3606  Handle<Map> map) {
3607  Ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
3608  CompareMap(scratch, map);
3609 }
3610 
3611 
3612 void MacroAssembler::CompareMap(Register obj_map,
3613  Handle<Map> map) {
3614  Cmp(obj_map, Operand(map));
3615 }
3616 
3617 
3618 void MacroAssembler::CheckMap(Register obj,
3619  Register scratch,
3620  Handle<Map> map,
3621  Label* fail,
3622  SmiCheckType smi_check_type) {
3623  if (smi_check_type == DO_SMI_CHECK) {
3624  JumpIfSmi(obj, fail);
3625  }
3626 
3627  CompareMap(obj, scratch, map);
3628  B(ne, fail);
3629 }
3630 
3631 
3632 void MacroAssembler::CheckMap(Register obj,
3633  Register scratch,
3634  Heap::RootListIndex index,
3635  Label* fail,
3636  SmiCheckType smi_check_type) {
3637  if (smi_check_type == DO_SMI_CHECK) {
3638  JumpIfSmi(obj, fail);
3639  }
3640  Ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
3641  JumpIfNotRoot(scratch, index, fail);
3642 }
3643 
3644 
3645 void MacroAssembler::CheckMap(Register obj_map,
3646  Handle<Map> map,
3647  Label* fail,
3648  SmiCheckType smi_check_type) {
3649  if (smi_check_type == DO_SMI_CHECK) {
3650  JumpIfSmi(obj_map, fail);
3651  }
3652 
3653  CompareMap(obj_map, map);
3654  B(ne, fail);
3655 }
3656 
3657 
3658 void MacroAssembler::DispatchMap(Register obj,
3659  Register scratch,
3660  Handle<Map> map,
3661  Handle<Code> success,
3662  SmiCheckType smi_check_type) {
3663  Label fail;
3664  if (smi_check_type == DO_SMI_CHECK) {
3665  JumpIfSmi(obj, &fail);
3666  }
3667  Ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
3668  Cmp(scratch, Operand(map));
3669  B(ne, &fail);
3670  Jump(success, RelocInfo::CODE_TARGET);
3671  Bind(&fail);
3672 }
3673 
3674 
3675 void MacroAssembler::TestMapBitfield(Register object, uint64_t mask) {
3676  UseScratchRegisterScope temps(this);
3677  Register temp = temps.AcquireX();
3678  Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset));
3679  Ldrb(temp, FieldMemOperand(temp, Map::kBitFieldOffset));
3680  Tst(temp, mask);
3681 }
3682 
3683 
3684 void MacroAssembler::LoadElementsKindFromMap(Register result, Register map) {
3685  // Load the map's "bit field 2".
3686  __ Ldrb(result, FieldMemOperand(map, Map::kBitField2Offset));
3687  // Retrieve elements_kind from bit field 2.
3689 }
3690 
3691 
3692 void MacroAssembler::TryGetFunctionPrototype(Register function,
3693  Register result,
3694  Register scratch,
3695  Label* miss,
3696  BoundFunctionAction action) {
3697  ASSERT(!AreAliased(function, result, scratch));
3698 
3699  // Check that the receiver isn't a smi.
3700  JumpIfSmi(function, miss);
3701 
3702  // Check that the function really is a function. Load map into result reg.
3703  JumpIfNotObjectType(function, result, scratch, JS_FUNCTION_TYPE, miss);
3704 
3705  if (action == kMissOnBoundFunction) {
3706  Register scratch_w = scratch.W();
3707  Ldr(scratch,
3709  // On 64-bit platforms, compiler hints field is not a smi. See definition of
3710  // kCompilerHintsOffset in src/objects.h.
3711  Ldr(scratch_w,
3713  Tbnz(scratch, SharedFunctionInfo::kBoundFunction, miss);
3714  }
3715 
3716  // Make sure that the function has an instance prototype.
3717  Label non_instance;
3718  Ldrb(scratch, FieldMemOperand(result, Map::kBitFieldOffset));
3719  Tbnz(scratch, Map::kHasNonInstancePrototype, &non_instance);
3720 
3721  // Get the prototype or initial map from the function.
3722  Ldr(result,
3724 
3725  // If the prototype or initial map is the hole, don't return it and simply
3726  // miss the cache instead. This will allow us to allocate a prototype object
3727  // on-demand in the runtime system.
3728  JumpIfRoot(result, Heap::kTheHoleValueRootIndex, miss);
3729 
3730  // If the function does not have an initial map, we're done.
3731  Label done;
3732  JumpIfNotObjectType(result, scratch, scratch, MAP_TYPE, &done);
3733 
3734  // Get the prototype from the initial map.
3735  Ldr(result, FieldMemOperand(result, Map::kPrototypeOffset));
3736  B(&done);
3737 
3738  // Non-instance prototype: fetch prototype from constructor field in initial
3739  // map.
3740  Bind(&non_instance);
3741  Ldr(result, FieldMemOperand(result, Map::kConstructorOffset));
3742 
3743  // All done.
3744  Bind(&done);
3745 }
3746 
3747 
3748 void MacroAssembler::CompareRoot(const Register& obj,
3749  Heap::RootListIndex index) {
3750  UseScratchRegisterScope temps(this);
3751  Register temp = temps.AcquireX();
3752  ASSERT(!AreAliased(obj, temp));
3753  LoadRoot(temp, index);
3754  Cmp(obj, temp);
3755 }
3756 
3757 
3758 void MacroAssembler::JumpIfRoot(const Register& obj,
3759  Heap::RootListIndex index,
3760  Label* if_equal) {
3761  CompareRoot(obj, index);
3762  B(eq, if_equal);
3763 }
3764 
3765 
3766 void MacroAssembler::JumpIfNotRoot(const Register& obj,
3767  Heap::RootListIndex index,
3768  Label* if_not_equal) {
3769  CompareRoot(obj, index);
3770  B(ne, if_not_equal);
3771 }
3772 
3773 
3774 void MacroAssembler::CompareAndSplit(const Register& lhs,
3775  const Operand& rhs,
3776  Condition cond,
3777  Label* if_true,
3778  Label* if_false,
3779  Label* fall_through) {
3780  if ((if_true == if_false) && (if_false == fall_through)) {
3781  // Fall through.
3782  } else if (if_true == if_false) {
3783  B(if_true);
3784  } else if (if_false == fall_through) {
3785  CompareAndBranch(lhs, rhs, cond, if_true);
3786  } else if (if_true == fall_through) {
3787  CompareAndBranch(lhs, rhs, InvertCondition(cond), if_false);
3788  } else {
3789  CompareAndBranch(lhs, rhs, cond, if_true);
3790  B(if_false);
3791  }
3792 }
3793 
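// Illustrative sketch (not part of macro-assembler-arm64.cc): the label
// comparisons above select one of five emitted code shapes. The plain C++
// function below makes that selection explicit; the enum names are invented
// for the illustration.
enum BranchShapeSketch {
  kEmitNothing,            // all three labels coincide: pure fall-through
  kEmitUnconditionalJump,  // if_true == if_false: the outcome does not matter
  kEmitBranchIfTrue,       // false case falls through
  kEmitBranchIfFalse,      // true case falls through (condition inverted)
  kEmitBranchThenJump      // neither case falls through
};

BranchShapeSketch SelectBranchShapeSketch(const void* if_true,
                                          const void* if_false,
                                          const void* fall_through) {
  if ((if_true == if_false) && (if_false == fall_through)) return kEmitNothing;
  if (if_true == if_false) return kEmitUnconditionalJump;
  if (if_false == fall_through) return kEmitBranchIfTrue;
  if (if_true == fall_through) return kEmitBranchIfFalse;
  return kEmitBranchThenJump;
}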
3794 
3795 void MacroAssembler::TestAndSplit(const Register& reg,
3796  uint64_t bit_pattern,
3797  Label* if_all_clear,
3798  Label* if_any_set,
3799  Label* fall_through) {
3800  if ((if_all_clear == if_any_set) && (if_any_set == fall_through)) {
3801  // Fall through.
3802  } else if (if_all_clear == if_any_set) {
3803  B(if_all_clear);
3804  } else if (if_all_clear == fall_through) {
3805  TestAndBranchIfAnySet(reg, bit_pattern, if_any_set);
3806  } else if (if_any_set == fall_through) {
3807  TestAndBranchIfAllClear(reg, bit_pattern, if_all_clear);
3808  } else {
3809  TestAndBranchIfAnySet(reg, bit_pattern, if_any_set);
3810  B(if_all_clear);
3811  }
3812 }
3813 
3814 
3815 void MacroAssembler::CheckFastElements(Register map,
3816  Register scratch,
3817  Label* fail) {
3822  Ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset));
3824  B(hi, fail);
3825 }
3826 
3827 
3828 void MacroAssembler::CheckFastObjectElements(Register map,
3829  Register scratch,
3830  Label* fail) {
3835  Ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset));
3837  // If cond==ls, set cond=hi, otherwise compare.
3838  Ccmp(scratch,
3840  B(hi, fail);
3841 }
3842 
3843 
3844 // Note: The ARM version of this clobbers elements_reg, but this version does
3845 // not. Some uses of this in ARM64 assume that elements_reg will be preserved.
3846 void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
3847  Register key_reg,
3848  Register elements_reg,
3849  Register scratch1,
3850  FPRegister fpscratch1,
3851  FPRegister fpscratch2,
3852  Label* fail,
3853  int elements_offset) {
3854  ASSERT(!AreAliased(value_reg, key_reg, elements_reg, scratch1));
3855  Label store_num;
3856 
3857  // Speculatively convert the smi to a double - all smis can be exactly
3858  // represented as a double.
3859  SmiUntagToDouble(fpscratch1, value_reg, kSpeculativeUntag);
3860 
3861  // If value_reg is a smi, we're done.
3862  JumpIfSmi(value_reg, &store_num);
3863 
3864  // Ensure that the object is a heap number.
3865  CheckMap(value_reg, scratch1, isolate()->factory()->heap_number_map(),
3866  fail, DONT_DO_SMI_CHECK);
3867 
3868  Ldr(fpscratch1, FieldMemOperand(value_reg, HeapNumber::kValueOffset));
3870 
3871  // Check for NaN by comparing the number to itself: NaN comparison will
3872  // report unordered, indicated by the overflow flag being set.
3873  Fcmp(fpscratch1, fpscratch1);
3874  Fcsel(fpscratch1, fpscratch2, fpscratch1, vs);
3875 
3876  // Store the result.
3877  Bind(&store_num);
3878  Add(scratch1, elements_reg,
3880  Str(fpscratch1,
3881  FieldMemOperand(scratch1,
3882  FixedDoubleArray::kHeaderSize - elements_offset));
3883 }
3884 
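// Illustrative sketch (not part of macro-assembler-arm64.cc): the Fcmp/Fcsel
// pair above canonicalizes NaNs before the store. Comparing a value with
// itself is unordered exactly when it is NaN, which the overflow (vs)
// condition detects; the generated code then selects the canonical NaN held
// in fpscratch2. The quiet NaN used below is an assumption for illustration.
#include <limits>

double CanonicalizeForDoubleElementsSketch(double value) {
  const double kCanonicalNaNSketch = std::numeric_limits<double>::quiet_NaN();
  return (value != value) ? kCanonicalNaNSketch : value;  // value != value <=> NaN
}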
3885 
3886 bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
3887  return has_frame_ || !stub->SometimesSetsUpAFrame();
3888 }
3889 
3890 
3891 void MacroAssembler::IndexFromHash(Register hash, Register index) {
3892  // If the hash field contains an array index pick it out. The assert checks
3893  // that the constants for the maximum number of digits for an array index
3894  // cached in the hash field and the number of bits reserved for it does not
3895  // conflict.
3898  // We want the smi-tagged index in key. kArrayIndexValueMask has zeros in
3899  // the low kHashShift bits.
3900  STATIC_ASSERT(kSmiTag == 0);
3902  SmiTag(index, hash);
3903 }
3904 
3905 
3907  Register string,
3908  Register index,
3909  SeqStringSetCharCheckIndexType index_type,
3910  Register scratch,
3911  uint32_t encoding_mask) {
3912  ASSERT(!AreAliased(string, index, scratch));
3913 
3914  if (index_type == kIndexIsSmi) {
3915  AssertSmi(index);
3916  }
3917 
3918  // Check that string is an object.
3919  AssertNotSmi(string, kNonObject);
3920 
3921  // Check that string has an appropriate map.
3922  Ldr(scratch, FieldMemOperand(string, HeapObject::kMapOffset));
3923  Ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
3924 
3925  And(scratch, scratch, kStringRepresentationMask | kStringEncodingMask);
3926  Cmp(scratch, encoding_mask);
3927  Check(eq, kUnexpectedStringType);
3928 
3929  Ldr(scratch, FieldMemOperand(string, String::kLengthOffset));
3930  Cmp(index, index_type == kIndexIsSmi ? scratch : Operand::UntagSmi(scratch));
3931  Check(lt, kIndexIsTooLarge);
3932 
3933  ASSERT_EQ(0, Smi::FromInt(0));
3934  Cmp(index, 0);
3935  Check(ge, kIndexIsNegative);
3936 }
3937 
3938 
3939 void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
3940  Register scratch1,
3941  Register scratch2,
3942  Label* miss) {
3943  ASSERT(!AreAliased(holder_reg, scratch1, scratch2));
3944  Label same_contexts;
3945 
3946  // Load current lexical context from the stack frame.
3948  // In debug mode, make sure the lexical context is set.
3949 #ifdef DEBUG
3950  Cmp(scratch1, 0);
3951  Check(ne, kWeShouldNotHaveAnEmptyLexicalContext);
3952 #endif
3953 
3954  // Load the native context of the current context.
3955  int offset =
3957  Ldr(scratch1, FieldMemOperand(scratch1, offset));
3959 
3960  // Check the context is a native context.
3961  if (emit_debug_code()) {
3962  // Read the first word and compare to the global_context_map.
3963  Ldr(scratch2, FieldMemOperand(scratch1, HeapObject::kMapOffset));
3964  CompareRoot(scratch2, Heap::kNativeContextMapRootIndex);
3965  Check(eq, kExpectedNativeContext);
3966  }
3967 
3968  // Check if both contexts are the same.
3969  Ldr(scratch2, FieldMemOperand(holder_reg,
3971  Cmp(scratch1, scratch2);
3972  B(&same_contexts, eq);
3973 
3974  // Check the context is a native context.
3975  if (emit_debug_code()) {
3976  // We're short on scratch registers here, so use holder_reg as a scratch.
3977  Push(holder_reg);
3978  Register scratch3 = holder_reg;
3979 
3980  CompareRoot(scratch2, Heap::kNullValueRootIndex);
3981  Check(ne, kExpectedNonNullContext);
3982 
3983  Ldr(scratch3, FieldMemOperand(scratch2, HeapObject::kMapOffset));
3984  CompareRoot(scratch3, Heap::kNativeContextMapRootIndex);
3985  Check(eq, kExpectedNativeContext);
3986  Pop(holder_reg);
3987  }
3988 
3989  // Check that the security token in the calling global object is
3990  // compatible with the security token in the receiving global
3991  // object.
3992  int token_offset = Context::kHeaderSize +
3994 
3995  Ldr(scratch1, FieldMemOperand(scratch1, token_offset));
3996  Ldr(scratch2, FieldMemOperand(scratch2, token_offset));
3997  Cmp(scratch1, scratch2);
3998  B(miss, ne);
3999 
4000  Bind(&same_contexts);
4001 }
4002 
4003 
4004 // Compute the hash code from the untagged key. This must be kept in sync with
4005 // ComputeIntegerHash in utils.h and KeyedLoadGenericElementStub in
4006 // code-stub-hydrogen.cc
4007 void MacroAssembler::GetNumberHash(Register key, Register scratch) {
4008  ASSERT(!AreAliased(key, scratch));
4009 
4010  // Xor original key with a seed.
4011  LoadRoot(scratch, Heap::kHashSeedRootIndex);
4012  Eor(key, key, Operand::UntagSmi(scratch));
4013 
4014  // The algorithm uses 32-bit integer values.
4015  key = key.W();
4016  scratch = scratch.W();
4017 
4018  // Compute the hash code from the untagged key. This must be kept in sync
4019  // with ComputeIntegerHash in utils.h.
4020  //
4021  // hash = ~hash + (hash << 15);
4022  Mvn(scratch, key);
4023  Add(key, scratch, Operand(key, LSL, 15));
4024  // hash = hash ^ (hash >> 12);
4025  Eor(key, key, Operand(key, LSR, 12));
4026  // hash = hash + (hash << 2);
4027  Add(key, key, Operand(key, LSL, 2));
4028  // hash = hash ^ (hash >> 4);
4029  Eor(key, key, Operand(key, LSR, 4));
4030  // hash = hash * 2057;
4031  Mov(scratch, Operand(key, LSL, 11));
4032  Add(key, key, Operand(key, LSL, 3));
4033  Add(key, key, scratch);
4034  // hash = hash ^ (hash >> 16);
4035  Eor(key, key, Operand(key, LSR, 16));
4036 }
4037 
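// Illustrative sketch (not part of macro-assembler-arm64.cc): the same hash
// computation in plain C++, using 32-bit unsigned arithmetic to match the
// W-register operations above. The seed xor corresponds to the initial Eor
// with the untagged hash seed.
#include <cstdint>

uint32_t ComputeIntegerHashSketch(uint32_t key, uint32_t seed) {
  uint32_t hash = key ^ seed;    // Eor(key, key, Operand::UntagSmi(scratch))
  hash = ~hash + (hash << 15);   // Mvn + Add(..., LSL, 15)
  hash = hash ^ (hash >> 12);    // Eor(..., LSR, 12)
  hash = hash + (hash << 2);     // Add(..., LSL, 2)
  hash = hash ^ (hash >> 4);     // Eor(..., LSR, 4)
  hash = hash * 2057;            // hash + (hash << 3) + (hash << 11)
  hash = hash ^ (hash >> 16);    // Eor(..., LSR, 16)
  return hash;
}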
4038 
4040  Register elements,
4041  Register key,
4042  Register result,
4043  Register scratch0,
4044  Register scratch1,
4045  Register scratch2,
4046  Register scratch3) {
4047  ASSERT(!AreAliased(elements, key, scratch0, scratch1, scratch2, scratch3));
4048 
4049  Label done;
4050 
4051  SmiUntag(scratch0, key);
4052  GetNumberHash(scratch0, scratch1);
4053 
4054  // Compute the capacity mask.
4055  Ldrsw(scratch1,
4056  UntagSmiFieldMemOperand(elements,
4058  Sub(scratch1, scratch1, 1);
4059 
4060  // Generate an unrolled loop that performs a few probes before giving up.
4061  for (int i = 0; i < kNumberDictionaryProbes; i++) {
4062  // Compute the masked index: (hash + i + i * i) & mask.
4063  if (i > 0) {
4064  Add(scratch2, scratch0, SeededNumberDictionary::GetProbeOffset(i));
4065  } else {
4066  Mov(scratch2, scratch0);
4067  }
4068  And(scratch2, scratch2, scratch1);
4069 
4070  // Scale the index by multiplying by the element size.
4072  Add(scratch2, scratch2, Operand(scratch2, LSL, 1));
4073 
4074  // Check if the key is identical to the name.
4075  Add(scratch2, elements, Operand(scratch2, LSL, kPointerSizeLog2));
4076  Ldr(scratch3,
4077  FieldMemOperand(scratch2,
4079  Cmp(key, scratch3);
4080  if (i != (kNumberDictionaryProbes - 1)) {
4081  B(eq, &done);
4082  } else {
4083  B(ne, miss);
4084  }
4085  }
4086 
4087  Bind(&done);
4088  // Check that the value is a normal property.
4089  const int kDetailsOffset =
4091  Ldrsw(scratch1, UntagSmiFieldMemOperand(scratch2, kDetailsOffset));
4092  TestAndBranchIfAnySet(scratch1, PropertyDetails::TypeField::kMask, miss);
4093 
4094  // Get the value at the masked, scaled index and return.
4095  const int kValueOffset =
4097  Ldr(result, FieldMemOperand(scratch2, kValueOffset));
4098 }
4099 
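// Illustrative sketch (not part of macro-assembler-arm64.cc): the unrolled
// probe loop above as a plain C++ loop. The entry size of 3 slots is implied
// by the "index + (index << 1)" scaling; the probe-offset formula follows the
// comment "(hash + i + i * i) & mask" and stands in for
// SeededNumberDictionary::GetProbeOffset, so treat it as an assumption.
#include <cstdint>

int FindNumberDictionaryEntrySketch(const uint32_t* elements,  // flat entry array, key first
                                    uint32_t capacity,         // power of two
                                    uint32_t hash, uint32_t key,
                                    int max_probes) {
  const uint32_t mask = capacity - 1;
  const int kEntrySizeSketch = 3;  // implied by the LSL #1 + add scaling above
  for (int i = 0; i < max_probes; i++) {
    uint32_t index = (hash + static_cast<uint32_t>(i + i * i)) & mask;  // masked probe index
    if (elements[index * kEntrySizeSketch] == key) return static_cast<int>(index);
  }
  return -1;  // the generated code branches to 'miss' instead
}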
4100 
4101 void MacroAssembler::RememberedSetHelper(Register object, // For debug tests.
4102  Register address,
4103  Register scratch1,
4104  SaveFPRegsMode fp_mode,
4105  RememberedSetFinalAction and_then) {
4106  ASSERT(!AreAliased(object, address, scratch1));
4107  Label done, store_buffer_overflow;
4108  if (emit_debug_code()) {
4109  Label ok;
4110  JumpIfNotInNewSpace(object, &ok);
4111  Abort(kRememberedSetPointerInNewSpace);
4112  bind(&ok);
4113  }
4114  UseScratchRegisterScope temps(this);
4115  Register scratch2 = temps.AcquireX();
4116 
4117  // Load store buffer top.
4118  Mov(scratch2, ExternalReference::store_buffer_top(isolate()));
4119  Ldr(scratch1, MemOperand(scratch2));
4120  // Store pointer to buffer and increment buffer top.
4121  Str(address, MemOperand(scratch1, kPointerSize, PostIndex));
4122  // Write back new top of buffer.
4123  Str(scratch1, MemOperand(scratch2));
4124  // Call stub on end of buffer.
4125  // Check for end of buffer.
4127  (1 << (14 + kPointerSizeLog2)));
4128  if (and_then == kFallThroughAtEnd) {
4129  Tbz(scratch1, (14 + kPointerSizeLog2), &done);
4130  } else {
4131  ASSERT(and_then == kReturnAtEnd);
4132  Tbnz(scratch1, (14 + kPointerSizeLog2), &store_buffer_overflow);
4133  Ret();
4134  }
4135 
4136  Bind(&store_buffer_overflow);
4137  Push(lr);
4138  StoreBufferOverflowStub store_buffer_overflow_stub =
4139  StoreBufferOverflowStub(fp_mode);
4140  CallStub(&store_buffer_overflow_stub);
4141  Pop(lr);
4142 
4143  Bind(&done);
4144  if (and_then == kReturnAtEnd) {
4145  Ret();
4146  }
4147 }
4148 
4149 
4151  const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
4153  Drop(num_unsaved);
4154 }
4155 
4156 
4158  // Safepoints expect a block of kNumSafepointRegisters values on the stack, so
4159  // adjust the stack for unsaved registers.
4160  const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
4161  ASSERT(num_unsaved >= 0);
4162  Claim(num_unsaved);
4164 }
4165 
4166 
4171 }
4172 
4173 
4178 }
4179 
4180 
4181 int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
4182  // Make sure the safepoint registers list is what we expect.
4183  ASSERT(CPURegList::GetSafepointSavedRegisters().list() == 0x6ffcffff);
4184 
4185  // Safepoint registers are stored contiguously on the stack, but not all the
4186  // registers are saved. The following registers are excluded:
4187  // - x16 and x17 (ip0 and ip1) because they shouldn't be preserved outside of
4188  // the macro assembler.
4189  // - x28 (jssp) because JS stack pointer doesn't need to be included in
4190  // safepoint registers.
4191  // - x31 (csp) because the system stack pointer doesn't need to be included
4192  // in safepoint registers.
4193  //
4194  // This function implements the mapping of register code to index into the
4195  // safepoint register slots.
4196  if ((reg_code >= 0) && (reg_code <= 15)) {
4197  return reg_code;
4198  } else if ((reg_code >= 18) && (reg_code <= 27)) {
4199  // Skip ip0 and ip1.
4200  return reg_code - 2;
4201  } else if ((reg_code == 29) || (reg_code == 30)) {
4202  // Also skip jssp.
4203  return reg_code - 3;
4204  } else {
4205  // This register has no safepoint register slot.
4206  UNREACHABLE();
4207  return -1;
4208  }
4209 }
4210 
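// Illustrative sketch (not part of macro-assembler-arm64.cc): the same
// register-code-to-slot mapping in plain C++. Codes 16/17 (ip0/ip1), 28
// (jssp) and 31 (csp) have no safepoint slot, which is why later codes are
// shifted down.
int SafepointRegisterStackIndexSketch(int reg_code) {
  if ((reg_code >= 0) && (reg_code <= 15)) return reg_code;
  if ((reg_code >= 18) && (reg_code <= 27)) return reg_code - 2;  // skip ip0, ip1
  if ((reg_code == 29) || (reg_code == 30)) return reg_code - 3;  // also skip jssp
  return -1;  // no safepoint slot for this register code
}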
4211 
4212 void MacroAssembler::CheckPageFlagSet(const Register& object,
4213  const Register& scratch,
4214  int mask,
4215  Label* if_any_set) {
4216  And(scratch, object, ~Page::kPageAlignmentMask);
4217  Ldr(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
4218  TestAndBranchIfAnySet(scratch, mask, if_any_set);
4219 }
4220 
4221 
4222 void MacroAssembler::CheckPageFlagClear(const Register& object,
4223  const Register& scratch,
4224  int mask,
4225  Label* if_all_clear) {
4226  And(scratch, object, ~Page::kPageAlignmentMask);
4227  Ldr(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
4228  TestAndBranchIfAllClear(scratch, mask, if_all_clear);
4229 }
4230 
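// Illustrative sketch (not part of macro-assembler-arm64.cc): both page-flag
// helpers reach the MemoryChunk header by clearing the page-offset bits of
// the object address. The 20-bit (1 MB) page alignment below is an
// assumption for illustration; the real value comes from
// Page::kPageAlignmentMask.
#include <cstdint>

uint64_t PageHeaderAddressSketch(uint64_t object_address) {
  const uint64_t kPageAlignmentMaskSketch = (uint64_t{1} << 20) - 1;  // assumed page size
  return object_address & ~kPageAlignmentMaskSketch;  // And(scratch, object, ~mask)
}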
4231 
4233  Register object,
4234  int offset,
4235  Register value,
4236  Register scratch,
4237  LinkRegisterStatus lr_status,
4238  SaveFPRegsMode save_fp,
4239  RememberedSetAction remembered_set_action,
4240  SmiCheck smi_check) {
4241  // First, check if a write barrier is even needed. The tests below
4242  // catch stores of Smis.
4243  Label done;
4244 
4245  // Skip the barrier if writing a smi.
4246  if (smi_check == INLINE_SMI_CHECK) {
4247  JumpIfSmi(value, &done);
4248  }
4249 
4250  // Although the object register is tagged, the offset is relative to the start
4251  // of the object, so offset must be a multiple of kPointerSize.
4252  ASSERT(IsAligned(offset, kPointerSize));
4253 
4254  Add(scratch, object, offset - kHeapObjectTag);
4255  if (emit_debug_code()) {
4256  Label ok;
4257  Tst(scratch, (1 << kPointerSizeLog2) - 1);
4258  B(eq, &ok);
4259  Abort(kUnalignedCellInWriteBarrier);
4260  Bind(&ok);
4261  }
4262 
4263  RecordWrite(object,
4264  scratch,
4265  value,
4266  lr_status,
4267  save_fp,
4268  remembered_set_action,
4269  OMIT_SMI_CHECK);
4270 
4271  Bind(&done);
4272 
4273  // Clobber clobbered input registers when running with the debug-code flag
4274  // turned on to provoke errors.
4275  if (emit_debug_code()) {
4276  Mov(value, Operand(BitCast<int64_t>(kZapValue + 4)));
4277  Mov(scratch, Operand(BitCast<int64_t>(kZapValue + 8)));
4278  }
4279 }
4280 
4281 
4282 // Will clobber: object, address, value.
4283 // If lr_status is kLRHasBeenSaved, lr will also be clobbered.
4284 //
4285 // The register 'object' contains a heap object pointer. The heap object tag is
4286 // shifted away.
4287 void MacroAssembler::RecordWrite(Register object,
4288  Register address,
4289  Register value,
4290  LinkRegisterStatus lr_status,
4291  SaveFPRegsMode fp_mode,
4292  RememberedSetAction remembered_set_action,
4293  SmiCheck smi_check) {
4294  ASM_LOCATION("MacroAssembler::RecordWrite");
4295  ASSERT(!AreAliased(object, value));
4296 
4297  if (emit_debug_code()) {
4298  UseScratchRegisterScope temps(this);
4299  Register temp = temps.AcquireX();
4300 
4301  Ldr(temp, MemOperand(address));
4302  Cmp(temp, value);
4303  Check(eq, kWrongAddressOrValuePassedToRecordWrite);
4304  }
4305 
4306  // Count number of write barriers in generated code.
4307  isolate()->counters()->write_barriers_static()->Increment();
4308  // TODO(mstarzinger): Dynamic counter missing.
4309 
4310  // First, check if a write barrier is even needed. The tests below
4311  // catch stores of smis and stores into the young generation.
4312  Label done;
4313 
4314  if (smi_check == INLINE_SMI_CHECK) {
4315  ASSERT_EQ(0, kSmiTag);
4316  JumpIfSmi(value, &done);
4317  }
4318 
4319  CheckPageFlagClear(value,
4320  value, // Used as scratch.
4322  &done);
4323  CheckPageFlagClear(object,
4324  value, // Used as scratch.
4326  &done);
4327 
4328  // Record the actual write.
4329  if (lr_status == kLRHasNotBeenSaved) {
4330  Push(lr);
4331  }
4332  RecordWriteStub stub(object, value, address, remembered_set_action, fp_mode);
4333  CallStub(&stub);
4334  if (lr_status == kLRHasNotBeenSaved) {
4335  Pop(lr);
4336  }
4337 
4338  Bind(&done);
4339 
4340  // Clobber clobbered registers when running with the debug-code flag
4341  // turned on to provoke errors.
4342  if (emit_debug_code()) {
4343  Mov(address, Operand(BitCast<int64_t>(kZapValue + 12)));
4344  Mov(value, Operand(BitCast<int64_t>(kZapValue + 16)));
4345  }
4346 }
4347 
4348 
4349 void MacroAssembler::AssertHasValidColor(const Register& reg) {
4350  if (emit_debug_code()) {
4351  // The bit sequence is backward. The first character in the string
4352  // represents the least significant bit.
4353  ASSERT(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
4354 
4355  Label color_is_valid;
4356  Tbnz(reg, 0, &color_is_valid);
4357  Tbz(reg, 1, &color_is_valid);
4358  Abort(kUnexpectedColorFound);
4359  Bind(&color_is_valid);
4360  }
4361 }
4362 
4363 
4364 void MacroAssembler::GetMarkBits(Register addr_reg,
4365  Register bitmap_reg,
4366  Register shift_reg) {
4367  ASSERT(!AreAliased(addr_reg, bitmap_reg, shift_reg));
4368  ASSERT(addr_reg.Is64Bits() && bitmap_reg.Is64Bits() && shift_reg.Is64Bits());
4369  // addr_reg is divided into fields:
4370  // |63 page base 20|19 high 8|7 shift 3|2 0|
4371  // 'high' gives the index of the cell holding color bits for the object.
4372  // 'shift' gives the offset in the cell for this object's color.
4373  const int kShiftBits = kPointerSizeLog2 + Bitmap::kBitsPerCellLog2;
4374  UseScratchRegisterScope temps(this);
4375  Register temp = temps.AcquireX();
4376  Ubfx(temp, addr_reg, kShiftBits, kPageSizeBits - kShiftBits);
4377  Bic(bitmap_reg, addr_reg, Page::kPageAlignmentMask);
4378  Add(bitmap_reg, bitmap_reg, Operand(temp, LSL, Bitmap::kBytesPerCellLog2));
4379  // bitmap_reg:
4380  // |63 page base 20|19 zeros 15|14 high 3|2 0|
4381  Ubfx(shift_reg, addr_reg, kPointerSizeLog2, Bitmap::kBitsPerCellLog2);
4382 }
4383 
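// Illustrative sketch (not part of macro-assembler-arm64.cc): the address
// decomposition performed by GetMarkBits, following the field layout in the
// comment above. The concrete widths (8-byte pointers, 32 mark bits per
// 4-byte bitmap cell, 1 MB pages) are assumptions for illustration.
#include <cstdint>

struct MarkBitsSketch {
  uint64_t cell_address;  // bitmap cell address (before the chunk-header offset)
  unsigned shift;         // bit position of the object's colour within the cell
};

MarkBitsSketch GetMarkBitsSketch(uint64_t addr) {
  const unsigned kPointerSizeLog2Sketch = 3;   // assumed
  const unsigned kBitsPerCellLog2Sketch = 5;   // assumed
  const unsigned kBytesPerCellLog2Sketch = 2;  // assumed
  const unsigned kPageSizeBitsSketch = 20;     // assumed
  const uint64_t kPageAlignmentMask = (uint64_t{1} << kPageSizeBitsSketch) - 1;

  const unsigned kShiftBits = kPointerSizeLog2Sketch + kBitsPerCellLog2Sketch;
  // 'high': the cell index, bits [kPageSizeBits-1 : kShiftBits] of the address.
  uint64_t high = (addr >> kShiftBits) &
                  ((uint64_t{1} << (kPageSizeBitsSketch - kShiftBits)) - 1);
  uint64_t page_base = addr & ~kPageAlignmentMask;  // Bic(bitmap_reg, addr_reg, mask)

  MarkBitsSketch result;
  result.cell_address = page_base + (high << kBytesPerCellLog2Sketch);
  // 'shift': bits [kShiftBits-1 : kPointerSizeLog2] of the address.
  result.shift = static_cast<unsigned>((addr >> kPointerSizeLog2Sketch) &
                                       ((1u << kBitsPerCellLog2Sketch) - 1));
  return result;
}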
4384 
4385 void MacroAssembler::HasColor(Register object,
4386  Register bitmap_scratch,
4387  Register shift_scratch,
4388  Label* has_color,
4389  int first_bit,
4390  int second_bit) {
4391  // See mark-compact.h for color definitions.
4392  ASSERT(!AreAliased(object, bitmap_scratch, shift_scratch));
4393 
4394  GetMarkBits(object, bitmap_scratch, shift_scratch);
4395  Ldr(bitmap_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
4396  // Shift the bitmap down to get the color of the object in bits [1:0].
4397  Lsr(bitmap_scratch, bitmap_scratch, shift_scratch);
4398 
4399  AssertHasValidColor(bitmap_scratch);
4400 
4401  // These bit sequences are backwards. The first character in the string
4402  // represents the least significant bit.
4403  ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0);
4404  ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
4405  ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0);
4406 
4407  // Check for the color.
4408  if (first_bit == 0) {
4409  // Checking for white.
4410  ASSERT(second_bit == 0);
4411  // We only need to test the first bit.
4412  Tbz(bitmap_scratch, 0, has_color);
4413  } else {
4414  Label other_color;
4415  // Checking for grey or black.
4416  Tbz(bitmap_scratch, 0, &other_color);
4417  if (second_bit == 0) {
4418  Tbz(bitmap_scratch, 1, has_color);
4419  } else {
4420  Tbnz(bitmap_scratch, 1, has_color);
4421  }
4422  Bind(&other_color);
4423  }
4424 
4425  // Fall through if it does not have the right color.
4426 }
4427 
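// Illustrative sketch (not part of macro-assembler-arm64.cc): decoding the two
// mark bits once the bitmap word has been shifted so the object's bits sit in
// positions [1:0]. The pattern strings read least-significant bit first.
bool IsWhiteSketch(unsigned bits) { return (bits & 3) == 0; }  // "00"; the code tests only
                                                               // bit 0, since "01" cannot occur
bool IsBlackSketch(unsigned bits) { return (bits & 3) == 1; }  // "10"
bool IsGreySketch(unsigned bits)  { return (bits & 3) == 3; }  // "11"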
4428 
4429 void MacroAssembler::CheckMapDeprecated(Handle<Map> map,
4430  Register scratch,
4431  Label* if_deprecated) {
4432  if (map->CanBeDeprecated()) {
4433  Mov(scratch, Operand(map));
4434  Ldrsw(scratch, UntagSmiFieldMemOperand(scratch, Map::kBitField3Offset));
4435  TestAndBranchIfAnySet(scratch, Map::Deprecated::kMask, if_deprecated);
4436  }
4437 }
4438 
4439 
4440 void MacroAssembler::JumpIfBlack(Register object,
4441  Register scratch0,
4442  Register scratch1,
4443  Label* on_black) {
4444  ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
4445  HasColor(object, scratch0, scratch1, on_black, 1, 0); // kBlackBitPattern.
4446 }
4447 
4448 
4450  Register object,
4451  Register scratch0,
4452  Register scratch1,
4453  Label* found) {
4454  ASSERT(!AreAliased(object, scratch0, scratch1));
4455  Factory* factory = isolate()->factory();
4456  Register current = scratch0;
4457  Label loop_again;
4458 
4459  // Scratch contains elements pointer.
4460  Mov(current, object);
4461 
4462  // Loop based on the map going up the prototype chain.
4463  Bind(&loop_again);
4464  Ldr(current, FieldMemOperand(current, HeapObject::kMapOffset));
4465  Ldrb(scratch1, FieldMemOperand(current, Map::kBitField2Offset));
4467  CompareAndBranch(scratch1, DICTIONARY_ELEMENTS, eq, found);
4468  Ldr(current, FieldMemOperand(current, Map::kPrototypeOffset));
4469  CompareAndBranch(current, Operand(factory->null_value()), ne, &loop_again);
4470 }
4471 
4472 
4473 void MacroAssembler::GetRelocatedValueLocation(Register ldr_location,
4474  Register result) {
4475  ASSERT(!result.Is(ldr_location));
4476  const uint32_t kLdrLitOffset_lsb = 5;
4477  const uint32_t kLdrLitOffset_width = 19;
4478  Ldr(result, MemOperand(ldr_location));
4479  if (emit_debug_code()) {
4480  And(result, result, LoadLiteralFMask);
4481  Cmp(result, LoadLiteralFixed);
4482  Check(eq, kTheInstructionToPatchShouldBeAnLdrLiteral);
4483  // The instruction was clobbered. Reload it.
4484  Ldr(result, MemOperand(ldr_location));
4485  }
4486  Sbfx(result, result, kLdrLitOffset_lsb, kLdrLitOffset_width);
4487  Add(result, ldr_location, Operand(result, LSL, kWordSizeInBytesLog2));
4488 }
4489 
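// Illustrative sketch (not part of macro-assembler-arm64.cc): decoding the
// PC-relative target of an LDR (literal) instruction, as the function above
// does. The signed 19-bit offset sits at bits [23:5] and is scaled by the
// 4-byte instruction size.
#include <cstdint>

uint64_t LdrLiteralTargetSketch(uint64_t ldr_location, uint32_t instruction) {
  // Sign-extend bits [23:5], mirroring Sbfx(result, result, 5, 19).
  int32_t imm19 = static_cast<int32_t>(instruction << 8) >> 13;
  return ldr_location + (static_cast<int64_t>(imm19) << 2);  // LSL kWordSizeInBytesLog2
}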
4490 
4492  Register value,
4493  Register bitmap_scratch,
4494  Register shift_scratch,
4495  Register load_scratch,
4496  Register length_scratch,
4497  Label* value_is_white_and_not_data) {
4498  ASSERT(!AreAliased(
4499  value, bitmap_scratch, shift_scratch, load_scratch, length_scratch));
4500 
4501  // These bit sequences are backwards. The first character in the string
4502  // represents the least significant bit.
4503  ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0);
4504  ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
4505  ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0);
4506 
4507  GetMarkBits(value, bitmap_scratch, shift_scratch);
4508  Ldr(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
4509  Lsr(load_scratch, load_scratch, shift_scratch);
4510 
4511  AssertHasValidColor(load_scratch);
4512 
4513  // If the value is black or grey we don't need to do anything.
4514  // Since both black and grey have a 1 in the first position and white does
4515  // not have a 1 there we only need to check one bit.
4516  Label done;
4517  Tbnz(load_scratch, 0, &done);
4518 
4519  // Value is white. We check whether it is data that doesn't need scanning.
4520  Register map = load_scratch; // Holds map while checking type.
4521  Label is_data_object;
4522 
4523  // Check for heap-number.
4525  Mov(length_scratch, HeapNumber::kSize);
4526  JumpIfRoot(map, Heap::kHeapNumberMapRootIndex, &is_data_object);
4527 
4528  // Check for strings.
4530  ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
4531  // If it's a string and it's not a cons string then it's an object containing
4532  // no GC pointers.
4533  Register instance_type = load_scratch;
4534  Ldrb(instance_type, FieldMemOperand(map, Map::kInstanceTypeOffset));
4535  TestAndBranchIfAnySet(instance_type,
4537  value_is_white_and_not_data);
4538 
4539  // It's a non-indirect (non-cons and non-slice) string.
4540  // If it's external, the length is just ExternalString::kSize.
4541  // Otherwise it's String::kHeaderSize + string->length() * (1 or 2).
4542  // External strings are the only ones with the kExternalStringTag bit
4543  // set.
4546  Mov(length_scratch, ExternalString::kSize);
4547  TestAndBranchIfAnySet(instance_type, kExternalStringTag, &is_data_object);
4548 
4549  // Sequential string, either ASCII or UC16.
4550  // For ASCII (char-size of 1) we shift the smi tag away to get the length.
4551  // For UC16 (char-size of 2) we just leave the smi tag in place, thereby
4552  // getting the length multiplied by 2.
4554  Ldrsw(length_scratch, UntagSmiFieldMemOperand(value,
4556  Tst(instance_type, kStringEncodingMask);
4557  Cset(load_scratch, eq);
4558  Lsl(length_scratch, length_scratch, load_scratch);
4559  Add(length_scratch,
4560  length_scratch,
4562  Bic(length_scratch, length_scratch, kObjectAlignmentMask);
4563 
4564  Bind(&is_data_object);
4565  // Value is a data object, and it is white. Mark it black. Since we know
4566  // that the object is white we can make it black by flipping one bit.
4567  Register mask = shift_scratch;
4568  Mov(load_scratch, 1);
4569  Lsl(mask, load_scratch, shift_scratch);
4570 
4571  Ldr(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
4572  Orr(load_scratch, load_scratch, mask);
4573  Str(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
4574 
4575  Bic(bitmap_scratch, bitmap_scratch, Page::kPageAlignmentMask);
4576  Ldr(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
4577  Add(load_scratch, load_scratch, length_scratch);
4578  Str(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
4579 
4580  Bind(&done);
4581 }
4582 
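// Illustrative sketch (not part of macro-assembler-arm64.cc): the live-bytes
// update above needs the object size. For sequential strings the Cset/Lsl
// pair scales the character count by the encoding-dependent char size, and
// the result is rounded up to the object alignment. The header size parameter
// and the 8-byte alignment are assumptions for illustration.
#include <cstdint>

uint64_t SequentialStringSizeSketch(uint64_t length_in_chars, bool is_two_byte,
                                    uint64_t header_size) {
  const uint64_t kObjectAlignmentMaskSketch = 7;              // assumed
  uint64_t size = length_in_chars << (is_two_byte ? 1 : 0);   // Lsl by the Cset result
  size += header_size + kObjectAlignmentMaskSketch;
  return size & ~kObjectAlignmentMaskSketch;                  // Bic(..., kObjectAlignmentMask)
}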
4583 
4584 void MacroAssembler::Assert(Condition cond, BailoutReason reason) {
4585  if (emit_debug_code()) {
4586  Check(cond, reason);
4587  }
4588 }
4589 
4590 
4591 
4592 void MacroAssembler::AssertRegisterIsClear(Register reg, BailoutReason reason) {
4593  if (emit_debug_code()) {
4594  CheckRegisterIsClear(reg, reason);
4595  }
4596 }
4597 
4598 
4599 void MacroAssembler::AssertRegisterIsRoot(Register reg,
4600  Heap::RootListIndex index,
4601  BailoutReason reason) {
4602  if (emit_debug_code()) {
4603  CompareRoot(reg, index);
4604  Check(eq, reason);
4605  }
4606 }
4607 
4608 
4609 void MacroAssembler::AssertFastElements(Register elements) {
4610  if (emit_debug_code()) {
4611  UseScratchRegisterScope temps(this);
4612  Register temp = temps.AcquireX();
4613  Label ok;
4614  Ldr(temp, FieldMemOperand(elements, HeapObject::kMapOffset));
4615  JumpIfRoot(temp, Heap::kFixedArrayMapRootIndex, &ok);
4616  JumpIfRoot(temp, Heap::kFixedDoubleArrayMapRootIndex, &ok);
4617  JumpIfRoot(temp, Heap::kFixedCOWArrayMapRootIndex, &ok);
4618  Abort(kJSObjectWithFastElementsMapHasSlowElements);
4619  Bind(&ok);
4620  }
4621 }
4622 
4623 
4624 void MacroAssembler::AssertIsString(const Register& object) {
4625  if (emit_debug_code()) {
4626  UseScratchRegisterScope temps(this);
4627  Register temp = temps.AcquireX();
4628  STATIC_ASSERT(kSmiTag == 0);
4629  Tst(object, kSmiTagMask);
4630  Check(ne, kOperandIsNotAString);
4631  Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset));
4633  Check(lo, kOperandIsNotAString);
4634  }
4635 }
4636 
4637 
4638 void MacroAssembler::Check(Condition cond, BailoutReason reason) {
4639  Label ok;
4640  B(cond, &ok);
4641  Abort(reason);
4642  // Will not return here.
4643  Bind(&ok);
4644 }
4645 
4646 
4647 void MacroAssembler::CheckRegisterIsClear(Register reg, BailoutReason reason) {
4648  Label ok;
4649  Cbz(reg, &ok);
4650  Abort(reason);
4651  // Will not return here.
4652  Bind(&ok);
4653 }
4654 
4655 
4656 void MacroAssembler::Abort(BailoutReason reason) {
4657 #ifdef DEBUG
4658  RecordComment("Abort message: ");
4660 
4661  if (FLAG_trap_on_abort) {
4662  Brk(0);
4663  return;
4664  }
4665 #endif
4666 
4667  // Abort is used in some contexts where csp is the stack pointer. In order to
4668  // simplify the CallRuntime code, make sure that jssp is the stack pointer.
4669  // There is no risk of register corruption here because Abort doesn't return.
4670  Register old_stack_pointer = StackPointer();
4671  SetStackPointer(jssp);
4672  Mov(jssp, old_stack_pointer);
4673 
4674  // We need some scratch registers for the MacroAssembler, so make sure we have
4675  // some. This is safe here because Abort never returns.
4676  RegList old_tmp_list = TmpList()->list();
4677  TmpList()->Combine(ip0);
4678  TmpList()->Combine(ip1);
4679 
4680  if (use_real_aborts()) {
4681  // Avoid infinite recursion; Push contains some assertions that use Abort.
4682  NoUseRealAbortsScope no_real_aborts(this);
4683 
4684  Mov(x0, Smi::FromInt(reason));
4685  Push(x0);
4686 
4687  if (!has_frame_) {
4688  // We don't actually want to generate a pile of code for this, so just
4689  // claim there is a stack frame, without generating one.
4690  FrameScope scope(this, StackFrame::NONE);
4691  CallRuntime(Runtime::kAbort, 1);
4692  } else {
4693  CallRuntime(Runtime::kAbort, 1);
4694  }
4695  } else {
4696  // Load the string to pass to Printf.
4697  Label msg_address;
4698  Adr(x0, &msg_address);
4699 
4700  // Call Printf directly to report the error.
4701  CallPrintf();
4702 
4703  // We need a way to stop execution on both the simulator and real hardware,
4704  // and Unreachable() is the best option.
4705  Unreachable();
4706 
4707  // Emit the message string directly in the instruction stream.
4708  {
4709  BlockPoolsScope scope(this);
4710  Bind(&msg_address);
4712  }
4713  }
4714 
4715  SetStackPointer(old_stack_pointer);
4716  TmpList()->set_list(old_tmp_list);
4717 }
4718 
4719 
4721  ElementsKind expected_kind,
4722  ElementsKind transitioned_kind,
4723  Register map_in_out,
4724  Register scratch1,
4725  Register scratch2,
4726  Label* no_map_match) {
4727  // Load the global or builtins object from the current context.
4728  Ldr(scratch1, GlobalObjectMemOperand());
4730 
4731  // Check that the function's map is the same as the expected cached map.
4732  Ldr(scratch1, ContextMemOperand(scratch1, Context::JS_ARRAY_MAPS_INDEX));
4733  size_t offset = (expected_kind * kPointerSize) + FixedArrayBase::kHeaderSize;
4734  Ldr(scratch2, FieldMemOperand(scratch1, offset));
4735  Cmp(map_in_out, scratch2);
4736  B(ne, no_map_match);
4737 
4738  // Use the transitioned cached map.
4739  offset = (transitioned_kind * kPointerSize) + FixedArrayBase::kHeaderSize;
4740  Ldr(map_in_out, FieldMemOperand(scratch1, offset));
4741 }
4742 
4743 
4744 void MacroAssembler::LoadGlobalFunction(int index, Register function) {
4745  // Load the global or builtins object from the current context.
4746  Ldr(function, GlobalObjectMemOperand());
4747  // Load the native context from the global or builtins object.
4748  Ldr(function, FieldMemOperand(function,
4750  // Load the function from the native context.
4751  Ldr(function, ContextMemOperand(function, index));
4752 }
4753 
4754 
4755 void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
4756  Register map,
4757  Register scratch) {
4758  // Load the initial map. The global functions all have initial maps.
4760  if (emit_debug_code()) {
4761  Label ok, fail;
4762  CheckMap(map, scratch, Heap::kMetaMapRootIndex, &fail, DO_SMI_CHECK);
4763  B(&ok);
4764  Bind(&fail);
4765  Abort(kGlobalFunctionsMustHaveInitialMap);
4766  Bind(&ok);
4767  }
4768 }
4769 
4770 
4771 // This is the main Printf implementation. All other Printf variants call
4772 // PrintfNoPreserve after setting up one or more PreserveRegisterScopes.
4773 void MacroAssembler::PrintfNoPreserve(const char * format,
4774  const CPURegister& arg0,
4775  const CPURegister& arg1,
4776  const CPURegister& arg2,
4777  const CPURegister& arg3) {
4778  // We cannot handle a caller-saved stack pointer. It doesn't make much sense
4779  // in most cases anyway, so this restriction shouldn't be too serious.
4780  ASSERT(!kCallerSaved.IncludesAliasOf(__ StackPointer()));
4781 
4782  // Make sure that the macro assembler doesn't try to use any of our arguments
4783  // as scratch registers.
4784  ASSERT(!TmpList()->IncludesAliasOf(arg0, arg1, arg2, arg3));
4785  ASSERT(!FPTmpList()->IncludesAliasOf(arg0, arg1, arg2, arg3));
4786 
4787  // We cannot print the stack pointer because it is typically used to preserve
4788  // caller-saved registers (using other Printf variants which depend on this
4789  // helper).
4790  ASSERT(!AreAliased(arg0, StackPointer()));
4791  ASSERT(!AreAliased(arg1, StackPointer()));
4792  ASSERT(!AreAliased(arg2, StackPointer()));
4793  ASSERT(!AreAliased(arg3, StackPointer()));
4794 
4795  static const int kMaxArgCount = 4;
4796  // Assume that we have the maximum number of arguments until we know
4797  // otherwise.
4798  int arg_count = kMaxArgCount;
4799 
4800  // The provided arguments.
4801  CPURegister args[kMaxArgCount] = {arg0, arg1, arg2, arg3};
4802 
4803  // The PCS registers where the arguments need to end up.
4804  CPURegister pcs[kMaxArgCount] = {NoCPUReg, NoCPUReg, NoCPUReg, NoCPUReg};
4805 
4806  // Promote FP arguments to doubles, and integer arguments to X registers.
4807  // Note that FP and integer arguments cannot be mixed, but we'll check
4808  // AreSameSizeAndType once we've processed these promotions.
4809  for (int i = 0; i < kMaxArgCount; i++) {
4810  if (args[i].IsRegister()) {
4811  // Note that we use x1 onwards, because x0 will hold the format string.
4812  pcs[i] = Register::XRegFromCode(i + 1);
4813  // For simplicity, we handle all integer arguments as X registers. An X
4814  // register argument takes the same space as a W register argument in the
4815  // PCS anyway. The only limitation is that we must explicitly clear the
4816  // top word for W register arguments as the callee will expect it to be
4817  // clear.
4818  if (!args[i].Is64Bits()) {
4819  const Register& as_x = args[i].X();
4820  And(as_x, as_x, 0x00000000ffffffff);
4821  args[i] = as_x;
4822  }
4823  } else if (args[i].IsFPRegister()) {
4824  pcs[i] = FPRegister::DRegFromCode(i);
4825  // C and C++ varargs functions (such as printf) implicitly promote float
4826  // arguments to doubles.
4827  if (!args[i].Is64Bits()) {
4828  FPRegister s(args[i]);
4829  const FPRegister& as_d = args[i].D();
4830  Fcvt(as_d, s);
4831  args[i] = as_d;
4832  }
4833  } else {
4834  // This is the first empty (NoCPUReg) argument, so use it to set the
4835  // argument count and bail out.
4836  arg_count = i;
4837  break;
4838  }
4839  }
4840  ASSERT((arg_count >= 0) && (arg_count <= kMaxArgCount));
4841  // Check that every remaining argument is NoCPUReg.
4842  for (int i = arg_count; i < kMaxArgCount; i++) {
4843  ASSERT(args[i].IsNone());
4844  }
4845  ASSERT((arg_count == 0) || AreSameSizeAndType(args[0], args[1],
4846  args[2], args[3],
4847  pcs[0], pcs[1],
4848  pcs[2], pcs[3]));
4849 
4850  // Move the arguments into the appropriate PCS registers.
4851  //
4852  // Arranging an arbitrary list of registers into x1-x4 (or d0-d3) is
4853  // surprisingly complicated.
4854  //
4855  // * For even numbers of registers, we push the arguments and then pop them
4856  // into their final registers. This maintains 16-byte stack alignment in
4857  // case csp is the stack pointer, since we're only handling X or D
4858  // registers at this point.
4859  //
4860  // * For odd numbers of registers, we push and pop all but one register in
4861  // the same way, but the left-over register is moved directly, since we
4862  // can always safely move one register without clobbering any source.
4863  if (arg_count >= 4) {
4864  Push(args[3], args[2], args[1], args[0]);
4865  } else if (arg_count >= 2) {
4866  Push(args[1], args[0]);
4867  }
4868 
4869  if ((arg_count % 2) != 0) {
4870  // Move the left-over register directly.
4871  const CPURegister& leftover_arg = args[arg_count - 1];
4872  const CPURegister& leftover_pcs = pcs[arg_count - 1];
4873  if (leftover_arg.IsRegister()) {
4874  Mov(Register(leftover_pcs), Register(leftover_arg));
4875  } else {
4876  Fmov(FPRegister(leftover_pcs), FPRegister(leftover_arg));
4877  }
4878  }
4879 
4880  if (arg_count >= 4) {
4881  Pop(pcs[0], pcs[1], pcs[2], pcs[3]);
4882  } else if (arg_count >= 2) {
4883  Pop(pcs[0], pcs[1]);
4884  }
4885 
4886  // Load the format string into x0, as per the procedure-call standard.
4887  //
4888  // To make the code as portable as possible, the format string is encoded
4889  // directly in the instruction stream. It might be cleaner to encode it in a
4890  // literal pool, but since Printf is usually used for debugging, it is
4891  // beneficial for it to be minimally dependent on other features.
4892  Label format_address;
4893  Adr(x0, &format_address);
4894 
4895  // Emit the format string directly in the instruction stream.
4896  { BlockPoolsScope scope(this);
4897  Label after_data;
4898  B(&after_data);
4899  Bind(&format_address);
4900  EmitStringData(format);
4901  Unreachable();
4902  Bind(&after_data);
4903  }
4904 
4905  // We don't pass any arguments on the stack, but we still need to align the C
4906  // stack pointer to a 16-byte boundary for PCS compliance.
4907  if (!csp.Is(StackPointer())) {
4908  Bic(csp, StackPointer(), 0xf);
4909  }
4910 
4911  CallPrintf(pcs[0].type());
4912 }
4913 
4914 
4915 void MacroAssembler::CallPrintf(CPURegister::RegisterType type) {
4916  // A call to printf needs special handling for the simulator, since the system
4917  // printf function will use a different instruction set and the procedure-call
4918  // standard will not be compatible.
4919 #ifdef USE_SIMULATOR
4920  { InstructionAccurateScope scope(this, kPrintfLength / kInstructionSize);
4922  dc32(type);
4923  }
4924 #else
4925  Call(FUNCTION_ADDR(printf), RelocInfo::EXTERNAL_REFERENCE);
4926 #endif
4927 }
4928 
4929 
4930 void MacroAssembler::Printf(const char * format,
4931  const CPURegister& arg0,
4932  const CPURegister& arg1,
4933  const CPURegister& arg2,
4934  const CPURegister& arg3) {
4935  // Printf is expected to preserve all registers, so make sure that none are
4936  // available as scratch registers until we've preserved them.
4937  RegList old_tmp_list = TmpList()->list();
4938  RegList old_fp_tmp_list = FPTmpList()->list();
4939  TmpList()->set_list(0);
4940  FPTmpList()->set_list(0);
4941 
4942  // Preserve all caller-saved registers as well as NZCV.
4943  // If csp is the stack pointer, PushCPURegList asserts that the size of each
4944  // list is a multiple of 16 bytes.
4947 
4948  // We can use caller-saved registers as scratch values (except for argN).
4949  CPURegList tmp_list = kCallerSaved;
4950  CPURegList fp_tmp_list = kCallerSavedFP;
4951  tmp_list.Remove(arg0, arg1, arg2, arg3);
4952  fp_tmp_list.Remove(arg0, arg1, arg2, arg3);
4953  TmpList()->set_list(tmp_list.list());
4954  FPTmpList()->set_list(fp_tmp_list.list());
4955 
4956  // Preserve NZCV.
4957  { UseScratchRegisterScope temps(this);
4958  Register tmp = temps.AcquireX();
4959  Mrs(tmp, NZCV);
4960  Push(tmp, xzr);
4961  }
4962 
4963  PrintfNoPreserve(format, arg0, arg1, arg2, arg3);
4964 
4965  { UseScratchRegisterScope temps(this);
4966  Register tmp = temps.AcquireX();
4967  Pop(xzr, tmp);
4968  Msr(NZCV, tmp);
4969  }
4970 
4973 
4974  TmpList()->set_list(old_tmp_list);
4975  FPTmpList()->set_list(old_fp_tmp_list);
4976 }
4977 
4978 
4980  // TODO(jbramley): Other architectures use the internal memcpy to copy the
4981  // sequence. If this is a performance bottleneck, we should consider caching
4982  // the sequence and copying it in the same way.
4983  InstructionAccurateScope scope(this, kCodeAgeSequenceSize / kInstructionSize);
4984  ASSERT(jssp.Is(StackPointer()));
4986 }
4987 
4988 
4989 
4990 void MacroAssembler::EmitCodeAgeSequence(Code* stub) {
4991  InstructionAccurateScope scope(this, kCodeAgeSequenceSize / kInstructionSize);
4992  ASSERT(jssp.Is(StackPointer()));
4993  EmitCodeAgeSequence(this, stub);
4994 }
4995 
4996 
4997 #undef __
4998 #define __ assm->
4999 
5000 
5002  Label start;
5003  __ bind(&start);
5004 
5005  // We can do this sequence using four instructions, but the code ageing
5006  // sequence that patches it needs five, so we use the extra space to try to
5007  // simplify some addressing modes and remove some dependencies (compared to
5008  // using two stp instructions with write-back).
5009  __ sub(jssp, jssp, 4 * kXRegSize);
5010  __ sub(csp, csp, 4 * kXRegSize);
5011  __ stp(x1, cp, MemOperand(jssp, 0 * kXRegSize));
5012  __ stp(fp, lr, MemOperand(jssp, 2 * kXRegSize));
5014 
5015  __ AssertSizeOfCodeGeneratedSince(&start, kCodeAgeSequenceSize);
5016 }
5017 
5018 
5020  Code * stub) {
5021  Label start;
5022  __ bind(&start);
5023  // When the stub is called, the sequence is replaced with the young sequence
5024  // (as in EmitFrameSetupForCodeAgePatching). After the code is replaced, the
5025  // stub jumps to &start, stored in x0. The young sequence does not call the
5026  // stub so there is no infinite loop here.
5027  //
5028  // A branch (br) is used rather than a call (blr) because this code replaces
5029  // the frame setup code that would normally preserve lr.
5030  __ LoadLiteral(ip0, kCodeAgeStubEntryOffset);
5031  __ adr(x0, &start);
5032  __ br(ip0);
5033  // IsCodeAgeSequence in codegen-arm64.cc assumes that the code generated up
5034  // until now (kCodeAgeStubEntryOffset) is the same for all code age sequences.
5035  __ AssertSizeOfCodeGeneratedSince(&start, kCodeAgeStubEntryOffset);
5036  if (stub) {
5037  __ dc64(reinterpret_cast<uint64_t>(stub->instruction_start()));
5038  __ AssertSizeOfCodeGeneratedSince(&start, kCodeAgeSequenceSize);
5039  }
5040 }
5041 
5042 
5043 bool MacroAssembler::IsYoungSequence(byte* sequence) {
5044  // Generate a young sequence to compare with.
5045  const int length = kCodeAgeSequenceSize / kInstructionSize;
5046  static bool initialized = false;
5047  static byte young[kCodeAgeSequenceSize];
5048  if (!initialized) {
5049  PatchingAssembler patcher(young, length);
5050  // The young sequence is the frame setup code for FUNCTION code types. It is
5051  // generated by FullCodeGenerator::Generate.
5053  initialized = true;
5054  }
5055 
5056  bool is_young = (memcmp(sequence, young, kCodeAgeSequenceSize) == 0);
5057  ASSERT(is_young || IsCodeAgeSequence(sequence));
5058  return is_young;
5059 }
5060 
5061 
5062 #ifdef DEBUG
5063 bool MacroAssembler::IsCodeAgeSequence(byte* sequence) {
5064  // The old sequence varies depending on the code age. However, the code up
5065  // until kCodeAgeStubEntryOffset does not change, so we can check that part to
5066  // get a reasonable level of verification.
5067  const int length = kCodeAgeStubEntryOffset / kInstructionSize;
5068  static bool initialized = false;
5069  static byte old[kCodeAgeStubEntryOffset];
5070  if (!initialized) {
5071  PatchingAssembler patcher(old, length);
5073  initialized = true;
5074  }
5075  return memcmp(sequence, old, kCodeAgeStubEntryOffset) == 0;
5076 }
5077 #endif
5078 
5079 
5080 void MacroAssembler::TruncatingDiv(Register result,
5081  Register dividend,
5082  int32_t divisor) {
5083  ASSERT(!AreAliased(result, dividend));
5084  ASSERT(result.Is32Bits() && dividend.Is32Bits());
5085  MultiplierAndShift ms(divisor);
5086  Mov(result, ms.multiplier());
5087  Smull(result.X(), dividend, result);
5088  Asr(result.X(), result.X(), 32);
5089  if (divisor > 0 && ms.multiplier() < 0) Add(result, result, dividend);
5090  if (divisor < 0 && ms.multiplier() > 0) Sub(result, result, dividend);
5091  if (ms.shift() > 0) Asr(result, result, ms.shift());
5092  Add(result, result, Operand(dividend, LSR, 31));
5093 }
5094 
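// Illustrative sketch (not part of macro-assembler-arm64.cc): truncating
// division by a constant via a "magic" multiplier, as TruncatingDiv emits.
// The multiplier/shift pair below is the standard pair for divisor 3
// (0x55555556, shift 0) and stands in for whatever MultiplierAndShift would
// compute for an arbitrary divisor.
#include <cstdint>

int32_t TruncatingDivBy3Sketch(int32_t dividend) {
  const int32_t kMultiplierSketch = 0x55555556;
  int32_t result = static_cast<int32_t>(
      (static_cast<int64_t>(dividend) * kMultiplierSketch) >> 32);  // Smull + Asr #32
  // divisor > 0 and multiplier > 0 with shift == 0: no extra correction or Asr needed.
  result += static_cast<int32_t>(static_cast<uint32_t>(dividend) >> 31);  // LSR #31 of dividend
  return result;  // dividend / 3, truncated toward zero
}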
5095 
5096 #undef __
5097 
5098 
5100  available_->set_list(old_available_);
5101  availablefp_->set_list(old_availablefp_);
5102 }
5103 
5104 
5105 Register UseScratchRegisterScope::AcquireSameSizeAs(const Register& reg) {
5106  int code = AcquireNextAvailable(available_).code();
5107  return Register::Create(code, reg.SizeInBits());
5108 }
5109 
5110 
5111 FPRegister UseScratchRegisterScope::AcquireSameSizeAs(const FPRegister& reg) {
5112  int code = AcquireNextAvailable(availablefp_).code();
5113  return FPRegister::Create(code, reg.SizeInBits());
5114 }
5115 
5116 
5117 CPURegister UseScratchRegisterScope::AcquireNextAvailable(
5118  CPURegList* available) {
5119  CHECK(!available->IsEmpty());
5120  CPURegister result = available->PopLowestIndex();
5121  ASSERT(!AreAliased(result, xzr, csp));
5122  return result;
5123 }
5124 
5125 
5126 CPURegister UseScratchRegisterScope::UnsafeAcquire(CPURegList* available,
5127  const CPURegister& reg) {
5128  ASSERT(available->IncludesAliasOf(reg));
5129  available->Remove(reg);
5130  return reg;
5131 }
5132 
5133 
5134 #define __ masm->
5135 
5136 
5137 void InlineSmiCheckInfo::Emit(MacroAssembler* masm, const Register& reg,
5138  const Label* smi_check) {
5139  Assembler::BlockPoolsScope scope(masm);
5140  if (reg.IsValid()) {
5141  ASSERT(smi_check->is_bound());
5142  ASSERT(reg.Is64Bits());
5143 
5144  // Encode the register (x0-x30) in the lowest 5 bits, then the offset to
5145  // 'check' in the other bits. The possible offset is limited in that we
5146  // use BitField to pack the data, and the underlying data type is a
5147  // uint32_t.
5148  uint32_t delta = __ InstructionsGeneratedSince(smi_check);
5149  __ InlineData(RegisterBits::encode(reg.code()) | DeltaBits::encode(delta));
5150  } else {
5151  ASSERT(!smi_check->is_bound());
5152 
5153  // An offset of 0 indicates that there is no patch site.
5154  __ InlineData(0);
5155  }
5156 }
5157 
5158 
5160  : reg_(NoReg), smi_check_(NULL) {
5161  InstructionSequence* inline_data = InstructionSequence::At(info);
5162  ASSERT(inline_data->IsInlineData());
5163  if (inline_data->IsInlineData()) {
5164  uint64_t payload = inline_data->InlineData();
5165  // We use BitField to decode the payload, and BitField can only handle
5166  // 32-bit values.
5167  ASSERT(is_uint32(payload));
5168  if (payload != 0) {
5169  int reg_code = RegisterBits::decode(payload);
5170  reg_ = Register::XRegFromCode(reg_code);
5171  uint64_t smi_check_delta = DeltaBits::decode(payload);
5172  ASSERT(smi_check_delta != 0);
5173  smi_check_ = inline_data->preceding(smi_check_delta);
5174  }
5175  }
5176 }
5177 
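// Illustrative sketch (not part of macro-assembler-arm64.cc): the payload
// layout used by Emit and the constructor above, with the register code in
// the low 5 bits and the instruction delta in the remaining bits of a 32-bit
// value.
#include <cstdint>

uint32_t EncodeSmiCheckInfoSketch(uint32_t reg_code, uint32_t delta) {
  return (delta << 5) | (reg_code & 0x1f);  // x0-x30 fit in 5 bits
}

void DecodeSmiCheckInfoSketch(uint32_t payload, uint32_t* reg_code, uint32_t* delta) {
  *reg_code = payload & 0x1f;
  *delta = payload >> 5;  // instructions back from the inline data to the smi check
}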
5178 
5179 #undef __
5180 
5181 
5182 } } // namespace v8::internal
5183 
5184 #endif // V8_TARGET_ARCH_ARM64
void AssertRegisterIsClear(Register reg, BailoutReason reason)
byte * Address
Definition: globals.h:186
void cbnz(const Register &rt, Label *label)
const RegList kSafepointSavedRegisters
Definition: frames-arm.h:97
enable upcoming ES6 features enable harmony block scoping enable harmony enable harmony proxies enable harmony generators enable harmony numeric enable harmony string enable harmony math functions harmony_scoping harmony_symbols harmony_collections harmony_iteration harmony_strings harmony_scoping harmony_maths tracks arrays with only smi values Optimize object Array DOM strings and string pretenure call new trace pretenuring decisions of HAllocate instructions track fields with only smi values track fields with heap values track_fields track_fields Enables optimizations which favor memory size over execution speed use string slices optimization filter maximum number of GVN fix point iterations use function inlining use allocation folding eliminate write barriers targeting allocations in optimized code maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining crankshaft harvests type feedback from stub cache trace check elimination phase hydrogen tracing filter NULL
Definition: flags.cc:269
void Poke(const CPURegister &src, const Operand &offset)
void SmiUntag(Register reg, SBit s=LeaveCC)
void SmiAbs(const Register &smi, Label *slow)
void TestMapBitfield(Register object, uint64_t mask)
void EmitExtendShift(const Register &rd, const Register &rn, Extend extend, unsigned left_shift)
static const char * kGreyBitPattern
Definition: mark-compact.h:81
static const int kHashFieldOffset
Definition: objects.h:8629
static const int kBitFieldOffset
Definition: objects.h:6461
enable upcoming ES6 features enable harmony block scoping enable harmony enable harmony proxies enable harmony generators enable harmony numeric enable harmony string enable harmony math functions harmony_scoping harmony_symbols harmony_collections harmony_iteration harmony_strings harmony_scoping harmony_maths tracks arrays with only smi values Optimize object Array DOM strings and string pretenure call new trace pretenuring decisions of HAllocate instructions track fields with only smi values track fields with heap values track_fields track_fields Enables optimizations which favor memory size over execution speed use string slices optimization filter maximum number of GVN fix point iterations use function inlining use allocation folding eliminate write barriers targeting allocations in optimized code maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining crankshaft harvests type feedback from stub cache trace check elimination phase hydrogen tracing filter trace hydrogen to given file name trace inlining decisions trace store elimination trace all use positions trace global value numbering trace hydrogen escape analysis trace the tracking of allocation sites trace map generalization environment for every instruction deoptimize every n garbage collections put a break point before deoptimizing deoptimize uncommon cases use on stack replacement trace array bounds check elimination perform array index dehoisting use load elimination use store elimination use constant folding eliminate unreachable code number of stress runs when picking a function to watch for shared function not JSFunction itself flushes the cache of optimized code for closures on every GC functions with arguments object maximum number of escape analysis fix point iterations allow uint32 values on optimize frames if they are used only in safe operations track concurrent recompilation artificial compilation delay in ms concurrent on stack replacement do not emit check maps for constant values that have a leaf deoptimize the optimized code if the layout of the maps changes number of stack frames inspected by the profiler percentage of ICs that must have type info to allow optimization extra verbose compilation tracing generate extra emit comments in code disassembly enable use of SSE3 instructions if available enable use of CMOV instruction if available enable use of VFP3 instructions if available enable use of NEON instructions if available(ARM only)") DEFINE_bool(enable_sudiv
static const int kFPOffset
Definition: frames.h:98
void Mvn(const Register &rd, uint64_t imm)
Isolate * isolate() const
Definition: assembler.h:62
void Adr(const Register &rd, Label *label)
static FPRegister Create(unsigned code, unsigned size)
int InstructionsGeneratedSince(Label *label)
const intptr_t kSmiTagMask
Definition: v8.h:5480
void TestAndBranchIfAllClear(const Register &reg, const uint64_t bit_pattern, Label *label)
void csinv(const Register &rd, const Register &rn, const Register &rm, Condition cond)
static const int kCodeEntryOffset
Definition: objects.h:7518
const Register & AppropriateZeroRegFor(const CPURegister &reg) const
const RegList kCallerSaved
Definition: frames-arm.h:75
void tbz(const Register &rt, unsigned bit_pos, Label *label)
static const int kPrototypeOrInitialMapOffset
Definition: objects.h:7519
static const int kStateOffset
Definition: frames.h:96
static bool IsImmLSScaled(ptrdiff_t offset, LSDataSize size)
void FillFields(Register dst, Register field_count, Register filler)
static const int kValueOffset
Definition: objects.h:9547
void AssertRegisterIsRoot(Register reg, Heap::RootListIndex index, BailoutReason reason=kRegisterDidNotMatchExpectedRoot)
static int SlotOffset(int index)
Definition: contexts.h:498
void Orr(const Register &rd, const Register &rn, const Operand &operand)
void GetRelocatedValueLocation(Register ldr_location, Register result)
static const int kBuiltinsOffset
Definition: objects.h:7610
void InvokeFunction(Register function, const ParameterCount &actual, InvokeFlag flag, const CallWrapper &call_wrapper)
static const int kSize
Definition: objects.h:9167
void LoadRelocated(const CPURegister &rt, const Operand &operand)
void PrintfNoPreserve(const char *format, const CPURegister &arg0=NoCPUReg, const CPURegister &arg1=NoCPUReg, const CPURegister &arg2=NoCPUReg, const CPURegister &arg3=NoCPUReg)
void LoadElementsKindFromMap(Register result, Register map)
V8 runtime flag definition (DEFINE_* macro help text)
Definition: flags.cc:208
void SmiTag(Register reg, SBit s=LeaveCC)
void B(Label *label, BranchType type, Register reg=NoReg, int bit=-1)
const unsigned kWRegSize
V8 runtime flag definition (DEFINE_* macro help text)
Definition: flags.cc:350
void Ldp(const CPURegister &rt, const CPURegister &rt2, const MemOperand &src)
static const int kCallSizeWithoutRelocation
const int kDoubleSizeLog2
Definition: globals.h:273
void Lsr(const Register &rd, const Register &rn, unsigned shift)
void Tbz(const Register &rt, unsigned bit_pos, Label *label)
void ClampInt32ToUint8(Register in_out)
const LowDwVfpRegister d11
static const RegList kAllocatableFPRegisters
void CompareAndBranch(const Register &lhs, const Operand &rhs, Condition cond, Label *label)
void AllocateTwoByteSlicedString(Register result, Register length, Register scratch1, Register scratch2, Label *gc_required)
void mov(Register rd, Register rt)
static const char * kWhiteBitPattern
Definition: mark-compact.h:75
const Register cp
static const int kCodeOffset
Definition: frames.h:95
static const int kCallSizeWithRelocation
const LowDwVfpRegister d0
void Ubfx(Register dst, Register src, int lsb, int width, Condition cond=al)
static bool IsImmLSUnscaled(ptrdiff_t offset)
const unsigned kDRegSizeInBits
static Smi * FromInt(int value)
Definition: objects-inl.h:1209
void AssertString(Register object)
#define ASM_LOCATION(message)
Definition: checks.h:69
void ConditionalCompareMacro(const Register &rn, const Operand &operand, StatusFlags nzcv, Condition cond, ConditionalCompareOp op)
void JumpForHeapNumber(Register object, Register heap_number_map, Label *on_heap_number, Label *on_not_heap_number=NULL)
void JumpToExternalReference(const ExternalReference &builtin)
void JumpIfEitherInstanceTypeIsNotSequentialAscii(Register first_object_instance_type, Register second_object_instance_type, Register scratch1, Register scratch2, Label *failure)
const int64_t kWRegMask
static const int kElementsKindBitCount
Definition: objects.h:6483
void LoadInstanceDescriptors(Register map, Register descriptors)
static const int kVeneerDistanceCheckMargin
void tbnz(const Register &rt, unsigned bit_pos, Label *label)
void ConditionalCompare(const Register &rn, const Operand &operand, StatusFlags nzcv, Condition cond, ConditionalCompareOp op)
void Cneg(const Register &rd, const Register &rn, Condition cond)
const unsigned kWRegSizeInBitsLog2
void AllocateAsciiString(Register result, Register length, Register scratch1, Register scratch2, Register scratch3, Label *gc_required)
STATIC_ASSERT((reg_zero==(reg_not_zero^1))&&(reg_bit_clear==(reg_bit_set^1))&&(always==(never^1)))
static LSDataSize CalcLSDataSize(LoadStoreOp op)
const unsigned kByteSizeInBytes
const unsigned kXRegSizeInBits
void Fcvtnu(const Register &rd, const FPRegister &fn)
void LoadFromNumberDictionary(Label *miss, Register elements, Register key, Register result, Register t0, Register t1, Register t2)
#define kCallerSavedFP
void JumpIfDictionaryInPrototypeChain(Register object, Register scratch0, Register scratch1, Label *found)
void CheckMap(Register obj, Register scratch, Handle< Map > map, Label *fail, SmiCheckType smi_check_type)
void Store(Register src, const MemOperand &dst, Representation r)
void GetBuiltinEntry(Register target, Builtins::JavaScript id)
void b(int branch_offset, Condition cond=al)
void JumpIfSmi(Register value, Label *smi_label)
void Ldr(const FPRegister &ft, double imm)
void DispatchMap(Register obj, Register scratch, Handle< Map > map, Handle< Code > success, SmiCheckType smi_check_type)
static Code * GetPreAgedCodeAgeStub(Isolate *isolate)
Definition: objects.h:5559
TypeImpl< ZoneTypeConfig > Type
bool AllowThisStubCall(CodeStub *stub)
static Register Create(unsigned code, unsigned size)
Builtins * builtins()
Definition: isolate.h:948
int int32_t
Definition: unicode.cc:47
static const intptr_t kPageAlignmentMask
Definition: spaces.h:823
uint32_t RegList
Definition: frames.h:41
void EnterFrame(StackFrame::Type type, bool load_constant_pool=false)
void Peek(const CPURegister &dst, const Operand &offset)
void LeaveExitFrame(bool save_doubles, Register argument_count, bool restore_context)
const Register & StackPointer() const
static void Emit(MacroAssembler *masm, const Register &reg, const Label *smi_check)
const uint32_t kIsNotInternalizedMask
Definition: objects.h:603
void CheckFastObjectElements(Register map, Register scratch, Label *fail)
static const int kSize
Definition: objects.h:9245
void EnumLengthSmi(Register dst, Register map)
void Fcsel(const FPRegister &fd, const FPRegister &fn, const FPRegister &fm, Condition cond)
void Logical(const Register &rd, const Register &rn, const Operand &operand, LogicalOp op)
void InvokeCode(Register code, const ParameterCount &expected, const ParameterCount &actual, InvokeFlag flag, const CallWrapper &call_wrapper)
const LowDwVfpRegister d15
static const int kHandlerTableOffset
Definition: objects.h:5583
void ThrowIf(Condition cc, BailoutReason reason)
#define ASSERT(condition)
Definition: checks.h:329
void PushMultipleTimes(CPURegister src, Register count)
void CompareMap(Register obj, Register scratch, Handle< Map > map, Label *early_success)
static const int kContextOffset
Definition: frames.h:185
void AssertNotSmi(Register object)
const int kPointerSizeLog2
Definition: globals.h:281
void RecordWriteField(Register object, int offset, Register value, Register scratch, LinkRegisterStatus lr_status, SaveFPRegsMode save_fp, RememberedSetAction remembered_set_action=EMIT_REMEMBERED_SET, SmiCheck smi_check=INLINE_SMI_CHECK)
void SetRecordedAstId(TypeFeedbackId ast_id)
static bool IsValidImmPCOffset(ImmBranchType branch_type, int32_t offset)
void Subs(const Register &rd, const Register &rn, const Operand &operand)
static const int kFlagsOffset
Definition: spaces.h:655
static const int kNativeContextOffset
Definition: objects.h:7567
void PushTryHandler(StackHandler::Kind kind, int handler_index)
void LoadTransitionedArrayMapConditional(ElementsKind expected_kind, ElementsKind transitioned_kind, Register map_in_out, Register scratch, Label *no_map_match)
void And(Register dst, Register src1, const Operand &src2, Condition cond=al)
static const char * kBlackBitPattern
Definition: mark-compact.h:69
static int OffsetOfFunctionWithId(Builtins::JavaScript id)
Definition: objects.h:7674
void NumberOfOwnDescriptors(Register dst, Register map)
static const int kNextOffset
Definition: frames.h:94
const uint32_t kStringRepresentationMask
Definition: objects.h:615
void JumpIfMinusZero(DoubleRegister input, Label *on_negative_zero)
void csinc(const Register &rd, const Register &rn, const Register &rm, Condition cond)
void CompareAndSplit(const Register &lhs, const Operand &rhs, Condition cond, Label *if_true, Label *if_false, Label *fall_through)
#define CHECK(condition)
Definition: checks.h:75
void JumpIfRoot(const Register &obj, Heap::RootListIndex index, Label *if_equal)
void IncrementCounter(StatsCounter *counter, int value, Register scratch1, Register scratch2)
void movk(const Register &rd, uint64_t imm, int shift=-1)
void Bic(const Register &rd, const Register &rn, const Operand &operand)
PerThreadAssertScopeDebugOnly< DEFERRED_HANDLE_DEREFERENCE_ASSERT, true > AllowDeferredHandleDereference
Definition: assert-scope.h:234
const bool FLAG_enable_slow_asserts
Definition: checks.h:307
void Sbfx(Register dst, Register src, int lsb, int width, Condition cond=al)
static const int kDescriptorsOffset
Definition: objects.h:6435
int WhichPowerOf2(uint32_t x)
Definition: utils.h:57
const intptr_t kObjectAlignmentMask
Definition: v8globals.h:45
Factory * factory()
Definition: isolate.h:995
void Load(Register dst, const MemOperand &src, Representation r)
static Operand UntagSmiAndScale(Register smi, int scale)
bool AreSameSizeAndType(const CPURegister &reg1, const CPURegister &reg2, const CPURegister &reg3=NoCPUReg, const CPURegister &reg4=NoCPUReg, const CPURegister &reg5=NoCPUReg, const CPURegister &reg6=NoCPUReg, const CPURegister &reg7=NoCPUReg, const CPURegister &reg8=NoCPUReg)
static const int kContextOffset
Definition: objects.h:7523
const LowDwVfpRegister d10
void LoadStoreMacro(const CPURegister &rt, const MemOperand &addr, LoadStoreOp op)
const Instr kImmExceptionIsPrintf
const intptr_t kHeapObjectTagMask
Definition: v8.h:5475
void AssertSmi(Register object)
void CompareRoot(Register obj, Heap::RootListIndex index)
void JumpIfNotUniqueName(Register reg, Label *not_unique_name)
void DecrementCounter(StatsCounter *counter, int value, Register scratch1, Register scratch2)
const unsigned kXRegSizeInBitsLog2
void LoadTrueFalseRoots(Register true_root, Register false_root)
const int64_t kXSignBit
void TruncatingDiv(Register result, Register dividend, int32_t divisor)
void EmitSeqStringSetCharCheck(Register string, Register index, Register value, uint32_t encoding_mask)
kInstanceClassNameOffset flag
Definition: objects-inl.h:5115
void EnumLengthUntagged(Register dst, Register map)
void Abort(BailoutReason msg)
static const int kSize
Definition: objects.h:10077
void mvn(Register dst, const Operand &src, SBit s=LeaveCC, Condition cond=al)
void Eor(const Register &rd, const Register &rn, const Operand &operand)
uint8_t byte
Definition: globals.h:185
const unsigned kWRegSizeInBits
void JumpIfInstanceTypeIsNotSequentialAscii(Register type, Register scratch, Label *failure)
void CompareInstanceType(Register map, Register type_reg, InstanceType type)
void stp(const CPURegister &rt, const CPURegister &rt2, const MemOperand &dst)
static bool IsImmMovz(uint64_t imm, unsigned reg_size)
const uint32_t kNotStringTag
Definition: objects.h:599
void JumpIfNotHeapNumber(Register object, Register heap_number_map, Register scratch, Label *on_not_heap_number)
MemOperand UntagSmiFieldMemOperand(Register object, int offset)
void JumpIfNotInNewSpace(Register object, Register scratch, Label *branch)
static const int kCallerPCOffset
Definition: frames-arm.h:123
#define UNREACHABLE()
Definition: checks.h:52
static const uint32_t kBytesPerCellLog2
Definition: spaces.h:172
void CompareObjectType(Register heap_object, Register map, Register type_reg, InstanceType type)
DwVfpRegister DoubleRegister
V8 runtime flag definition (DEFINE_* macro help text)
Definition: flags.cc:211
void Cbnz(const Register &rt, Label *label)
void TestJSArrayForAllocationMemento(Register receiver_reg, Register scratch_reg, Label *no_memento_found)
void AssertHasValidColor(const Register &reg)
static const size_t kHeaderSize
Definition: spaces.h:580
void cbz(const Register &rt, Label *label)
static int ActivationFrameAlignment()
const LowDwVfpRegister d14
void LoadStore(const CPURegister &rt, const MemOperand &addr, LoadStoreOp op)
static const int kLengthOffset
Definition: objects.h:8905
void CheckFastElements(Register map, Register scratch, Label *fail)
void TestForMinusZero(DoubleRegister input)
static const int kValueOffset
Definition: objects.h:1971
V8 runtime flag definition (DEFINE_* macro help text)
Definition: flags.cc:665
Condition InvertCondition(Condition cond)
void AddSubWithCarryMacro(const Register &rd, const Register &rn, const Operand &operand, FlagsUpdate S, AddSubWithCarryOp op)
const int kDoubleSize
Definition: globals.h:266
void br(const Register &xn)
void LoadGlobalFunction(int index, Register function)
void TryGetFunctionPrototype(Register function, Register result, Register scratch, Label *miss, bool miss_on_bound_function=false)
static const int kDontAdaptArgumentsSentinel
Definition: objects.h:7098
const LowDwVfpRegister d13
static const int kCallerFPOffset
Definition: frames-arm.h:121
static const int kStoreBufferOverflowBit
Definition: store-buffer.h:92
static const int8_t kMaximumBitField2FastHoleyElementValue
Definition: objects.h:6493
PrologueFrameMode
Definition: frames.h:957
const uint32_t kIsIndirectStringMask
Definition: objects.h:622
const unsigned kPrintfLength
static unsigned CountClearHalfWords(uint64_t imm, unsigned reg_size)
void AddSub(const Register &rd, const Register &rn, const Operand &operand, FlagsUpdate S, AddSubOp op)
void SetStackPointer(const Register &stack_pointer)
static bool IsImmAddSub(int64_t immediate)
const int kPointerSize
Definition: globals.h:268
void csel(const Register &rd, const Register &rn, const Register &rm, Condition cond)
static const int kMaxCachedArrayIndexLength
Definition: objects.h:8649
void TestAndBranchIfAnySet(const Register &reg, const uint64_t bit_pattern, Label *label)
void CallStub(CodeStub *stub, TypeFeedbackId ast_id=TypeFeedbackId::None(), Condition cond=al)
void CallCFunction(ExternalReference function, int num_arguments)
void Fcmp(const FPRegister &fn, const FPRegister &fm)
const unsigned kInstructionSize
void AllocateAsciiConsString(Register result, Register length, Register scratch1, Register scratch2, Label *gc_required)
static bool IsImmConditionalCompare(int64_t immediate)
const Address kZapValue
Definition: v8globals.h:82
void dc64(uint64_t data)
void JumpIfHeapNumber(Register object, Label *on_heap_number, Register heap_number_map=NoReg)
const int kHeapObjectTag
Definition: v8.h:5473
void Jump(Register target, Condition cond=al)
bool IsAligned(T value, U alignment)
Definition: utils.h:211
void RecordWrite(Register object, Register address, Register value, LinkRegisterStatus lr_status, SaveFPRegsMode save_fp, RememberedSetAction remembered_set_action=EMIT_REMEMBERED_SET, SmiCheck smi_check=INLINE_SMI_CHECK)
void TruncateHeapNumberToI(Register result, Register object)
void Allocate(int object_size, Register result, Register scratch1, Register scratch2, Label *gc_required, AllocationFlags flags)
void Fmov(FPRegister fd, FPRegister fn)
void AllocateHeapNumber(Register result, Register scratch1, Register scratch2, Register heap_number_map, Label *gc_required, TaggingMode tagging_mode=TAG_RESULT)
void CopyBytes(Register src, Register dst, Register length, Register scratch)
void LoadHeapObject(Register dst, Handle< HeapObject > object)
const RegList kCalleeSaved
Definition: frames-arm.h:63
void Throw(Register value)
void Fcvtzs(const Register &rd, const FPRegister &fn)
static const int kMaxRegularHeapObjectSize
Definition: spaces.h:820
V8 runtime flag definition (DEFINE_bool code_comments and related DEFINE_* macro help text in flags.cc)
void ThrowIfSmi(const Register &value, BailoutReason reason)
#define __
void JumpIfNotObjectType(Register object, Register map, Register type_reg, InstanceType type, Label *if_not_object)
static ExternalReference GetAllocationLimitReference(Isolate *isolate, AllocationFlags flags)
void SetCounter(StatsCounter *counter, int value, Register scratch1, Register scratch2)
void Fmin(const FPRegister &fd, const FPRegister &fn, const FPRegister &fm)
void str(Register src, const MemOperand &dst, Condition cond=al)
bool NeedExtraInstructionsOrRegisterBranch(Label *label, ImmBranchType branch_type)
Handle< Cell > NewCell(Handle< Object > value)
Definition: factory.cc:776
static const int kHeaderSize
Definition: objects.h:9042
static const int kElementsOffset
Definition: objects.h:2756
bool IsPowerOf2(T x)
Definition: utils.h:51
void movz(const Register &rd, uint64_t imm, int shift=-1)
void PopCPURegList(CPURegList registers)
int TenToThe(int exponent)
Definition: utils.h:880
bool Is(Object *obj)
void SmiUntagToDouble(FPRegister dst, Register src, UntagMode mode=kNotSpeculativeUntag)
MacroAssembler(Isolate *isolate, void *buffer, int size)
const uint32_t kStringTag
Definition: objects.h:598
void LoadContext(Register dst, int context_chain_length)
static const int kArrayIndexValueBits
Definition: objects.h:8654
void Cset(const Register &rd, Condition cond)
static int CallSize(Register target, Condition cond=al)
void CallExternalReference(const ExternalReference &ext, int num_arguments)
void CallApiFunctionAndReturn(Register function_address, ExternalReference thunk_ref, int stack_space, MemOperand return_value_operand, MemOperand *context_restore_operand)
void AssertFastElements(Register elements)
void add(Register dst, Register src1, const Operand &src2, SBit s=LeaveCC, Condition cond=al)
const uint32_t kInternalizedTag
Definition: objects.h:605
void CheckMapDeprecated(Handle< Map > map, Register scratch, Label *if_deprecated)
void set_list(RegList new_list)
void JumpIfBlack(Register object, Register scratch0, Register scratch1, Label *on_black)
const LowDwVfpRegister d9
void ClampDoubleToUint8(Register result_reg, DwVfpRegister input_reg, LowDwVfpRegister double_scratch)
void AllocateTwoByteConsString(Register result, Register length, Register scratch1, Register scratch2, Label *gc_required)
void Cbz(const Register &rt, Label *label)
void Drop(int count, Condition cond=al)
static const int kCallerSPDisplacement
Definition: frames-arm.h:127
void AddSubWithCarry(const Register &rd, const Register &rn, const Operand &operand, FlagsUpdate S, AddSubWithCarryOp op)
InvokeFlag
void GetBuiltinFunction(Register target, Builtins::JavaScript id)
void Stp(const CPURegister &rt, const CPURegister &rt2, const MemOperand &dst)
void ldr(Register dst, const MemOperand &src, Condition cond=al)
const unsigned kXRegSize
static const int kHeaderSize
Definition: objects.h:3016
AllocationFlags
const Register lr
void CheckAccessGlobalProxy(Register holder_reg, Register scratch, Label *miss)
void CopyFields(Register dst, Register src, LowDwVfpRegister double_scratch, int field_count)
static const int kMapOffset
Definition: objects.h:1890
static const int kFixedFrameSizeFromFp
Definition: frames.h:180
static const int kSize
Definition: objects.h:9211
void Abs(const Register &rd, const Register &rm, Label *is_not_representable=NULL, Label *is_representable=NULL)
void LookupNumberStringCache(Register object, Register result, Register scratch1, Register scratch2, Register scratch3, Label *not_found)
void Combine(const CPURegList &other)
void TruncateDoubleToI(Register result, DwVfpRegister double_input)
const uint32_t kIsNotStringMask
Definition: objects.h:597
static const intptr_t kLiveBytesOffset
Definition: spaces.h:570
void dc32(uint32_t data)
void LoadObject(Register result, Handle< Object > object)
static bool IsImmMovn(uint64_t imm, unsigned reg_size)
void Add(const Register &rd, const Register &rn, const Operand &operand)
void Tbnz(const Register &rt, unsigned bit_pos, Label *label)
const char * GetBailoutReason(BailoutReason reason)
Definition: objects.cc:16437
static const int kLengthOffset
Definition: objects.h:3015
static const int kSize
Definition: objects.h:1979
void Printf(const char *format, const CPURegister &arg0=NoCPUReg, const CPURegister &arg1=NoCPUReg, const CPURegister &arg2=NoCPUReg, const CPURegister &arg3=NoCPUReg)
void ldp(const CPURegister &rt, const CPURegister &rt2, const MemOperand &src)
void ThrowUncatchable(Register value)
V8 runtime flag definition (DEFINE_* macro help text)
Definition: flags.cc:317
void StoreRoot(Register source, Heap::RootListIndex index, Condition cond=al)
void RecordComment(const char *msg)
MemOperand FieldMemOperand(Register object, int offset)
CpuProfiler * cpu_profiler() const
Definition: isolate.h:984
static const int kContextOffset
Definition: frames.h:97
static const int kHasNonInstancePrototype
Definition: objects.h:6468
const int kNumSafepointRegisters
Definition: frames-arm.h:92
bool emit_debug_code() const
Definition: assembler.h:65
void LoadGlobalFunctionInitialMap(Register function, Register map, Register scratch)
static ExternalReference GetAllocationTopReference(Isolate *isolate, AllocationFlags flags)
void GetNumberHash(Register t0, Register scratch)
void hint(SystemHint code)
static FPRegister DRegFromCode(unsigned code)
static const int kLastExitFrameField
Definition: frames-arm64.h:71
void CallRuntime(const Runtime::Function *f, int num_arguments, SaveFPRegsMode save_doubles=kDontSaveFPRegs)
static const int kFormalParameterCountOffset
Definition: objects.h:7156
static const int kBitField3Offset
Definition: objects.h:6439
void TryConvertDoubleToInt64(Register as_int, FPRegister value, FPRegister scratch_d, Label *on_successful_conversion=NULL, Label *on_failed_conversion=NULL)
const unsigned kDRegSize
void Msr(SystemRegister sysreg, const Register &rt)
void Mrs(const Register &rt, SystemRegister sysreg)
const intptr_t kPointerAlignment
Definition: v8globals.h:48
void EmitStringData(const char *string)
static Register XRegFromCode(unsigned code)
void mov(Register dst, const Operand &src, SBit s=LeaveCC, Condition cond=al)
Register UnsafeAcquire(const Register &reg)
static const int kHeaderSize
Definition: objects.h:5604
void BumpSystemStackPointer(const Operand &space)
void PokePair(const CPURegister &src1, const CPURegister &src2, int offset)
std::multimap< int, FarBranchInfo > unresolved_branches_
void JumpIfNotRoot(const Register &obj, Heap::RootListIndex index, Label *if_not_equal)
void JumpIfBothInstanceTypesAreNotSequentialAscii(Register first_object_instance_type, Register second_object_instance_type, Register scratch1, Register scratch2, Label *failure)
void Cmp(const Register &rn, const Operand &operand)
void Tst(const Register &rn, const Operand &operand)
void UndoAllocationInNewSpace(Register object, Register scratch)
static int32_t ImmBranchRange(ImmBranchType branch_type)
void PeekPair(const CPURegister &dst1, const CPURegister &dst2, int offset)
void Prologue(PrologueFrameMode frame_mode)
Assembler(Isolate *isolate, void *buffer, int buffer_size)
void Lsl(const Register &rd, const Register &rn, unsigned shift)
void JumpIfObjectType(Register object, Register map, Register type_reg, InstanceType type, Label *if_cond_pass, Condition cond=eq)
#define ASSERT_EQ(v1, v2)
Definition: checks.h:330
static const int kPointersFromHereAreInterestingMask
Definition: spaces.h:427
const unsigned kWordSizeInBytesLog2
static const int kElementsKindShift
Definition: objects.h:6482
void Call(Register target, Condition cond=al)
void Scvtf(const FPRegister &fd, const Register &rn, unsigned fbits=0)
void USE(T)
Definition: globals.h:341
static const int kConstructorOffset
Definition: objects.h:6428
const uint32_t kOneByteStringTag
Definition: objects.h:611
Counters * counters()
Definition: isolate.h:859
void AllocateAsciiSlicedString(Register result, Register length, Register scratch1, Register scratch2, Label *gc_required)
static double canonical_not_the_hole_nan_as_double()
Definition: objects-inl.h:2166
void Ccmp(const Register &rn, const Operand &operand, StatusFlags nzcv, Condition cond)
const int kSmiTag
Definition: v8.h:5478
static int ActivationFrameAlignment()
void CheckPageFlagClear(const Register &object, const Register &scratch, int mask, Label *if_all_clear)
void CheckRegisterIsClear(Register reg, BailoutReason reason)
void Check(Condition cond, BailoutReason reason)
void JumpIfNotSmi(Register value, Label *not_smi_label)
void PushCPURegList(CPURegList registers)
PositionsRecorder * positions_recorder()
void Assert(Condition cond, BailoutReason reason)
void movn(const Register &rd, uint64_t imm, int shift=-1)
const uint32_t kDebugZapValue
Definition: v8globals.h:87
void InvokeBuiltin(Builtins::JavaScript id, InvokeFlag flag, const CallWrapper &call_wrapper=NullCallWrapper())
void Claim(uint64_t count, uint64_t unit_size=kXRegSize)
const uint32_t kIsIndirectStringTag
Definition: objects.h:623
static const int8_t kMaximumBitField2FastHoleySmiElementValue
Definition: objects.h:6496
void Adds(const Register &rd, const Register &rn, const Operand &operand)
static const int kPrototypeOffset
Definition: objects.h:6427
const LowDwVfpRegister d12
void StoreNumberToDoubleElements(Register value_reg, Register key_reg, Register elements_reg, Register scratch1, LowDwVfpRegister double_scratch, Label *fail, int elements_offset=0)
void AssertSizeOfCodeGeneratedSince(const Label *label, ptrdiff_t size)
void TailCallStub(CodeStub *stub, Condition cond=al)
void Smull(const Register &rd, const Register &rn, const Register &rm)
HeapObject * obj
void TestAndSplit(const Register &reg, uint64_t bit_pattern, Label *if_all_clear, Label *if_any_set, Label *fall_through)
static const int kHashShift
Definition: objects.h:8642
const int kPageSizeBits
Definition: v8globals.h:95
void AssertName(Register object)
const Register no_reg
void Csel(const Register &rd, const Register &rn, const Operand &operand, Condition cond)
void EnsureNotWhite(Register object, Register scratch1, Register scratch2, Register scratch3, Label *object_is_white_and_not_data)
const LowDwVfpRegister d1
static void EmitCodeAgeSequence(Assembler *assm, Code *stub)
static const char * kImpossibleBitPattern
Definition: mark-compact.h:63
void JumpIfEitherIsNotSequentialAsciiStrings(Register first, Register second, Register scratch1, Register scratch2, Label *failure, SmiCheckType smi_check=DO_SMI_CHECK)
static const uint32_t kBitsPerCellLog2
Definition: spaces.h:169
const Register fp
int LeaveFrame(StackFrame::Type type)
static const int kPointersToHereAreInterestingMask
Definition: spaces.h:424
void JumpIfEitherSmi(Register reg1, Register reg2, Label *on_either_smi)
static const int kNativeContextOffset
Definition: objects.h:7611
void Asr(const Register &rd, const Register &rn, unsigned shift)
MemOperand ContextMemOperand(Register context, int index)
static bool IsYoungSequence(byte *sequence)
static bool IsImmLogical(uint64_t value, unsigned width, unsigned *n, unsigned *imm_s, unsigned *imm_r)
void IndexFromHash(Register hash, Register index)
void adr(const Register &rd, Label *label)
static const int kCompilerHintsOffset
Definition: objects.h:7171
void TailCallExternalReference(const ExternalReference &ext, int num_arguments, int result_size)
T Min(T a, T b)
Definition: utils.h:234
static Operand UntagSmi(Register smi)
static const int kEmptyHashField
Definition: objects.h:8678
static const int kSharedFunctionInfoOffset
Definition: objects.h:7521
void EnterExitFrame(bool save_doubles, int stack_space=0)
#define FUNCTION_ADDR(f)
Definition: globals.h:345
void AddSubMacro(const Register &rd, const Register &rn, const Operand &operand, FlagsUpdate S, AddSubOp op)
void AssertIsString(const Register &object)
const LowDwVfpRegister d8
void CheckPageFlagSet(const Register &object, const Register &scratch, int mask, Label *if_any_set)
void TailCallRuntime(Runtime::FunctionId fid, int num_arguments, int result_size)
static const int kBitField2Offset
Definition: objects.h:6462
void AllocateTwoByteString(Register result, Register length, Register scratch1, Register scratch2, Register scratch3, Label *gc_required)
void LogicalImmediate(const Register &rd, const Register &rn, unsigned n, unsigned imm_s, unsigned imm_r, LogicalOp op)
Register AcquireSameSizeAs(const Register &reg)
void Sub(const Register &rd, const Register &rn, const Operand &operand)
static CPURegList GetSafepointSavedRegisters()
void EmitShift(const Register &rd, const Register &rn, Shift shift, unsigned amount)
void LoadRoot(Register destination, Heap::RootListIndex index, Condition cond=al)
void RememberedSetHelper(Register object, Register addr, Register scratch, SaveFPRegsMode save_fp, RememberedSetFinalAction and_then)
const intptr_t kDoubleAlignment
Definition: v8globals.h:52
const int kCharSize
Definition: globals.h:261
void sub(Register dst, Register src1, const Operand &src2, SBit s=LeaveCC, Condition cond=al)
void HasColor(Register object, Register scratch0, Register scratch1, Label *has_color, int first_bit, int second_bit)
static InstructionSequence * At(Address address)
void Fcvt(const FPRegister &fd, const FPRegister &fn)
MemOperand GlobalObjectMemOperand()
void LoadLiteral(const CPURegister &rt, int offset_from_pc)
const uint32_t kStringEncodingMask
Definition: objects.h:609
bool is_uintn(int64_t x, unsigned n)
Definition: utils.h:1108
bool AreAliased(const CPURegister &reg1, const CPURegister &reg2, const CPURegister &reg3=NoReg, const CPURegister &reg4=NoReg, const CPURegister &reg5=NoReg, const CPURegister &reg6=NoReg, const CPURegister &reg7=NoReg, const CPURegister &reg8=NoReg)
void CheckEnumCache(Register null_value, Label *call_runtime)
static const int kInstanceTypeOffset
Definition: objects.h:6459
void AssertUndefinedOrAllocationSite(Register object, Register scratch)
void AllocateHeapNumberWithValue(Register result, DwVfpRegister value, Register scratch1, Register scratch2, Register heap_number_map, Label *gc_required)
void LogicalMacro(const Register &rd, const Register &rn, const Operand &operand, LogicalOp op)
void Mov(const Register &rd, const Operand &operand, DiscardMoveMode discard_mode=kDontDiscardForSameWReg)
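The declarations indexed above are the MacroAssembler and Assembler entry points referenced by macro-assembler-arm64.cc. As a rough illustration of how call sites typically drive these wrappers, the following minimal sketch emits a small smi-addition sequence. The helper name GenerateAddSmis, the choice of x0/x1 as input registers, and the direct masm-> call style are assumptions made for this example only and are not taken from this file.

// Minimal usage sketch (assumed helper, not part of this file): emits code
// that adds two tagged smis held in x0 and x1, branching to a caller-provided
// label when either value is not a smi. Overflow handling is omitted.
static void GenerateAddSmis(MacroAssembler* masm, Label* not_smi) {
  masm->JumpIfNotSmi(x0, not_smi);  // declared above: JumpIfNotSmi(Register, Label*)
  masm->JumpIfNotSmi(x1, not_smi);
  // Smi tag bits are zero, so adding the tagged values directly produces a
  // correctly tagged sum (ignoring overflow in this sketch).
  masm->Add(x0, x0, Operand(x1));   // declared above: Add(rd, rn, operand)
}

A real caller would presumably bind not_smi to a slow path that untags the inputs (for example via SmiUntag) or falls back to a runtime call.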