v8 3.25.30 (node 0.11.13)
V8 is Google's open source JavaScript engine
macro-assembler-arm.cc
1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
4 // met:
5 //
6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided
11 // with the distribution.
12 // * Neither the name of Google Inc. nor the names of its
13 // contributors may be used to endorse or promote products derived
14 // from this software without specific prior written permission.
15 //
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 
28 #include <limits.h> // For LONG_MIN, LONG_MAX.
29 
30 #include "v8.h"
31 
32 #if V8_TARGET_ARCH_ARM
33 
34 #include "bootstrapper.h"
35 #include "codegen.h"
36 #include "cpu-profiler.h"
37 #include "debug.h"
38 #include "isolate-inl.h"
39 #include "runtime.h"
40 
41 namespace v8 {
42 namespace internal {
43 
44 MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
45  : Assembler(arg_isolate, buffer, size),
46  generating_stub_(false),
47  has_frame_(false) {
48  if (isolate() != NULL) {
49  code_object_ = Handle<Object>(isolate()->heap()->undefined_value(),
50  isolate());
51  }
52 }
53 
54 
55 void MacroAssembler::Jump(Register target, Condition cond) {
56  bx(target, cond);
57 }
58 
59 
60 void MacroAssembler::Jump(intptr_t target, RelocInfo::Mode rmode,
61  Condition cond) {
62  ASSERT(RelocInfo::IsCodeTarget(rmode));
63  mov(pc, Operand(target, rmode), LeaveCC, cond);
64 }
65 
66 
67 void MacroAssembler::Jump(Address target, RelocInfo::Mode rmode,
68  Condition cond) {
69  ASSERT(!RelocInfo::IsCodeTarget(rmode));
70  Jump(reinterpret_cast<intptr_t>(target), rmode, cond);
71 }
72 
73 
74 void MacroAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
75  Condition cond) {
76  ASSERT(RelocInfo::IsCodeTarget(rmode));
77  // 'code' is always generated ARM code, never THUMB code
78  AllowDeferredHandleDereference embedding_raw_address;
79  Jump(reinterpret_cast<intptr_t>(code.location()), rmode, cond);
80 }
81 
82 
83 int MacroAssembler::CallSize(Register target, Condition cond) {
84  return kInstrSize;
85 }
86 
87 
88 void MacroAssembler::Call(Register target, Condition cond) {
89  // Block constant pool for the call instruction sequence.
90  BlockConstPoolScope block_const_pool(this);
91  Label start;
92  bind(&start);
93  blx(target, cond);
94  ASSERT_EQ(CallSize(target, cond), SizeOfCodeGeneratedSince(&start));
95 }
96 
97 
98 int MacroAssembler::CallSize(
99  Address target, RelocInfo::Mode rmode, Condition cond) {
100  int size = 2 * kInstrSize;
101  Instr mov_instr = cond | MOV | LeaveCC;
102  intptr_t immediate = reinterpret_cast<intptr_t>(target);
103  if (!Operand(immediate, rmode).is_single_instruction(this, mov_instr)) {
104  size += kInstrSize;
105  }
106  return size;
107 }
108 
109 
110 int MacroAssembler::CallSizeNotPredictableCodeSize(
111  Address target, RelocInfo::Mode rmode, Condition cond) {
112  int size = 2 * kInstrSize;
113  Instr mov_instr = cond | MOV | LeaveCC;
114  intptr_t immediate = reinterpret_cast<intptr_t>(target);
115  if (!Operand(immediate, rmode).is_single_instruction(NULL, mov_instr)) {
116  size += kInstrSize;
117  }
118  return size;
119 }
120 
121 
122 void MacroAssembler::Call(Address target,
123  RelocInfo::Mode rmode,
124  Condition cond,
125  TargetAddressStorageMode mode) {
126  // Block constant pool for the call instruction sequence.
127  BlockConstPoolScope block_const_pool(this);
128  Label start;
129  bind(&start);
130 
131  bool old_predictable_code_size = predictable_code_size();
132  if (mode == NEVER_INLINE_TARGET_ADDRESS) {
133  set_predictable_code_size(true);
134  }
135 
136 #ifdef DEBUG
137  // Check the expected size before generating code to ensure we assume the same
138  // constant pool availability (e.g., whether the constant pool is full or not).
139  int expected_size = CallSize(target, rmode, cond);
140 #endif
141 
142  // Call sequence on V7 or later may be:
143  // movw ip, #... @ call address low 16
144  // movt ip, #... @ call address high 16
145  // blx ip
146  // @ return address
147  // Or for pre-V7 or values that may be back-patched
148  // to avoid ICache flushes:
149  // ldr ip, [pc, #...] @ call address
150  // blx ip
151  // @ return address
152 
153  // Statement positions are expected to be recorded when the target
154  // address is loaded. The mov method will automatically record
155  // positions when pc is the target; since that is not the case here,
156  // we have to do it explicitly.
157  positions_recorder()->WriteRecordedPositions();
158 
159  mov(ip, Operand(reinterpret_cast<int32_t>(target), rmode));
160  blx(ip, cond);
161 
162  ASSERT_EQ(expected_size, SizeOfCodeGeneratedSince(&start));
163  if (mode == NEVER_INLINE_TARGET_ADDRESS) {
164  set_predictable_code_size(old_predictable_code_size);
165  }
166 }
167 
168 
169 int MacroAssembler::CallSize(Handle<Code> code,
170  RelocInfo::Mode rmode,
171  TypeFeedbackId ast_id,
172  Condition cond) {
173  AllowDeferredHandleDereference using_raw_address;
174  return CallSize(reinterpret_cast<Address>(code.location()), rmode, cond);
175 }
176 
177 
178 void MacroAssembler::Call(Handle<Code> code,
179  RelocInfo::Mode rmode,
180  TypeFeedbackId ast_id,
181  Condition cond,
182  TargetAddressStorageMode mode) {
183  Label start;
184  bind(&start);
185  ASSERT(RelocInfo::IsCodeTarget(rmode));
186  if (rmode == RelocInfo::CODE_TARGET && !ast_id.IsNone()) {
187  SetRecordedAstId(ast_id);
188  rmode = RelocInfo::CODE_TARGET_WITH_ID;
189  }
190  // 'code' is always generated ARM code, never THUMB code
191  AllowDeferredHandleDereference embedding_raw_address;
192  Call(reinterpret_cast<Address>(code.location()), rmode, cond, mode);
193 }
194 
195 
196 void MacroAssembler::Ret(Condition cond) {
197  bx(lr, cond);
198 }
199 
200 
201 void MacroAssembler::Drop(int count, Condition cond) {
202  if (count > 0) {
203  add(sp, sp, Operand(count * kPointerSize), LeaveCC, cond);
204  }
205 }
206 
207 
208 void MacroAssembler::Ret(int drop, Condition cond) {
209  Drop(drop, cond);
210  Ret(cond);
211 }
212 
213 
214 void MacroAssembler::Swap(Register reg1,
215  Register reg2,
216  Register scratch,
217  Condition cond) {
218  if (scratch.is(no_reg)) {
219  eor(reg1, reg1, Operand(reg2), LeaveCC, cond);
220  eor(reg2, reg2, Operand(reg1), LeaveCC, cond);
221  eor(reg1, reg1, Operand(reg2), LeaveCC, cond);
222  } else {
223  mov(scratch, reg1, LeaveCC, cond);
224  mov(reg1, reg2, LeaveCC, cond);
225  mov(reg2, scratch, LeaveCC, cond);
226  }
227 }
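// A minimal standalone sketch of the scratch-free path above: the classic
// three-EOR (XOR) swap, shown here on plain 32-bit integers (illustrative
// only, not V8 code; assumes <stdint.h>). Note it only works when the two
// locations are distinct -- swapping a value with itself would zero it.
static inline void XorSwapSketch(uint32_t* a, uint32_t* b) {
  *a ^= *b;  // a = a ^ b
  *b ^= *a;  // b = b ^ (a ^ b) == old a
  *a ^= *b;  // a = (a ^ b) ^ old a == old b
}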
228 
229 
230 void MacroAssembler::Call(Label* target) {
231  bl(target);
232 }
233 
234 
235 void MacroAssembler::Push(Handle<Object> handle) {
236  mov(ip, Operand(handle));
237  push(ip);
238 }
239 
240 
241 void MacroAssembler::Move(Register dst, Handle<Object> value) {
242  AllowDeferredHandleDereference smi_check;
243  if (value->IsSmi()) {
244  mov(dst, Operand(value));
245  } else {
246  ASSERT(value->IsHeapObject());
247  if (isolate()->heap()->InNewSpace(*value)) {
248  Handle<Cell> cell = isolate()->factory()->NewCell(value);
249  mov(dst, Operand(cell));
250  ldr(dst, FieldMemOperand(dst, Cell::kValueOffset));
251  } else {
252  mov(dst, Operand(value));
253  }
254  }
255 }
256 
257 
258 void MacroAssembler::Move(Register dst, Register src, Condition cond) {
259  if (!dst.is(src)) {
260  mov(dst, src, LeaveCC, cond);
261  }
262 }
263 
264 
265 void MacroAssembler::Move(DwVfpRegister dst, DwVfpRegister src) {
266  if (!dst.is(src)) {
267  vmov(dst, src);
268  }
269 }
270 
271 
272 void MacroAssembler::And(Register dst, Register src1, const Operand& src2,
273  Condition cond) {
274  if (!src2.is_reg() &&
275  !src2.must_output_reloc_info(this) &&
276  src2.immediate() == 0) {
277  mov(dst, Operand::Zero(), LeaveCC, cond);
278  } else if (!src2.is_single_instruction(this) &&
279  !src2.must_output_reloc_info(this) &&
280  CpuFeatures::IsSupported(ARMv7) &&
281  IsPowerOf2(src2.immediate() + 1)) {
282  ubfx(dst, src1, 0,
283  WhichPowerOf2(static_cast<uint32_t>(src2.immediate()) + 1), cond);
284  } else {
285  and_(dst, src1, src2, LeaveCC, cond);
286  }
287 }
288 
289 
290 void MacroAssembler::Ubfx(Register dst, Register src1, int lsb, int width,
291  Condition cond) {
292  ASSERT(lsb < 32);
293  if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) {
294  int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
295  and_(dst, src1, Operand(mask), LeaveCC, cond);
296  if (lsb != 0) {
297  mov(dst, Operand(dst, LSR, lsb), LeaveCC, cond);
298  }
299  } else {
300  ubfx(dst, src1, lsb, width, cond);
301  }
302 }
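// A standalone sketch of the pre-ARMv7 fallback above: extract the unsigned
// bit field [lsb, lsb + width) with an AND mask followed by a logical shift
// right (illustrative only, not V8 code; assumes <stdint.h>, 0 < width and
// lsb + width <= 32).
static inline uint32_t UbfxSketch(uint32_t src, int lsb, int width) {
  uint32_t field = (width == 32) ? 0xFFFFFFFFu : ((1u << width) - 1u);
  return (src & (field << lsb)) >> lsb;  // mask the field, shift it to bit 0
}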
303 
304 
305 void MacroAssembler::Sbfx(Register dst, Register src1, int lsb, int width,
306  Condition cond) {
307  ASSERT(lsb < 32);
308  if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) {
309  int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
310  and_(dst, src1, Operand(mask), LeaveCC, cond);
311  int shift_up = 32 - lsb - width;
312  int shift_down = lsb + shift_up;
313  if (shift_up != 0) {
314  mov(dst, Operand(dst, LSL, shift_up), LeaveCC, cond);
315  }
316  if (shift_down != 0) {
317  mov(dst, Operand(dst, ASR, shift_down), LeaveCC, cond);
318  }
319  } else {
320  sbfx(dst, src1, lsb, width, cond);
321  }
322 }
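// A standalone sketch of the pre-ARMv7 fallback above: sign-extend a bit
// field by shifting it up to the top of the word (LSL) and then arithmetic
// shifting it back down (ASR). Illustrative only, not V8 code; assumes
// <stdint.h>, 0 < width, lsb + width <= 32, and an arithmetic right shift
// on signed int, which holds for the compilers V8 targets.
static inline int32_t SbfxSketch(uint32_t src, int lsb, int width) {
  int shift_up = 32 - lsb - width;    // bring the field's top bit to bit 31
  int shift_down = lsb + shift_up;    // shift back, replicating the sign bit
  return static_cast<int32_t>(src << shift_up) >> shift_down;
}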
323 
324 
325 void MacroAssembler::Bfi(Register dst,
326  Register src,
327  Register scratch,
328  int lsb,
329  int width,
330  Condition cond) {
331  ASSERT(0 <= lsb && lsb < 32);
332  ASSERT(0 <= width && width < 32);
333  ASSERT(lsb + width < 32);
334  ASSERT(!scratch.is(dst));
335  if (width == 0) return;
336  if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) {
337  int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
338  bic(dst, dst, Operand(mask));
339  and_(scratch, src, Operand((1 << width) - 1));
340  mov(scratch, Operand(scratch, LSL, lsb));
341  orr(dst, dst, scratch);
342  } else {
343  bfi(dst, src, lsb, width, cond);
344  }
345 }
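// A standalone sketch of the BIC/AND/LSL/ORR fallback above: insert the low
// 'width' bits of 'src' into 'dst' at bit position 'lsb' (illustrative only,
// not V8 code; assumes <stdint.h>, 0 < width and lsb + width < 32).
static inline uint32_t BfiSketch(uint32_t dst, uint32_t src, int lsb, int width) {
  uint32_t field_mask = (1u << width) - 1u;   // low 'width' bits
  dst &= ~(field_mask << lsb);                // clear the destination field (bic)
  dst |= (src & field_mask) << lsb;           // move the new bits in (and/lsl/orr)
  return dst;
}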
346 
347 
348 void MacroAssembler::Bfc(Register dst, Register src, int lsb, int width,
349  Condition cond) {
350  ASSERT(lsb < 32);
351  if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) {
352  int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
353  bic(dst, src, Operand(mask));
354  } else {
355  Move(dst, src, cond);
356  bfc(dst, lsb, width, cond);
357  }
358 }
359 
360 
361 void MacroAssembler::Usat(Register dst, int satpos, const Operand& src,
362  Condition cond) {
363  if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) {
364  ASSERT(!dst.is(pc) && !src.rm().is(pc));
365  ASSERT((satpos >= 0) && (satpos <= 31));
366 
367  // These asserts are required to ensure compatibility with the ARMv7
368  // implementation.
369  ASSERT((src.shift_op() == ASR) || (src.shift_op() == LSL));
370  ASSERT(src.rs().is(no_reg));
371 
372  Label done;
373  int satval = (1 << satpos) - 1;
374 
375  if (cond != al) {
376  b(NegateCondition(cond), &done); // Skip saturate if !condition.
377  }
378  if (!(src.is_reg() && dst.is(src.rm()))) {
379  mov(dst, src);
380  }
381  tst(dst, Operand(~satval));
382  b(eq, &done);
383  mov(dst, Operand::Zero(), LeaveCC, mi); // 0 if negative.
384  mov(dst, Operand(satval), LeaveCC, pl); // satval if positive.
385  bind(&done);
386  } else {
387  usat(dst, satpos, src, cond);
388  }
389 }
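// A standalone sketch of the software saturation path above: clamp a signed
// value into the unsigned range [0, 2^satpos - 1], which is what USAT does
// in hardware (illustrative only, not V8 code; assumes <stdint.h> and
// 0 <= satpos <= 31).
static inline uint32_t UsatSketch(int32_t value, int satpos) {
  uint32_t satval = (1u << satpos) - 1u;                     // largest allowed value
  if (value < 0) return 0u;                                  // negative -> 0
  if (static_cast<uint32_t>(value) > satval) return satval;  // too big -> satval
  return static_cast<uint32_t>(value);
}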
390 
391 
392 void MacroAssembler::Load(Register dst,
393  const MemOperand& src,
394  Representation r) {
395  ASSERT(!r.IsDouble());
396  if (r.IsInteger8()) {
397  ldrsb(dst, src);
398  } else if (r.IsUInteger8()) {
399  ldrb(dst, src);
400  } else if (r.IsInteger16()) {
401  ldrsh(dst, src);
402  } else if (r.IsUInteger16()) {
403  ldrh(dst, src);
404  } else {
405  ldr(dst, src);
406  }
407 }
408 
409 
410 void MacroAssembler::Store(Register src,
411  const MemOperand& dst,
412  Representation r) {
413  ASSERT(!r.IsDouble());
414  if (r.IsInteger8() || r.IsUInteger8()) {
415  strb(src, dst);
416  } else if (r.IsInteger16() || r.IsUInteger16()) {
417  strh(src, dst);
418  } else {
419  str(src, dst);
420  }
421 }
422 
423 
424 void MacroAssembler::LoadRoot(Register destination,
425  Heap::RootListIndex index,
426  Condition cond) {
427  if (CpuFeatures::IsSupported(MOVW_MOVT_IMMEDIATE_LOADS) &&
428  isolate()->heap()->RootCanBeTreatedAsConstant(index) &&
429  !predictable_code_size()) {
430  // The CPU supports fast immediate values, and this root will never
431  // change. We will load it as a relocatable immediate value.
432  Handle<Object> root(&isolate()->heap()->roots_array_start()[index]);
433  mov(destination, Operand(root), LeaveCC, cond);
434  return;
435  }
436  ldr(destination, MemOperand(kRootRegister, index << kPointerSizeLog2), cond);
437 }
438 
439 
440 void MacroAssembler::StoreRoot(Register source,
441  Heap::RootListIndex index,
442  Condition cond) {
443  str(source, MemOperand(kRootRegister, index << kPointerSizeLog2), cond);
444 }
445 
446 
447 void MacroAssembler::InNewSpace(Register object,
448  Register scratch,
449  Condition cond,
450  Label* branch) {
451  ASSERT(cond == eq || cond == ne);
452  and_(scratch, object, Operand(ExternalReference::new_space_mask(isolate())));
453  cmp(scratch, Operand(ExternalReference::new_space_start(isolate())));
454  b(cond, branch);
455 }
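// A minimal sketch of the mask-and-compare test above. New space is a single
// aligned, power-of-two sized region, so "is in new space" reduces to masking
// off the low bits of the address and comparing against the space's start.
// Illustrative only, not V8 code; the parameter names stand in for the values
// behind ExternalReference::new_space_mask/new_space_start.
static inline bool InNewSpaceSketch(uintptr_t object_address,
                                    uintptr_t new_space_start,
                                    uintptr_t new_space_mask) {
  return (object_address & new_space_mask) == new_space_start;
}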
456 
457 
458 void MacroAssembler::RecordWriteField(
459  Register object,
460  int offset,
461  Register value,
462  Register dst,
463  LinkRegisterStatus lr_status,
464  SaveFPRegsMode save_fp,
465  RememberedSetAction remembered_set_action,
466  SmiCheck smi_check) {
467  // First, check if a write barrier is even needed. The tests below
468  // catch stores of Smis.
469  Label done;
470 
471  // Skip barrier if writing a smi.
472  if (smi_check == INLINE_SMI_CHECK) {
473  JumpIfSmi(value, &done);
474  }
475 
476  // Although the object register is tagged, the offset is relative to the start
477  // of the object, so the offset must be a multiple of kPointerSize.
478  ASSERT(IsAligned(offset, kPointerSize));
479 
480  add(dst, object, Operand(offset - kHeapObjectTag));
481  if (emit_debug_code()) {
482  Label ok;
483  tst(dst, Operand((1 << kPointerSizeLog2) - 1));
484  b(eq, &ok);
485  stop("Unaligned cell in write barrier");
486  bind(&ok);
487  }
488 
489  RecordWrite(object,
490  dst,
491  value,
492  lr_status,
493  save_fp,
494  remembered_set_action,
495  OMIT_SMI_CHECK);
496 
497  bind(&done);
498 
499  // Clobber clobbered input registers when running with the debug-code flag
500  // turned on to provoke errors.
501  if (emit_debug_code()) {
502  mov(value, Operand(BitCast<int32_t>(kZapValue + 4)));
503  mov(dst, Operand(BitCast<int32_t>(kZapValue + 8)));
504  }
505 }
506 
507 
508 // Will clobber 4 registers: object, address, scratch, ip. The
509 // register 'object' contains a heap object pointer. The heap object
510 // tag is shifted away.
511 void MacroAssembler::RecordWrite(Register object,
512  Register address,
513  Register value,
514  LinkRegisterStatus lr_status,
515  SaveFPRegsMode fp_mode,
516  RememberedSetAction remembered_set_action,
517  SmiCheck smi_check) {
518  ASSERT(!object.is(value));
519  if (emit_debug_code()) {
520  ldr(ip, MemOperand(address));
521  cmp(ip, value);
522  Check(eq, kWrongAddressOrValuePassedToRecordWrite);
523  }
524 
525  // Count number of write barriers in generated code.
526  isolate()->counters()->write_barriers_static()->Increment();
527  // TODO(mstarzinger): Dynamic counter missing.
528 
529  // First, check if a write barrier is even needed. The tests below
530  // catch stores of smis and stores into the young generation.
531  Label done;
532 
533  if (smi_check == INLINE_SMI_CHECK) {
534  JumpIfSmi(value, &done);
535  }
536 
537  CheckPageFlag(value,
538  value, // Used as scratch.
539  MemoryChunk::kPointersToHereAreInterestingMask,
540  eq,
541  &done);
542  CheckPageFlag(object,
543  value, // Used as scratch.
544  MemoryChunk::kPointersFromHereAreInterestingMask,
545  eq,
546  &done);
547 
548  // Record the actual write.
549  if (lr_status == kLRHasNotBeenSaved) {
550  push(lr);
551  }
552  RecordWriteStub stub(object, value, address, remembered_set_action, fp_mode);
553  CallStub(&stub);
554  if (lr_status == kLRHasNotBeenSaved) {
555  pop(lr);
556  }
557 
558  bind(&done);
559 
560  // Clobber clobbered registers when running with the debug-code flag
561  // turned on to provoke errors.
562  if (emit_debug_code()) {
563  mov(address, Operand(BitCast<int32_t>(kZapValue + 12)));
564  mov(value, Operand(BitCast<int32_t>(kZapValue + 16)));
565  }
566 }
567 
568 
569 void MacroAssembler::RememberedSetHelper(Register object, // For debug tests.
570  Register address,
571  Register scratch,
572  SaveFPRegsMode fp_mode,
573  RememberedSetFinalAction and_then) {
574  Label done;
575  if (emit_debug_code()) {
576  Label ok;
577  JumpIfNotInNewSpace(object, scratch, &ok);
578  stop("Remembered set pointer is in new space");
579  bind(&ok);
580  }
581  // Load store buffer top.
582  ExternalReference store_buffer =
583  ExternalReference::store_buffer_top(isolate());
584  mov(ip, Operand(store_buffer));
585  ldr(scratch, MemOperand(ip));
586  // Store pointer to buffer and increment buffer top.
587  str(address, MemOperand(scratch, kPointerSize, PostIndex));
588  // Write back new top of buffer.
589  str(scratch, MemOperand(ip));
590  // Call stub on end of buffer.
591  // Check for end of buffer.
592  tst(scratch, Operand(StoreBuffer::kStoreBufferOverflowBit));
593  if (and_then == kFallThroughAtEnd) {
594  b(eq, &done);
595  } else {
596  ASSERT(and_then == kReturnAtEnd);
597  Ret(eq);
598  }
599  push(lr);
600  StoreBufferOverflowStub store_buffer_overflow =
601  StoreBufferOverflowStub(fp_mode);
602  CallStub(&store_buffer_overflow);
603  pop(lr);
604  bind(&done);
605  if (and_then == kReturnAtEnd) {
606  Ret();
607  }
608 }
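// A simplified sketch of the store-buffer append performed above: write the
// slot address at the current top, bump the top, and fall back to a slow path
// when the buffer is full. Illustrative only, not V8 code; the real code
// detects "full" by testing an overflow bit encoded in the top address, so
// the explicit 'limit' below is a stand-in for that check.
static inline void StoreBufferAppendSketch(uintptr_t** top,
                                           uintptr_t* limit,
                                           uintptr_t slot_address,
                                           void (*overflow_slow_path)()) {
  *(*top)++ = slot_address;     // str address, [scratch], #kPointerSize
  if (*top == limit) {          // tst scratch, #kStoreBufferOverflowBit
    overflow_slow_path();       // StoreBufferOverflowStub
  }
}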
609 
610 
611 void MacroAssembler::PushFixedFrame(Register marker_reg) {
612  ASSERT(!marker_reg.is_valid() || marker_reg.code() < cp.code());
613  stm(db_w, sp, (marker_reg.is_valid() ? marker_reg.bit() : 0) |
614  cp.bit() |
615  (FLAG_enable_ool_constant_pool ? pp.bit() : 0) |
616  fp.bit() |
617  lr.bit());
618 }
619 
620 
621 void MacroAssembler::PopFixedFrame(Register marker_reg) {
622  ASSERT(!marker_reg.is_valid() || marker_reg.code() < cp.code());
623  ldm(ia_w, sp, (marker_reg.is_valid() ? marker_reg.bit() : 0) |
624  cp.bit() |
625  (FLAG_enable_ool_constant_pool ? pp.bit() : 0) |
626  fp.bit() |
627  lr.bit());
628 }
629 
630 
631 // Push and pop all registers that can hold pointers.
632 void MacroAssembler::PushSafepointRegisters() {
633  // Safepoints expect a block of contiguous register values starting with r0:
634  ASSERT(((1 << kNumSafepointSavedRegisters) - 1) == kSafepointSavedRegisters);
635  // Safepoints expect a block of kNumSafepointRegisters values on the
636  // stack, so adjust the stack for unsaved registers.
637  const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
638  ASSERT(num_unsaved >= 0);
639  sub(sp, sp, Operand(num_unsaved * kPointerSize));
640  stm(db_w, sp, kSafepointSavedRegisters);
641 }
642 
643 
644 void MacroAssembler::PopSafepointRegisters() {
645  const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
646  ldm(ia_w, sp, kSafepointSavedRegisters);
647  add(sp, sp, Operand(num_unsaved * kPointerSize));
648 }
649 
650 
651 void MacroAssembler::PushSafepointRegistersAndDoubles() {
652  // Number of d-regs not known at snapshot time.
653  ASSERT(!Serializer::enabled());
654  PushSafepointRegisters();
655  // Only save allocatable registers.
656  ASSERT(kScratchDoubleReg.is(d15) && kDoubleRegZero.is(d14));
657  ASSERT(DwVfpRegister::NumReservedRegisters() == 2);
658  if (CpuFeatures::IsSupported(VFP32DREGS)) {
659  vstm(db_w, sp, d16, d31);
660  }
661  vstm(db_w, sp, d0, d13);
662 }
663 
664 
665 void MacroAssembler::PopSafepointRegistersAndDoubles() {
666  // Number of d-regs not known at snapshot time.
667  ASSERT(!Serializer::enabled());
668  // Only save allocatable registers.
669  ASSERT(kScratchDoubleReg.is(d15) && kDoubleRegZero.is(d14));
670  ASSERT(DwVfpRegister::NumReservedRegisters() == 2);
671  vldm(ia_w, sp, d0, d13);
672  if (CpuFeatures::IsSupported(VFP32DREGS)) {
673  vldm(ia_w, sp, d16, d31);
674  }
675  PopSafepointRegisters();
676 }
677 
678 void MacroAssembler::StoreToSafepointRegistersAndDoublesSlot(Register src,
679  Register dst) {
680  str(src, SafepointRegistersAndDoublesSlot(dst));
681 }
682 
683 
684 void MacroAssembler::StoreToSafepointRegisterSlot(Register src, Register dst) {
685  str(src, SafepointRegisterSlot(dst));
686 }
687 
688 
689 void MacroAssembler::LoadFromSafepointRegisterSlot(Register dst, Register src) {
690  ldr(dst, SafepointRegisterSlot(src));
691 }
692 
693 
694 int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
695  // The registers are pushed starting with the highest encoding,
696  // which means that lowest encodings are closest to the stack pointer.
697  ASSERT(reg_code >= 0 && reg_code < kNumSafepointRegisters);
698  return reg_code;
699 }
700 
701 
702 MemOperand MacroAssembler::SafepointRegisterSlot(Register reg) {
703  return MemOperand(sp, SafepointRegisterStackIndex(reg.code()) * kPointerSize);
704 }
705 
706 
707 MemOperand MacroAssembler::SafepointRegistersAndDoublesSlot(Register reg) {
708  // Number of d-regs not known at snapshot time.
709  ASSERT(!Serializer::enabled());
710  // General purpose registers are pushed last on the stack.
711  int doubles_size = DwVfpRegister::NumAllocatableRegisters() * kDoubleSize;
712  int register_offset = SafepointRegisterStackIndex(reg.code()) * kPointerSize;
713  return MemOperand(sp, doubles_size + register_offset);
714 }
715 
716 
717 void MacroAssembler::Ldrd(Register dst1, Register dst2,
718  const MemOperand& src, Condition cond) {
719  ASSERT(src.rm().is(no_reg));
720  ASSERT(!dst1.is(lr)); // r14.
721 
722  // V8 does not use this addressing mode, so the fallback code
723  // below doesn't support it yet.
724  ASSERT((src.am() != PreIndex) && (src.am() != NegPreIndex));
725 
726  // Generate two ldr instructions if ldrd is not available.
727  if (CpuFeatures::IsSupported(ARMv7) && !predictable_code_size() &&
728  (dst1.code() % 2 == 0) && (dst1.code() + 1 == dst2.code())) {
729  CpuFeatureScope scope(this, ARMv7);
730  ldrd(dst1, dst2, src, cond);
731  } else {
732  if ((src.am() == Offset) || (src.am() == NegOffset)) {
733  MemOperand src2(src);
734  src2.set_offset(src2.offset() + 4);
735  if (dst1.is(src.rn())) {
736  ldr(dst2, src2, cond);
737  ldr(dst1, src, cond);
738  } else {
739  ldr(dst1, src, cond);
740  ldr(dst2, src2, cond);
741  }
742  } else { // PostIndex or NegPostIndex.
743  ASSERT((src.am() == PostIndex) || (src.am() == NegPostIndex));
744  if (dst1.is(src.rn())) {
745  ldr(dst2, MemOperand(src.rn(), 4, Offset), cond);
746  ldr(dst1, src, cond);
747  } else {
748  MemOperand src2(src);
749  src2.set_offset(src2.offset() - 4);
750  ldr(dst1, MemOperand(src.rn(), 4, PostIndex), cond);
751  ldr(dst2, src2, cond);
752  }
753  }
754  }
755 }
756 
757 
758 void MacroAssembler::Strd(Register src1, Register src2,
759  const MemOperand& dst, Condition cond) {
760  ASSERT(dst.rm().is(no_reg));
761  ASSERT(!src1.is(lr)); // r14.
762 
763  // V8 does not use this addressing mode, so the fallback code
764  // below doesn't support it yet.
765  ASSERT((dst.am() != PreIndex) && (dst.am() != NegPreIndex));
766 
767  // Generate two str instructions if strd is not available.
768  if (CpuFeatures::IsSupported(ARMv7) && !predictable_code_size() &&
769  (src1.code() % 2 == 0) && (src1.code() + 1 == src2.code())) {
770  CpuFeatureScope scope(this, ARMv7);
771  strd(src1, src2, dst, cond);
772  } else {
773  MemOperand dst2(dst);
774  if ((dst.am() == Offset) || (dst.am() == NegOffset)) {
775  dst2.set_offset(dst2.offset() + 4);
776  str(src1, dst, cond);
777  str(src2, dst2, cond);
778  } else { // PostIndex or NegPostIndex.
779  ASSERT((dst.am() == PostIndex) || (dst.am() == NegPostIndex));
780  dst2.set_offset(dst2.offset() - 4);
781  str(src1, MemOperand(dst.rn(), 4, PostIndex), cond);
782  str(src2, dst2, cond);
783  }
784  }
785 }
786 
787 
788 void MacroAssembler::VFPEnsureFPSCRState(Register scratch) {
789  // If needed, restore wanted bits of FPSCR.
790  Label fpscr_done;
791  vmrs(scratch);
792  tst(scratch, Operand(kVFPDefaultNaNModeControlBit));
793  b(ne, &fpscr_done);
794  orr(scratch, scratch, Operand(kVFPDefaultNaNModeControlBit));
795  vmsr(scratch);
796  bind(&fpscr_done);
797 }
798 
799 
800 void MacroAssembler::VFPCanonicalizeNaN(const DwVfpRegister dst,
801  const DwVfpRegister src,
802  const Condition cond) {
803  vsub(dst, src, kDoubleRegZero, cond);
804 }
805 
806 
807 void MacroAssembler::VFPCompareAndSetFlags(const DwVfpRegister src1,
808  const DwVfpRegister src2,
809  const Condition cond) {
810  // Compare and move FPSCR flags to the normal condition flags.
811  VFPCompareAndLoadFlags(src1, src2, pc, cond);
812 }
813 
814 void MacroAssembler::VFPCompareAndSetFlags(const DwVfpRegister src1,
815  const double src2,
816  const Condition cond) {
817  // Compare and move FPSCR flags to the normal condition flags.
818  VFPCompareAndLoadFlags(src1, src2, pc, cond);
819 }
820 
821 
822 void MacroAssembler::VFPCompareAndLoadFlags(const DwVfpRegister src1,
823  const DwVfpRegister src2,
824  const Register fpscr_flags,
825  const Condition cond) {
826  // Compare and load FPSCR.
827  vcmp(src1, src2, cond);
828  vmrs(fpscr_flags, cond);
829 }
830 
831 void MacroAssembler::VFPCompareAndLoadFlags(const DwVfpRegister src1,
832  const double src2,
833  const Register fpscr_flags,
834  const Condition cond) {
835  // Compare and load FPSCR.
836  vcmp(src1, src2, cond);
837  vmrs(fpscr_flags, cond);
838 }
839 
840 void MacroAssembler::Vmov(const DwVfpRegister dst,
841  const double imm,
842  const Register scratch) {
843  static const DoubleRepresentation minus_zero(-0.0);
844  static const DoubleRepresentation zero(0.0);
845  DoubleRepresentation value_rep(imm);
846  // Handle special values first.
847  if (value_rep == zero) {
848  vmov(dst, kDoubleRegZero);
849  } else if (value_rep == minus_zero) {
850  vneg(dst, kDoubleRegZero);
851  } else {
852  vmov(dst, imm, scratch);
853  }
854 }
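// The special cases above must compare bit patterns rather than values,
// because 0.0 == -0.0 compares equal as a double. A minimal sketch of that
// distinction using memcpy instead of V8's DoubleRepresentation (illustrative
// only, not V8 code; assumes <stdint.h> and <string.h>).
static inline bool IsMinusZeroSketch(double value) {
  uint64_t bits;
  memcpy(&bits, &value, sizeof(bits));  // reinterpret the IEEE-754 bits
  return bits == (1ull << 63);          // only the sign bit is set
}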
855 
856 
857 void MacroAssembler::VmovHigh(Register dst, DwVfpRegister src) {
858  if (src.code() < 16) {
859  const LowDwVfpRegister loc = LowDwVfpRegister::from_code(src.code());
860  vmov(dst, loc.high());
861  } else {
862  vmov(dst, VmovIndexHi, src);
863  }
864 }
865 
866 
867 void MacroAssembler::VmovHigh(DwVfpRegister dst, Register src) {
868  if (dst.code() < 16) {
869  const LowDwVfpRegister loc = LowDwVfpRegister::from_code(dst.code());
870  vmov(loc.high(), src);
871  } else {
872  vmov(dst, VmovIndexHi, src);
873  }
874 }
875 
876 
877 void MacroAssembler::VmovLow(Register dst, DwVfpRegister src) {
878  if (src.code() < 16) {
879  const LowDwVfpRegister loc = LowDwVfpRegister::from_code(src.code());
880  vmov(dst, loc.low());
881  } else {
882  vmov(dst, VmovIndexLo, src);
883  }
884 }
885 
886 
887 void MacroAssembler::VmovLow(DwVfpRegister dst, Register src) {
888  if (dst.code() < 16) {
889  const LowDwVfpRegister loc = LowDwVfpRegister::from_code(dst.code());
890  vmov(loc.low(), src);
891  } else {
892  vmov(dst, VmovIndexLo, src);
893  }
894 }
895 
896 
897 void MacroAssembler::LoadConstantPoolPointerRegister() {
898  if (FLAG_enable_ool_constant_pool) {
899  int constant_pool_offset = Code::kConstantPoolOffset - Code::kHeaderSize -
900  pc_offset() - Instruction::kPCReadOffset;
901  ASSERT(ImmediateFitsAddrMode2Instruction(constant_pool_offset));
902  ldr(pp, MemOperand(pc, constant_pool_offset));
903  }
904 }
905 
906 
907 void MacroAssembler::Prologue(PrologueFrameMode frame_mode) {
908  if (frame_mode == BUILD_STUB_FRAME) {
909  PushFixedFrame();
910  Push(Smi::FromInt(StackFrame::STUB));
911  // Adjust FP to point to saved FP.
912  add(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
913  } else {
914  PredictableCodeSizeScope predictible_code_size_scope(
915  this, kNoCodeAgeSequenceLength * Assembler::kInstrSize);
916  // The following three instructions must remain together and unmodified
917  // for code aging to work properly.
918  if (isolate()->IsCodePreAgingActive()) {
919  // Pre-age the code.
920  Code* stub = Code::GetPreAgedCodeAgeStub(isolate());
921  add(r0, pc, Operand(-8));
922  ldr(pc, MemOperand(pc, -4));
923  emit_code_stub_address(stub);
924  } else {
925  PushFixedFrame(r1);
926  nop(ip.code());
927  // Adjust FP to point to saved FP.
928  add(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
929  }
930  }
931  if (FLAG_enable_ool_constant_pool) {
932  LoadConstantPoolPointerRegister();
933  set_constant_pool_available(true);
934  }
935 }
936 
937 
938 void MacroAssembler::EnterFrame(StackFrame::Type type,
939  bool load_constant_pool) {
940  // r0-r3: preserved
941  PushFixedFrame();
942  if (FLAG_enable_ool_constant_pool && load_constant_pool) {
943  LoadConstantPoolPointerRegister();
944  }
945  mov(ip, Operand(Smi::FromInt(type)));
946  push(ip);
947  mov(ip, Operand(CodeObject()));
948  push(ip);
949  // Adjust FP to point to saved FP.
950  add(fp, sp,
951  Operand(StandardFrameConstants::kFixedFrameSizeFromFp + kPointerSize));
952 }
953 
954 
955 int MacroAssembler::LeaveFrame(StackFrame::Type type) {
956  // r0: preserved
957  // r1: preserved
958  // r2: preserved
959 
960  // Drop the execution stack down to the frame pointer and restore
961  // the caller frame pointer, return address and constant pool pointer
962  // (if FLAG_enable_ool_constant_pool).
963  int frame_ends;
964  if (FLAG_enable_ool_constant_pool) {
965  add(sp, fp, Operand(StandardFrameConstants::kConstantPoolOffset));
966  frame_ends = pc_offset();
967  ldm(ia_w, sp, pp.bit() | fp.bit() | lr.bit());
968  } else {
969  mov(sp, fp);
970  frame_ends = pc_offset();
971  ldm(ia_w, sp, fp.bit() | lr.bit());
972  }
973  return frame_ends;
974 }
975 
976 
977 void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space) {
978  // Set up the frame structure on the stack.
979  ASSERT_EQ(2 * kPointerSize, ExitFrameConstants::kCallerSPDisplacement);
980  ASSERT_EQ(1 * kPointerSize, ExitFrameConstants::kCallerPCOffset);
981  ASSERT_EQ(0 * kPointerSize, ExitFrameConstants::kCallerFPOffset);
982  Push(lr, fp);
983  mov(fp, Operand(sp)); // Set up new frame pointer.
984  // Reserve room for saved entry sp and code object.
985  sub(sp, sp, Operand(ExitFrameConstants::kFrameSize));
986  if (emit_debug_code()) {
987  mov(ip, Operand::Zero());
988  str(ip, MemOperand(fp, ExitFrameConstants::kSPOffset));
989  }
990  if (FLAG_enable_ool_constant_pool) {
991  str(pp, MemOperand(fp, ExitFrameConstants::kConstantPoolOffset));
992  LoadConstantPoolPointerRegister();
993  }
994  mov(ip, Operand(CodeObject()));
995  str(ip, MemOperand(fp, ExitFrameConstants::kCodeOffset));
996 
997  // Save the frame pointer and the context in top.
998  mov(ip, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
999  str(fp, MemOperand(ip));
1000  mov(ip, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
1001  str(cp, MemOperand(ip));
1002 
1003  // Optionally save all double registers.
1004  if (save_doubles) {
1005  SaveFPRegs(sp, ip);
1006  // Note that d0 will be accessible at
1007  // fp - ExitFrameConstants::kFrameSize -
1008  // DwVfpRegister::kMaxNumRegisters * kDoubleSize,
1009  // since the sp slot, code slot and constant pool slot (if
1010  // FLAG_enable_ool_constant_pool) were pushed after the fp.
1011  }
1012 
1013  // Reserve place for the return address and stack space and align the frame
1014  // preparing for calling the runtime function.
1015  const int frame_alignment = MacroAssembler::ActivationFrameAlignment();
1016  sub(sp, sp, Operand((stack_space + 1) * kPointerSize));
1017  if (frame_alignment > 0) {
1018  ASSERT(IsPowerOf2(frame_alignment));
1019  and_(sp, sp, Operand(-frame_alignment));
1020  }
1021 
1022  // Set the exit frame sp value to point just before the return address
1023  // location.
1024  add(ip, sp, Operand(kPointerSize));
1025  str(ip, MemOperand(fp, ExitFrameConstants::kSPOffset));
1026 }
1027 
1028 
1029 void MacroAssembler::InitializeNewString(Register string,
1030  Register length,
1031  Heap::RootListIndex map_index,
1032  Register scratch1,
1033  Register scratch2) {
1034  SmiTag(scratch1, length);
1035  LoadRoot(scratch2, map_index);
1036  str(scratch1, FieldMemOperand(string, String::kLengthOffset));
1037  mov(scratch1, Operand(String::kEmptyHashField));
1038  str(scratch2, FieldMemOperand(string, HeapObject::kMapOffset));
1039  str(scratch1, FieldMemOperand(string, String::kHashFieldOffset));
1040 }
1041 
1042 
1043 int MacroAssembler::ActivationFrameAlignment() {
1044 #if V8_HOST_ARCH_ARM
1045  // Running on the real platform. Use the alignment as mandated by the local
1046  // environment.
1047  // Note: This will break if we ever start generating snapshots on one ARM
1048  // platform for another ARM platform with a different alignment.
1049  return OS::ActivationFrameAlignment();
1050 #else // V8_HOST_ARCH_ARM
1051  // If we are using the simulator then we should always align to the expected
1052  // alignment. As the simulator is used to generate snapshots we do not know
1053  // if the target platform will need alignment, so this is controlled from a
1054  // flag.
1055  return FLAG_sim_stack_alignment;
1056 #endif // V8_HOST_ARCH_ARM
1057 }
1058 
1059 
1060 void MacroAssembler::LeaveExitFrame(bool save_doubles,
1061  Register argument_count,
1062  bool restore_context) {
1063  ConstantPoolUnavailableScope constant_pool_unavailable(this);
1064 
1065  // Optionally restore all double registers.
1066  if (save_doubles) {
1067  // Calculate the stack location of the saved doubles and restore them.
1068  const int offset = ExitFrameConstants::kFrameSize;
1069  sub(r3, fp,
1070  Operand(offset + DwVfpRegister::kMaxNumRegisters * kDoubleSize));
1071  RestoreFPRegs(r3, ip);
1072  }
1073 
1074  // Clear top frame.
1075  mov(r3, Operand::Zero());
1076  mov(ip, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
1077  str(r3, MemOperand(ip));
1078 
1079  // Restore current context from top and clear it in debug mode.
1080  if (restore_context) {
1081  mov(ip, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
1082  ldr(cp, MemOperand(ip));
1083  }
1084 #ifdef DEBUG
1085  mov(ip, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
1086  str(r3, MemOperand(ip));
1087 #endif
1088 
1089  // Tear down the exit frame, pop the arguments, and return.
1090  if (FLAG_enable_ool_constant_pool) {
1091  ldr(pp, MemOperand(fp, ExitFrameConstants::kConstantPoolOffset));
1092  }
1093  mov(sp, Operand(fp));
1094  ldm(ia_w, sp, fp.bit() | lr.bit());
1095  if (argument_count.is_valid()) {
1096  add(sp, sp, Operand(argument_count, LSL, kPointerSizeLog2));
1097  }
1098 }
1099 
1100 
1101 void MacroAssembler::MovFromFloatResult(const DwVfpRegister dst) {
1102  if (use_eabi_hardfloat()) {
1103  Move(dst, d0);
1104  } else {
1105  vmov(dst, r0, r1);
1106  }
1107 }
1108 
1109 
1110 // On ARM this is just a synonym to make the purpose clear.
1111 void MacroAssembler::MovFromFloatParameter(DwVfpRegister dst) {
1112  MovFromFloatResult(dst);
1113 }
1114 
1115 
1116 void MacroAssembler::InvokePrologue(const ParameterCount& expected,
1117  const ParameterCount& actual,
1118  Handle<Code> code_constant,
1119  Register code_reg,
1120  Label* done,
1121  bool* definitely_mismatches,
1122  InvokeFlag flag,
1123  const CallWrapper& call_wrapper) {
1124  bool definitely_matches = false;
1125  *definitely_mismatches = false;
1126  Label regular_invoke;
1127 
1128  // Check whether the expected and actual arguments count match. If not,
1129  // setup registers according to contract with ArgumentsAdaptorTrampoline:
1130  // r0: actual arguments count
1131  // r1: function (passed through to callee)
1132  // r2: expected arguments count
1133 
1134  // The code below is made a lot easier because the calling code already sets
1135  // up actual and expected registers according to the contract if values are
1136  // passed in registers.
1137  ASSERT(actual.is_immediate() || actual.reg().is(r0));
1138  ASSERT(expected.is_immediate() || expected.reg().is(r2));
1139  ASSERT((!code_constant.is_null() && code_reg.is(no_reg)) || code_reg.is(r3));
1140 
1141  if (expected.is_immediate()) {
1142  ASSERT(actual.is_immediate());
1143  if (expected.immediate() == actual.immediate()) {
1144  definitely_matches = true;
1145  } else {
1146  mov(r0, Operand(actual.immediate()));
1147  const int sentinel = SharedFunctionInfo::kDontAdaptArgumentsSentinel;
1148  if (expected.immediate() == sentinel) {
1149  // Don't worry about adapting arguments for builtins that
1150  // don't want that done. Skip adaption code by making it look
1151  // like we have a match between expected and actual number of
1152  // arguments.
1153  definitely_matches = true;
1154  } else {
1155  *definitely_mismatches = true;
1156  mov(r2, Operand(expected.immediate()));
1157  }
1158  }
1159  } else {
1160  if (actual.is_immediate()) {
1161  cmp(expected.reg(), Operand(actual.immediate()));
1162  b(eq, &regular_invoke);
1163  mov(r0, Operand(actual.immediate()));
1164  } else {
1165  cmp(expected.reg(), Operand(actual.reg()));
1166  b(eq, &regular_invoke);
1167  }
1168  }
1169 
1170  if (!definitely_matches) {
1171  if (!code_constant.is_null()) {
1172  mov(r3, Operand(code_constant));
1173  add(r3, r3, Operand(Code::kHeaderSize - kHeapObjectTag));
1174  }
1175 
1176  Handle<Code> adaptor =
1177  isolate()->builtins()->ArgumentsAdaptorTrampoline();
1178  if (flag == CALL_FUNCTION) {
1179  call_wrapper.BeforeCall(CallSize(adaptor));
1180  Call(adaptor);
1181  call_wrapper.AfterCall();
1182  if (!*definitely_mismatches) {
1183  b(done);
1184  }
1185  } else {
1186  Jump(adaptor, RelocInfo::CODE_TARGET);
1187  }
1188  bind(&regular_invoke);
1189  }
1190 }
1191 
1192 
1193 void MacroAssembler::InvokeCode(Register code,
1194  const ParameterCount& expected,
1195  const ParameterCount& actual,
1196  InvokeFlag flag,
1197  const CallWrapper& call_wrapper) {
1198  // You can't call a function without a valid frame.
1199  ASSERT(flag == JUMP_FUNCTION || has_frame());
1200 
1201  Label done;
1202  bool definitely_mismatches = false;
1203  InvokePrologue(expected, actual, Handle<Code>::null(), code,
1204  &done, &definitely_mismatches, flag,
1205  call_wrapper);
1206  if (!definitely_mismatches) {
1207  if (flag == CALL_FUNCTION) {
1208  call_wrapper.BeforeCall(CallSize(code));
1209  Call(code);
1210  call_wrapper.AfterCall();
1211  } else {
1212  ASSERT(flag == JUMP_FUNCTION);
1213  Jump(code);
1214  }
1215 
1216  // Continue here if InvokePrologue does handle the invocation due to
1217  // mismatched parameter counts.
1218  bind(&done);
1219  }
1220 }
1221 
1222 
1223 void MacroAssembler::InvokeFunction(Register fun,
1224  const ParameterCount& actual,
1225  InvokeFlag flag,
1226  const CallWrapper& call_wrapper) {
1227  // You can't call a function without a valid frame.
1228  ASSERT(flag == JUMP_FUNCTION || has_frame());
1229 
1230  // Contract with called JS functions requires that function is passed in r1.
1231  ASSERT(fun.is(r1));
1232 
1233  Register expected_reg = r2;
1234  Register code_reg = r3;
1235 
1236  ldr(code_reg, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
1237  ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
1238  ldr(expected_reg,
1239  FieldMemOperand(code_reg,
1240  SharedFunctionInfo::kFormalParameterCountOffset));
1241  SmiUntag(expected_reg);
1242  ldr(code_reg,
1243  FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
1244 
1245  ParameterCount expected(expected_reg);
1246  InvokeCode(code_reg, expected, actual, flag, call_wrapper);
1247 }
1248 
1249 
1250 void MacroAssembler::InvokeFunction(Register function,
1251  const ParameterCount& expected,
1252  const ParameterCount& actual,
1253  InvokeFlag flag,
1254  const CallWrapper& call_wrapper) {
1255  // You can't call a function without a valid frame.
1256  ASSERT(flag == JUMP_FUNCTION || has_frame());
1257 
1258  // Contract with called JS functions requires that function is passed in r1.
1259  ASSERT(function.is(r1));
1260 
1261  // Get the function and setup the context.
1262  ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
1263 
1264  // We call indirectly through the code field in the function to
1265  // allow recompilation to take effect without changing any of the
1266  // call sites.
1267  ldr(r3, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
1268  InvokeCode(r3, expected, actual, flag, call_wrapper);
1269 }
1270 
1271 
1272 void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
1273  const ParameterCount& expected,
1274  const ParameterCount& actual,
1275  InvokeFlag flag,
1276  const CallWrapper& call_wrapper) {
1277  Move(r1, function);
1278  InvokeFunction(r1, expected, actual, flag, call_wrapper);
1279 }
1280 
1281 
1282 void MacroAssembler::IsObjectJSObjectType(Register heap_object,
1283  Register map,
1284  Register scratch,
1285  Label* fail) {
1286  ldr(map, FieldMemOperand(heap_object, HeapObject::kMapOffset));
1287  IsInstanceJSObjectType(map, scratch, fail);
1288 }
1289 
1290 
1291 void MacroAssembler::IsInstanceJSObjectType(Register map,
1292  Register scratch,
1293  Label* fail) {
1294  ldrb(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
1295  cmp(scratch, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
1296  b(lt, fail);
1297  cmp(scratch, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
1298  b(gt, fail);
1299 }
1300 
1301 
1302 void MacroAssembler::IsObjectJSStringType(Register object,
1303  Register scratch,
1304  Label* fail) {
1305  ASSERT(kNotStringTag != 0);
1306 
1307  ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
1308  ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
1309  tst(scratch, Operand(kIsNotStringMask));
1310  b(ne, fail);
1311 }
1312 
1313 
1314 void MacroAssembler::IsObjectNameType(Register object,
1315  Register scratch,
1316  Label* fail) {
1317  ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
1318  ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
1319  cmp(scratch, Operand(LAST_NAME_TYPE));
1320  b(hi, fail);
1321 }
1322 
1323 
1324 #ifdef ENABLE_DEBUGGER_SUPPORT
1325 void MacroAssembler::DebugBreak() {
1326  mov(r0, Operand::Zero());
1327  mov(r1, Operand(ExternalReference(Runtime::kDebugBreak, isolate())));
1328  CEntryStub ces(1);
1329  ASSERT(AllowThisStubCall(&ces));
1330  Call(ces.GetCode(isolate()), RelocInfo::DEBUG_BREAK);
1331 }
1332 #endif
1333 
1334 
1335 void MacroAssembler::PushTryHandler(StackHandler::Kind kind,
1336  int handler_index) {
1337  // Adjust this code if not the case.
1338  STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
1339  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
1340  STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
1341  STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
1342  STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
1343  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
1344 
1345  // For the JSEntry handler, we must preserve r0-r4, r5-r6 are available.
1346  // We will build up the handler from the bottom by pushing on the stack.
1347  // Set up the code object (r5) and the state (r6) for pushing.
1348  unsigned state =
1349  StackHandler::IndexField::encode(handler_index) |
1350  StackHandler::KindField::encode(kind);
1351  mov(r5, Operand(CodeObject()));
1352  mov(r6, Operand(state));
1353 
1354  // Push the frame pointer, context, state, and code object.
1355  if (kind == StackHandler::JS_ENTRY) {
1356  mov(cp, Operand(Smi::FromInt(0))); // Indicates no context.
1357  mov(ip, Operand::Zero()); // NULL frame pointer.
1358  stm(db_w, sp, r5.bit() | r6.bit() | cp.bit() | ip.bit());
1359  } else {
1360  stm(db_w, sp, r5.bit() | r6.bit() | cp.bit() | fp.bit());
1361  }
1362 
1363  // Link the current handler as the next handler.
1364  mov(r6, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
1365  ldr(r5, MemOperand(r6));
1366  push(r5);
1367  // Set this new handler as the current one.
1368  str(sp, MemOperand(r6));
1369 }
1370 
1371 
1372 void MacroAssembler::PopTryHandler() {
1373  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
1374  pop(r1);
1375  mov(ip, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
1376  add(sp, sp, Operand(StackHandlerConstants::kSize - kPointerSize));
1377  str(r1, MemOperand(ip));
1378 }
1379 
1380 
1381 void MacroAssembler::JumpToHandlerEntry() {
1382  // Compute the handler entry address and jump to it. The handler table is
1383  // a fixed array of (smi-tagged) code offsets.
1384  // r0 = exception, r1 = code object, r2 = state.
1385 
1386  ConstantPoolUnavailableScope constant_pool_unavailable(this);
1387  if (FLAG_enable_ool_constant_pool) {
1388  ldr(pp, FieldMemOperand(r1, Code::kConstantPoolOffset)); // Constant pool.
1389  }
1390  ldr(r3, FieldMemOperand(r1, Code::kHandlerTableOffset)); // Handler table.
1391  add(r3, r3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
1392  mov(r2, Operand(r2, LSR, StackHandler::kKindWidth)); // Handler index.
1393  ldr(r2, MemOperand(r3, r2, LSL, kPointerSizeLog2)); // Smi-tagged offset.
1394  add(r1, r1, Operand(Code::kHeaderSize - kHeapObjectTag)); // Code start.
1395  add(pc, r1, Operand::SmiUntag(r2)); // Jump
1396 }
1397 
1398 
1399 void MacroAssembler::Throw(Register value) {
1400  // Adjust this code if not the case.
1401  STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
1402  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
1403  STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
1404  STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
1405  STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
1406  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
1407 
1408  // The exception is expected in r0.
1409  if (!value.is(r0)) {
1410  mov(r0, value);
1411  }
1412  // Drop the stack pointer to the top of the top handler.
1413  mov(r3, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
1414  ldr(sp, MemOperand(r3));
1415  // Restore the next handler.
1416  pop(r2);
1417  str(r2, MemOperand(r3));
1418 
1419  // Get the code object (r1) and state (r2). Restore the context and frame
1420  // pointer.
1421  ldm(ia_w, sp, r1.bit() | r2.bit() | cp.bit() | fp.bit());
1422 
1423  // If the handler is a JS frame, restore the context to the frame.
1424  // (kind == ENTRY) == (fp == 0) == (cp == 0), so we could test either fp
1425  // or cp.
1426  tst(cp, cp);
1427  str(cp, MemOperand(fp, StandardFrameConstants::kContextOffset), ne);
1428 
1429  JumpToHandlerEntry();
1430 }
1431 
1432 
1433 void MacroAssembler::ThrowUncatchable(Register value) {
1434  // Adjust this code if not the case.
1435  STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
1436  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
1437  STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
1438  STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
1439  STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
1440  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
1441 
1442  // The exception is expected in r0.
1443  if (!value.is(r0)) {
1444  mov(r0, value);
1445  }
1446  // Drop the stack pointer to the top of the top stack handler.
1447  mov(r3, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
1448  ldr(sp, MemOperand(r3));
1449 
1450  // Unwind the handlers until the ENTRY handler is found.
1451  Label fetch_next, check_kind;
1452  jmp(&check_kind);
1453  bind(&fetch_next);
1454  ldr(sp, MemOperand(sp, StackHandlerConstants::kNextOffset));
1455 
1456  bind(&check_kind);
1457  STATIC_ASSERT(StackHandler::JS_ENTRY == 0);
1458  ldr(r2, MemOperand(sp, StackHandlerConstants::kStateOffset));
1459  tst(r2, Operand(StackHandler::KindField::kMask));
1460  b(ne, &fetch_next);
1461 
1462  // Set the top handler address to next handler past the top ENTRY handler.
1463  pop(r2);
1464  str(r2, MemOperand(r3));
1465  // Get the code object (r1) and state (r2). Clear the context and frame
1466  // pointer (0 was saved in the handler).
1467  ldm(ia_w, sp, r1.bit() | r2.bit() | cp.bit() | fp.bit());
1468 
1469  JumpToHandlerEntry();
1470 }
1471 
1472 
1473 void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
1474  Register scratch,
1475  Label* miss) {
1476  Label same_contexts;
1477 
1478  ASSERT(!holder_reg.is(scratch));
1479  ASSERT(!holder_reg.is(ip));
1480  ASSERT(!scratch.is(ip));
1481 
1482  // Load current lexical context from the stack frame.
1483  ldr(scratch, MemOperand(fp, StandardFrameConstants::kContextOffset));
1484  // In debug mode, make sure the lexical context is set.
1485 #ifdef DEBUG
1486  cmp(scratch, Operand::Zero());
1487  Check(ne, kWeShouldNotHaveAnEmptyLexicalContext);
1488 #endif
1489 
1490  // Load the native context of the current context.
1491  int offset =
1492  Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize;
1493  ldr(scratch, FieldMemOperand(scratch, offset));
1494  ldr(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset));
1495 
1496  // Check the context is a native context.
1497  if (emit_debug_code()) {
1498  // Cannot use ip as a temporary in this verification code, because ip is
1499  // clobbered as part of cmp with an object Operand.
1500  push(holder_reg); // Temporarily save holder on the stack.
1501  // Read the first word and compare to the native_context_map.
1502  ldr(holder_reg, FieldMemOperand(scratch, HeapObject::kMapOffset));
1503  LoadRoot(ip, Heap::kNativeContextMapRootIndex);
1504  cmp(holder_reg, ip);
1505  Check(eq, kJSGlobalObjectNativeContextShouldBeANativeContext);
1506  pop(holder_reg); // Restore holder.
1507  }
1508 
1509  // Check if both contexts are the same.
1510  ldr(ip, FieldMemOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
1511  cmp(scratch, Operand(ip));
1512  b(eq, &same_contexts);
1513 
1514  // Check the context is a native context.
1515  if (emit_debug_code()) {
1516  // Cannot use ip as a temporary in this verification code, because ip is
1517  // clobbered as part of cmp with an object Operand.
1518  push(holder_reg); // Temporarily save holder on the stack.
1519  mov(holder_reg, ip); // Move ip to its holding place.
1520  LoadRoot(ip, Heap::kNullValueRootIndex);
1521  cmp(holder_reg, ip);
1522  Check(ne, kJSGlobalProxyContextShouldNotBeNull);
1523 
1524  ldr(holder_reg, FieldMemOperand(holder_reg, HeapObject::kMapOffset));
1525  LoadRoot(ip, Heap::kNativeContextMapRootIndex);
1526  cmp(holder_reg, ip);
1527  Check(eq, kJSGlobalObjectNativeContextShouldBeANativeContext);
1528  // Restoring ip is not needed; ip is reloaded below.
1529  pop(holder_reg); // Restore holder.
1530  // Restore ip to holder's context.
1531  ldr(ip, FieldMemOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
1532  }
1533 
1534  // Check that the security token in the calling global object is
1535  // compatible with the security token in the receiving global
1536  // object.
1537  int token_offset = Context::kHeaderSize +
1538  Context::SECURITY_TOKEN_INDEX * kPointerSize;
1539 
1540  ldr(scratch, FieldMemOperand(scratch, token_offset));
1541  ldr(ip, FieldMemOperand(ip, token_offset));
1542  cmp(scratch, Operand(ip));
1543  b(ne, miss);
1544 
1545  bind(&same_contexts);
1546 }
1547 
1548 
1549 // Compute the hash code from the untagged key. This must be kept in sync with
1550 // ComputeIntegerHash in utils.h and KeyedLoadGenericElementStub in
1551 // code-stub-hydrogen.cc
1552 void MacroAssembler::GetNumberHash(Register t0, Register scratch) {
1553  // First of all we assign the hash seed to scratch.
1554  LoadRoot(scratch, Heap::kHashSeedRootIndex);
1555  SmiUntag(scratch);
1556 
1557  // Xor original key with a seed.
1558  eor(t0, t0, Operand(scratch));
1559 
1560  // Compute the hash code from the untagged key. This must be kept in sync
1561  // with ComputeIntegerHash in utils.h.
1562  //
1563  // hash = ~hash + (hash << 15);
1564  mvn(scratch, Operand(t0));
1565  add(t0, scratch, Operand(t0, LSL, 15));
1566  // hash = hash ^ (hash >> 12);
1567  eor(t0, t0, Operand(t0, LSR, 12));
1568  // hash = hash + (hash << 2);
1569  add(t0, t0, Operand(t0, LSL, 2));
1570  // hash = hash ^ (hash >> 4);
1571  eor(t0, t0, Operand(t0, LSR, 4));
1572  // hash = hash * 2057;
1573  mov(scratch, Operand(t0, LSL, 11));
1574  add(t0, t0, Operand(t0, LSL, 3));
1575  add(t0, t0, scratch);
1576  // hash = hash ^ (hash >> 16);
1577  eor(t0, t0, Operand(t0, LSR, 16));
1578 }
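// A standalone C++ version of the seeded integer hash computed above, with
// one line per step in the assembly comments; the "* 2057" multiply is the
// hash + (hash << 3) + (hash << 11) decomposition used by the generated code.
// Illustrative only, not V8 code (the canonical version is ComputeIntegerHash
// in utils.h); assumes <stdint.h>.
static inline uint32_t IntegerHashSketch(uint32_t key, uint32_t seed) {
  uint32_t hash = key ^ seed;     // eor t0, t0, seed
  hash = ~hash + (hash << 15);    // hash = ~hash + (hash << 15)
  hash = hash ^ (hash >> 12);     // hash = hash ^ (hash >> 12)
  hash = hash + (hash << 2);      // hash = hash + (hash << 2)
  hash = hash ^ (hash >> 4);      // hash = hash ^ (hash >> 4)
  hash = hash * 2057;             // hash = hash * 2057
  hash = hash ^ (hash >> 16);     // hash = hash ^ (hash >> 16)
  return hash;
}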
1579 
1580 
1581 void MacroAssembler::LoadFromNumberDictionary(Label* miss,
1582  Register elements,
1583  Register key,
1584  Register result,
1585  Register t0,
1586  Register t1,
1587  Register t2) {
1588  // Register use:
1589  //
1590  // elements - holds the slow-case elements of the receiver on entry.
1591  // Unchanged unless 'result' is the same register.
1592  //
1593  // key - holds the smi key on entry.
1594  // Unchanged unless 'result' is the same register.
1595  //
1596  // result - holds the result on exit if the load succeeded.
1597  // Allowed to be the same as 'key' or 'result'.
1598  // Unchanged on bailout so 'key' or 'result' can be used
1599  // in further computation.
1600  //
1601  // Scratch registers:
1602  //
1603  // t0 - holds the untagged key on entry and holds the hash once computed.
1604  //
1605  // t1 - used to hold the capacity mask of the dictionary
1606  //
1607  // t2 - used for the index into the dictionary.
1608  Label done;
1609 
1610  GetNumberHash(t0, t1);
1611 
1612  // Compute the capacity mask.
1613  ldr(t1, FieldMemOperand(elements, SeededNumberDictionary::kCapacityOffset));
1614  SmiUntag(t1);
1615  sub(t1, t1, Operand(1));
1616 
1617  // Generate an unrolled loop that performs a few probes before giving up.
1618  for (int i = 0; i < kNumberDictionaryProbes; i++) {
1619  // Use t2 for index calculations and keep the hash intact in t0.
1620  mov(t2, t0);
1621  // Compute the masked index: (hash + i + i * i) & mask.
1622  if (i > 0) {
1623  add(t2, t2, Operand(SeededNumberDictionary::GetProbeOffset(i)));
1624  }
1625  and_(t2, t2, Operand(t1));
1626 
1627  // Scale the index by multiplying by the element size.
1628  ASSERT(SeededNumberDictionary::kEntrySize == 3);
1629  add(t2, t2, Operand(t2, LSL, 1)); // t2 = t2 * 3
1630 
1631  // Check if the key is identical to the name.
1632  add(t2, elements, Operand(t2, LSL, kPointerSizeLog2));
1633  ldr(ip, FieldMemOperand(t2, SeededNumberDictionary::kElementsStartOffset));
1634  cmp(key, Operand(ip));
1635  if (i != kNumberDictionaryProbes - 1) {
1636  b(eq, &done);
1637  } else {
1638  b(ne, miss);
1639  }
1640  }
1641 
1642  bind(&done);
1643  // Check that the value is a normal property.
1644  // t2: elements + (index * kPointerSize)
1645  const int kDetailsOffset =
1646  SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize;
1647  ldr(t1, FieldMemOperand(t2, kDetailsOffset));
1648  tst(t1, Operand(Smi::FromInt(PropertyDetails::TypeField::kMask)));
1649  b(ne, miss);
1650 
1651  // Get the value at the masked, scaled index and return.
1652  const int kValueOffset =
1653  SeededNumberDictionary::kElementsStartOffset + kPointerSize;
1654  ldr(result, FieldMemOperand(t2, kValueOffset));
1655 }
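// A standalone sketch of the probe-index arithmetic in the unrolled loop
// above: quadratic probing over a power-of-two capacity, with each entry
// spanning three pointer-sized slots (key, value, details). Illustrative
// only, not V8 code; it assumes GetProbeOffset(i) is the triangular offset
// (i + i * i) / 2 and that <stdint.h> is available.
static inline uint32_t ProbeEntryIndexSketch(uint32_t hash, uint32_t probe,
                                             uint32_t capacity_mask) {
  uint32_t index = hash;
  if (probe > 0) index += (probe + probe * probe) / 2;  // GetProbeOffset(probe)
  index &= capacity_mask;                               // (hash + offset) & mask
  return index * 3;                                     // kEntrySize == 3 slots
}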
1656 
1657 
1658 void MacroAssembler::Allocate(int object_size,
1659  Register result,
1660  Register scratch1,
1661  Register scratch2,
1662  Label* gc_required,
1663  AllocationFlags flags) {
1664  ASSERT(object_size <= Page::kMaxRegularHeapObjectSize);
1665  if (!FLAG_inline_new) {
1666  if (emit_debug_code()) {
1667  // Trash the registers to simulate an allocation failure.
1668  mov(result, Operand(0x7091));
1669  mov(scratch1, Operand(0x7191));
1670  mov(scratch2, Operand(0x7291));
1671  }
1672  jmp(gc_required);
1673  return;
1674  }
1675 
1676  ASSERT(!result.is(scratch1));
1677  ASSERT(!result.is(scratch2));
1678  ASSERT(!scratch1.is(scratch2));
1679  ASSERT(!scratch1.is(ip));
1680  ASSERT(!scratch2.is(ip));
1681 
1682  // Make object size into bytes.
1683  if ((flags & SIZE_IN_WORDS) != 0) {
1684  object_size *= kPointerSize;
1685  }
1686  ASSERT_EQ(0, object_size & kObjectAlignmentMask);
1687 
1688  // Check relative positions of allocation top and limit addresses.
1689  // The values must be adjacent in memory to allow the use of LDM.
1690  // Also, assert that the registers are numbered such that the values
1691  // are loaded in the correct order.
1692  ExternalReference allocation_top =
1693  AllocationUtils::GetAllocationTopReference(isolate(), flags);
1694  ExternalReference allocation_limit =
1695  AllocationUtils::GetAllocationLimitReference(isolate(), flags);
1696 
1697  intptr_t top =
1698  reinterpret_cast<intptr_t>(allocation_top.address());
1699  intptr_t limit =
1700  reinterpret_cast<intptr_t>(allocation_limit.address());
1701  ASSERT((limit - top) == kPointerSize);
1702  ASSERT(result.code() < ip.code());
1703 
1704  // Set up allocation top address register.
1705  Register topaddr = scratch1;
1706  mov(topaddr, Operand(allocation_top));
1707 
1708  // This code stores a temporary value in ip. This is OK, as the code below
1709  // does not need ip for implicit literal generation.
1710  if ((flags & RESULT_CONTAINS_TOP) == 0) {
1711  // Load allocation top into result and allocation limit into ip.
1712  ldm(ia, topaddr, result.bit() | ip.bit());
1713  } else {
1714  if (emit_debug_code()) {
1715  // Assert that result actually contains top on entry. ip is used
1716  // immediately below, so this use of ip does not cause a difference in
1717  // register content between debug and release mode.
1718  ldr(ip, MemOperand(topaddr));
1719  cmp(result, ip);
1720  Check(eq, kUnexpectedAllocationTop);
1721  }
1722  // Load allocation limit into ip. Result already contains allocation top.
1723  ldr(ip, MemOperand(topaddr, limit - top));
1724  }
1725 
1726  if ((flags & DOUBLE_ALIGNMENT) != 0) {
1727  // Align the next allocation. Storing the filler map without checking top is
1728  // safe in new-space because the limit of the heap is aligned there.
1729  ASSERT((flags & PRETENURE_OLD_POINTER_SPACE) == 0);
1730  STATIC_ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
1731  and_(scratch2, result, Operand(kDoubleAlignmentMask), SetCC);
1732  Label aligned;
1733  b(eq, &aligned);
1734  if ((flags & PRETENURE_OLD_DATA_SPACE) != 0) {
1735  cmp(result, Operand(ip));
1736  b(hs, gc_required);
1737  }
1738  mov(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
1739  str(scratch2, MemOperand(result, kDoubleSize / 2, PostIndex));
1740  bind(&aligned);
1741  }
1742 
1743  // Calculate new top and bail out if new space is exhausted. Use result
1744  // to calculate the new top. We must preserve the ip register at this
1745  // point, so we cannot just use add().
1746  ASSERT(object_size > 0);
1747  Register source = result;
1748  Condition cond = al;
1749  int shift = 0;
1750  while (object_size != 0) {
1751  if (((object_size >> shift) & 0x03) == 0) {
1752  shift += 2;
1753  } else {
1754  int bits = object_size & (0xff << shift);
1755  object_size -= bits;
1756  shift += 8;
1757  Operand bits_operand(bits);
1758  ASSERT(bits_operand.is_single_instruction(this));
1759  add(scratch2, source, bits_operand, SetCC, cond);
1760  source = scratch2;
1761  cond = cc;
1762  }
1763  }
1764  b(cs, gc_required);
1765  cmp(scratch2, Operand(ip));
1766  b(hi, gc_required);
1767  str(scratch2, MemOperand(topaddr));
1768 
1769  // Tag object if requested.
1770  if ((flags & TAG_OBJECT) != 0) {
1771  add(result, result, Operand(kHeapObjectTag));
1772  }
1773 }
1774 
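// Illustration only (not part of this file): a sketch of how the loop above
// splits 'object_size' into chunks that are each encodable as a single ARM
// immediate (an 8-bit value at an even rotation), so the new top can be built
// with a short chain of adds while ip stays reserved for the limit.
static inline int CountArmImmediateChunksSketch(int object_size) {
  int chunks = 0;
  int shift = 0;
  while (object_size != 0) {
    if (((object_size >> shift) & 0x03) == 0) {
      shift += 2;  // rotations advance in steps of two bits
    } else {
      object_size -= object_size & (0xff << shift);  // peel one encodable chunk
      shift += 8;
      chunks++;  // one add(scratch2, source, chunk, SetCC, cond) per chunk
    }
  }
  return chunks;
}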
1775 
1776 void MacroAssembler::Allocate(Register object_size,
1777  Register result,
1778  Register scratch1,
1779  Register scratch2,
1780  Label* gc_required,
1781  AllocationFlags flags) {
1782  if (!FLAG_inline_new) {
1783  if (emit_debug_code()) {
1784  // Trash the registers to simulate an allocation failure.
1785  mov(result, Operand(0x7091));
1786  mov(scratch1, Operand(0x7191));
1787  mov(scratch2, Operand(0x7291));
1788  }
1789  jmp(gc_required);
1790  return;
1791  }
1792 
1793  // Assert that the register arguments are different and that none of
1794  // them are ip. ip is used explicitly in the code generated below.
1795  ASSERT(!result.is(scratch1));
1796  ASSERT(!result.is(scratch2));
1797  ASSERT(!scratch1.is(scratch2));
1798  ASSERT(!object_size.is(ip));
1799  ASSERT(!result.is(ip));
1800  ASSERT(!scratch1.is(ip));
1801  ASSERT(!scratch2.is(ip));
1802 
1803  // Check relative positions of allocation top and limit addresses.
1804  // The values must be adjacent in memory to allow the use of LDM.
1805  // Also, assert that the registers are numbered such that the values
1806  // are loaded in the correct order.
1807  ExternalReference allocation_top =
1808  AllocationUtils::GetAllocationTopReference(isolate(), flags);
1809  ExternalReference allocation_limit =
1810  AllocationUtils::GetAllocationLimitReference(isolate(), flags);
1811  intptr_t top =
1812  reinterpret_cast<intptr_t>(allocation_top.address());
1813  intptr_t limit =
1814  reinterpret_cast<intptr_t>(allocation_limit.address());
1815  ASSERT((limit - top) == kPointerSize);
1816  ASSERT(result.code() < ip.code());
1817 
1818  // Set up allocation top address.
1819  Register topaddr = scratch1;
1820  mov(topaddr, Operand(allocation_top));
1821 
1822  // This code stores a temporary value in ip. This is OK, as the code below
1823  // does not need ip for implicit literal generation.
1824  if ((flags & RESULT_CONTAINS_TOP) == 0) {
1825  // Load allocation top into result and allocation limit into ip.
1826  ldm(ia, topaddr, result.bit() | ip.bit());
1827  } else {
1828  if (emit_debug_code()) {
1829  // Assert that result actually contains top on entry. ip is used
1830  // immediately below, so this use of ip does not cause a difference in
1831  // register content between debug and release mode.
1832  ldr(ip, MemOperand(topaddr));
1833  cmp(result, ip);
1834  Check(eq, kUnexpectedAllocationTop);
1835  }
1836  // Load allocation limit into ip. Result already contains allocation top.
1837  ldr(ip, MemOperand(topaddr, limit - top));
1838  }
1839 
1840  if ((flags & DOUBLE_ALIGNMENT) != 0) {
1841  // Align the next allocation. Storing the filler map without checking top is
1842  // safe in new-space because the limit of the heap is aligned there.
1843  ASSERT((flags & PRETENURE_OLD_POINTER_SPACE) == 0);
1844  STATIC_ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
1845  and_(scratch2, result, Operand(kDoubleAlignmentMask), SetCC);
1846  Label aligned;
1847  b(eq, &aligned);
1848  if ((flags & PRETENURE_OLD_DATA_SPACE) != 0) {
1849  cmp(result, Operand(ip));
1850  b(hs, gc_required);
1851  }
1852  mov(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
1853  str(scratch2, MemOperand(result, kDoubleSize / 2, PostIndex));
1854  bind(&aligned);
1855  }
1856 
1857  // Calculate new top and bail out if new space is exhausted. Use result
1858  // to calculate the new top. Object size may be in words so a shift is
1859  // required to get the number of bytes.
1860  if ((flags & SIZE_IN_WORDS) != 0) {
1861  add(scratch2, result, Operand(object_size, LSL, kPointerSizeLog2), SetCC);
1862  } else {
1863  add(scratch2, result, Operand(object_size), SetCC);
1864  }
1865  b(cs, gc_required);
1866  cmp(scratch2, Operand(ip));
1867  b(hi, gc_required);
1868 
1869  // Update allocation top. result temporarily holds the new top.
1870  if (emit_debug_code()) {
1871  tst(scratch2, Operand(kObjectAlignmentMask));
1872  Check(eq, kUnalignedAllocationInNewSpace);
1873  }
1874  str(scratch2, MemOperand(topaddr));
1875 
1876  // Tag object if requested.
1877  if ((flags & TAG_OBJECT) != 0) {
1878  add(result, result, Operand(kHeapObjectTag));
1879  }
1880 }
1881 
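// Illustration only (not part of this file): the C-level shape of the fast
// path generated by both Allocate() variants above. The names are stand-ins;
// in the generated code top and limit live in adjacent external references so
// they can be fetched with a single ldm, and the limit stays cached in ip.
#include <stdint.h>

static inline bool BumpPointerAllocateSketch(uintptr_t* top_addr,
                                             uintptr_t limit,
                                             uintptr_t size_in_bytes,
                                             uintptr_t* result) {
  uintptr_t top = *top_addr;
  uintptr_t new_top = top + size_in_bytes;
  if (new_top < top || new_top > limit) return false;  // -> gc_required
  *top_addr = new_top;   // str(scratch2, MemOperand(topaddr))
  *result = top + 1;     // kHeapObjectTag, when TAG_OBJECT is requested
  return true;
}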
1882 
1883 void MacroAssembler::UndoAllocationInNewSpace(Register object,
1884  Register scratch) {
1885  ExternalReference new_space_allocation_top =
1886  ExternalReference::new_space_allocation_top_address(isolate());
1887 
1888  // Make sure the object has no tag before resetting top.
1889  and_(object, object, Operand(~kHeapObjectTagMask));
1890 #ifdef DEBUG
1891  // Check that the object un-allocated is below the current top.
1892  mov(scratch, Operand(new_space_allocation_top));
1893  ldr(scratch, MemOperand(scratch));
1894  cmp(object, scratch);
1895  Check(lt, kUndoAllocationOfNonAllocatedMemory);
1896 #endif
1897  // Write the address of the object to un-allocate as the current top.
1898  mov(scratch, Operand(new_space_allocation_top));
1899  str(object, MemOperand(scratch));
1900 }
1901 
1902 
1903 void MacroAssembler::AllocateTwoByteString(Register result,
1904  Register length,
1905  Register scratch1,
1906  Register scratch2,
1907  Register scratch3,
1908  Label* gc_required) {
1909  // Calculate the number of bytes needed for the characters in the string while
1910  // observing object alignment.
1911  ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
1912  mov(scratch1, Operand(length, LSL, 1)); // Length in bytes, not chars.
1913  add(scratch1, scratch1,
1914  Operand(kObjectAlignmentMask + SeqTwoByteString::kHeaderSize));
1915  and_(scratch1, scratch1, Operand(~kObjectAlignmentMask));
1916 
1917  // Allocate two-byte string in new space.
1918  Allocate(scratch1,
1919  result,
1920  scratch2,
1921  scratch3,
1922  gc_required,
1923  TAG_OBJECT);
1924 
1925  // Set the map, length and hash field.
1926  InitializeNewString(result,
1927  length,
1928  Heap::kStringMapRootIndex,
1929  scratch1,
1930  scratch2);
1931 }
1932 
1933 
1934 void MacroAssembler::AllocateAsciiString(Register result,
1935  Register length,
1936  Register scratch1,
1937  Register scratch2,
1938  Register scratch3,
1939  Label* gc_required) {
1940  // Calculate the number of bytes needed for the characters in the string while
1941  // observing object alignment.
1942  ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
1943  ASSERT(kCharSize == 1);
1944  add(scratch1, length,
1945  Operand(kObjectAlignmentMask + SeqOneByteString::kHeaderSize));
1946  and_(scratch1, scratch1, Operand(~kObjectAlignmentMask));
1947 
1948  // Allocate ASCII string in new space.
1949  Allocate(scratch1,
1950  result,
1951  scratch2,
1952  scratch3,
1953  gc_required,
1954  TAG_OBJECT);
1955 
1956  // Set the map, length and hash field.
1957  InitializeNewString(result,
1958  length,
1959  Heap::kAsciiStringMapRootIndex,
1960  scratch1,
1961  scratch2);
1962 }
1963 
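// Illustration only (not part of this file): the size computation both string
// allocators above perform with shifted operands. The character size is 2 for
// SeqTwoByteString and 1 for SeqOneByteString, and the byte count is rounded
// up to the object alignment before Allocate() is called.
static inline int SeqStringSizeInBytesSketch(int length,
                                             int char_size,
                                             int header_size,
                                             int alignment_mask) {
  int size = length * char_size + alignment_mask + header_size;
  return size & ~alignment_mask;
}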
1964 
1965 void MacroAssembler::AllocateTwoByteConsString(Register result,
1966  Register length,
1967  Register scratch1,
1968  Register scratch2,
1969  Label* gc_required) {
1970  Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
1971  TAG_OBJECT);
1972 
1973  InitializeNewString(result,
1974  length,
1975  Heap::kConsStringMapRootIndex,
1976  scratch1,
1977  scratch2);
1978 }
1979 
1980 
1981 void MacroAssembler::AllocateAsciiConsString(Register result,
1982  Register length,
1983  Register scratch1,
1984  Register scratch2,
1985  Label* gc_required) {
1986  Label allocate_new_space, install_map;
1987  AllocationFlags flags = TAG_OBJECT;
1988 
1989  ExternalReference high_promotion_mode = ExternalReference::
1990  new_space_high_promotion_mode_active_address(isolate());
1991  mov(scratch1, Operand(high_promotion_mode));
1992  ldr(scratch1, MemOperand(scratch1, 0));
1993  cmp(scratch1, Operand::Zero());
1994  b(eq, &allocate_new_space);
1995 
1996  Allocate(ConsString::kSize,
1997  result,
1998  scratch1,
1999  scratch2,
2000  gc_required,
2001  static_cast<AllocationFlags>(flags | PRETENURE_OLD_POINTER_SPACE));
2002 
2003  jmp(&install_map);
2004 
2005  bind(&allocate_new_space);
2006  Allocate(ConsString::kSize,
2007  result,
2008  scratch1,
2009  scratch2,
2010  gc_required,
2011  flags);
2012 
2013  bind(&install_map);
2014 
2015  InitializeNewString(result,
2016  length,
2017  Heap::kConsAsciiStringMapRootIndex,
2018  scratch1,
2019  scratch2);
2020 }
2021 
2022 
2023 void MacroAssembler::AllocateTwoByteSlicedString(Register result,
2024  Register length,
2025  Register scratch1,
2026  Register scratch2,
2027  Label* gc_required) {
2028  Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
2029  TAG_OBJECT);
2030 
2031  InitializeNewString(result,
2032  length,
2033  Heap::kSlicedStringMapRootIndex,
2034  scratch1,
2035  scratch2);
2036 }
2037 
2038 
2039 void MacroAssembler::AllocateAsciiSlicedString(Register result,
2040  Register length,
2041  Register scratch1,
2042  Register scratch2,
2043  Label* gc_required) {
2044  Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
2045  TAG_OBJECT);
2046 
2047  InitializeNewString(result,
2048  length,
2049  Heap::kSlicedAsciiStringMapRootIndex,
2050  scratch1,
2051  scratch2);
2052 }
2053 
2054 
2055 void MacroAssembler::CompareObjectType(Register object,
2056  Register map,
2057  Register type_reg,
2058  InstanceType type) {
2059  const Register temp = type_reg.is(no_reg) ? ip : type_reg;
2060 
2061  ldr(map, FieldMemOperand(object, HeapObject::kMapOffset));
2062  CompareInstanceType(map, temp, type);
2063 }
2064 
2065 
2066 void MacroAssembler::CheckObjectTypeRange(Register object,
2067  Register map,
2068  InstanceType min_type,
2069  InstanceType max_type,
2070  Label* false_label) {
2071  STATIC_ASSERT(Map::kInstanceTypeOffset < 4096);
2072  STATIC_ASSERT(LAST_TYPE < 256);
2073  ldr(map, FieldMemOperand(object, HeapObject::kMapOffset));
2074  ldrb(ip, FieldMemOperand(map, Map::kInstanceTypeOffset));
2075  sub(ip, ip, Operand(min_type));
2076  cmp(ip, Operand(max_type - min_type));
2077  b(hi, false_label);
2078 }
2079 
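// Illustration only (not part of this file): the sub/cmp pair above is the
// usual unsigned range-check trick; one unsigned comparison covers both the
// lower and upper bound.
static inline bool InstanceTypeInRangeSketch(unsigned type,
                                             unsigned min_type,
                                             unsigned max_type) {
  return (type - min_type) <= (max_type - min_type);  // b(hi, false_label)
}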
2080 
2081 void MacroAssembler::CompareInstanceType(Register map,
2082  Register type_reg,
2083  InstanceType type) {
2084  // Registers map and type_reg can be ip. These two lines assert
2085  // that ip can be used with the two instructions (the constants
2086  // will never need ip).
2087  STATIC_ASSERT(Map::kInstanceTypeOffset < 4096);
2088  STATIC_ASSERT(LAST_TYPE < 256);
2089  ldrb(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
2090  cmp(type_reg, Operand(type));
2091 }
2092 
2093 
2094 void MacroAssembler::CompareRoot(Register obj,
2095  Heap::RootListIndex index) {
2096  ASSERT(!obj.is(ip));
2097  LoadRoot(ip, index);
2098  cmp(obj, ip);
2099 }
2100 
2101 
2102 void MacroAssembler::CheckFastElements(Register map,
2103  Register scratch,
2104  Label* fail) {
2105  STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
2106  STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
2107  STATIC_ASSERT(FAST_ELEMENTS == 2);
2108  STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
2109  ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset));
2110  cmp(scratch, Operand(Map::kMaximumBitField2FastHoleyElementValue));
2111  b(hi, fail);
2112 }
2113 
2114 
2115 void MacroAssembler::CheckFastObjectElements(Register map,
2116  Register scratch,
2117  Label* fail) {
2118  STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
2119  STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
2120  STATIC_ASSERT(FAST_ELEMENTS == 2);
2121  STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
2122  ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset));
2123  cmp(scratch, Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
2124  b(ls, fail);
2125  cmp(scratch, Operand(Map::kMaximumBitField2FastHoleyElementValue));
2126  b(hi, fail);
2127 }
2128 
2129 
2130 void MacroAssembler::CheckFastSmiElements(Register map,
2131  Register scratch,
2132  Label* fail) {
2133  STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
2134  STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
2135  ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset));
2136  cmp(scratch, Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
2137  b(hi, fail);
2138 }
2139 
2140 
2141 void MacroAssembler::StoreNumberToDoubleElements(
2142  Register value_reg,
2143  Register key_reg,
2144  Register elements_reg,
2145  Register scratch1,
2146  LowDwVfpRegister double_scratch,
2147  Label* fail,
2148  int elements_offset) {
2149  Label smi_value, store;
2150 
2151  // Handle smi values specially.
2152  JumpIfSmi(value_reg, &smi_value);
2153 
2154  // Ensure that the object is a heap number
2155  CheckMap(value_reg,
2156  scratch1,
2157  isolate()->factory()->heap_number_map(),
2158  fail,
2159  DONT_DO_SMI_CHECK);
2160 
2161  vldr(double_scratch, FieldMemOperand(value_reg, HeapNumber::kValueOffset));
2162  // Force a canonical NaN.
2163  if (emit_debug_code()) {
2164  vmrs(ip);
2165  tst(ip, Operand(kVFPDefaultNaNModeControlBit));
2166  Assert(ne, kDefaultNaNModeNotSet);
2167  }
2168  VFPCanonicalizeNaN(double_scratch);
2169  b(&store);
2170 
2171  bind(&smi_value);
2172  SmiToDouble(double_scratch, value_reg);
2173 
2174  bind(&store);
2175  add(scratch1, elements_reg, Operand::DoubleOffsetFromSmiKey(key_reg));
2176  vstr(double_scratch,
2177  FieldMemOperand(scratch1,
2178  FixedDoubleArray::kHeaderSize - elements_offset));
2179 }
2180 
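// Illustration only (not part of this file): what the DoubleOffsetFromSmiKey
// operand above amounts to, assuming ARM's 1-bit smi tag and 8-byte doubles:
// untagging the key and scaling by kDoubleSize is a single shift of the smi.
static inline int DoubleOffsetFromSmiKeySketch(int smi_key) {
  int index = smi_key >> 1;  // untag (kSmiTagSize == 1)
  return index * 8;          // kDoubleSize; equals smi_key << 2
}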
2181 
2182 void MacroAssembler::CompareMap(Register obj,
2183  Register scratch,
2184  Handle<Map> map,
2185  Label* early_success) {
2186  ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
2187  CompareMap(scratch, map, early_success);
2188 }
2189 
2190 
2191 void MacroAssembler::CompareMap(Register obj_map,
2192  Handle<Map> map,
2193  Label* early_success) {
2194  cmp(obj_map, Operand(map));
2195 }
2196 
2197 
2198 void MacroAssembler::CheckMap(Register obj,
2199  Register scratch,
2200  Handle<Map> map,
2201  Label* fail,
2202  SmiCheckType smi_check_type) {
2203  if (smi_check_type == DO_SMI_CHECK) {
2204  JumpIfSmi(obj, fail);
2205  }
2206 
2207  Label success;
2208  CompareMap(obj, scratch, map, &success);
2209  b(ne, fail);
2210  bind(&success);
2211 }
2212 
2213 
2214 void MacroAssembler::CheckMap(Register obj,
2215  Register scratch,
2216  Heap::RootListIndex index,
2217  Label* fail,
2218  SmiCheckType smi_check_type) {
2219  if (smi_check_type == DO_SMI_CHECK) {
2220  JumpIfSmi(obj, fail);
2221  }
2222  ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
2223  LoadRoot(ip, index);
2224  cmp(scratch, ip);
2225  b(ne, fail);
2226 }
2227 
2228 
2229 void MacroAssembler::DispatchMap(Register obj,
2230  Register scratch,
2231  Handle<Map> map,
2232  Handle<Code> success,
2233  SmiCheckType smi_check_type) {
2234  Label fail;
2235  if (smi_check_type == DO_SMI_CHECK) {
2236  JumpIfSmi(obj, &fail);
2237  }
2238  ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
2239  mov(ip, Operand(map));
2240  cmp(scratch, ip);
2241  Jump(success, RelocInfo::CODE_TARGET, eq);
2242  bind(&fail);
2243 }
2244 
2245 
2246 void MacroAssembler::TryGetFunctionPrototype(Register function,
2247  Register result,
2248  Register scratch,
2249  Label* miss,
2250  bool miss_on_bound_function) {
2251  // Check that the receiver isn't a smi.
2252  JumpIfSmi(function, miss);
2253 
2254  // Check that the function really is a function. Load map into result reg.
2255  CompareObjectType(function, result, scratch, JS_FUNCTION_TYPE);
2256  b(ne, miss);
2257 
2258  if (miss_on_bound_function) {
2259  ldr(scratch,
2260  FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
2261  ldr(scratch,
2262  FieldMemOperand(scratch, SharedFunctionInfo::kCompilerHintsOffset));
2263  tst(scratch,
2264  Operand(Smi::FromInt(1 << SharedFunctionInfo::kBoundFunction)));
2265  b(ne, miss);
2266  }
2267 
2268  // Make sure that the function has an instance prototype.
2269  Label non_instance;
2270  ldrb(scratch, FieldMemOperand(result, Map::kBitFieldOffset));
2271  tst(scratch, Operand(1 << Map::kHasNonInstancePrototype));
2272  b(ne, &non_instance);
2273 
2274  // Get the prototype or initial map from the function.
2275  ldr(result,
2276  FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
2277 
2278  // If the prototype or initial map is the hole, don't return it and
2279  // simply miss the cache instead. This will allow us to allocate a
2280  // prototype object on-demand in the runtime system.
2281  LoadRoot(ip, Heap::kTheHoleValueRootIndex);
2282  cmp(result, ip);
2283  b(eq, miss);
2284 
2285  // If the function does not have an initial map, we're done.
2286  Label done;
2287  CompareObjectType(result, scratch, scratch, MAP_TYPE);
2288  b(ne, &done);
2289 
2290  // Get the prototype from the initial map.
2291  ldr(result, FieldMemOperand(result, Map::kPrototypeOffset));
2292  jmp(&done);
2293 
2294  // Non-instance prototype: Fetch prototype from constructor field
2295  // in initial map.
2296  bind(&non_instance);
2297  ldr(result, FieldMemOperand(result, Map::kConstructorOffset));
2298 
2299  // All done.
2300  bind(&done);
2301 }
2302 
2303 
2304 void MacroAssembler::CallStub(CodeStub* stub,
2305  TypeFeedbackId ast_id,
2306  Condition cond) {
2307  ASSERT(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs.
2308  Call(stub->GetCode(isolate()), RelocInfo::CODE_TARGET, ast_id, cond);
2309 }
2310 
2311 
2312 void MacroAssembler::TailCallStub(CodeStub* stub, Condition cond) {
2313  Jump(stub->GetCode(isolate()), RelocInfo::CODE_TARGET, cond);
2314 }
2315 
2316 
2317 static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
2318  return ref0.address() - ref1.address();
2319 }
2320 
2321 
2322 void MacroAssembler::CallApiFunctionAndReturn(
2323  Register function_address,
2324  ExternalReference thunk_ref,
2325  int stack_space,
2326  MemOperand return_value_operand,
2327  MemOperand* context_restore_operand) {
2328  ExternalReference next_address =
2329  ExternalReference::handle_scope_next_address(isolate());
2330  const int kNextOffset = 0;
2331  const int kLimitOffset = AddressOffset(
2332  ExternalReference::handle_scope_limit_address(isolate()),
2333  next_address);
2334  const int kLevelOffset = AddressOffset(
2335  ExternalReference::handle_scope_level_address(isolate()),
2336  next_address);
2337 
2338  ASSERT(function_address.is(r1) || function_address.is(r2));
2339 
2340  Label profiler_disabled;
2341  Label end_profiler_check;
2342  bool* is_profiling_flag =
2343  isolate()->cpu_profiler()->is_profiling_address();
2344  STATIC_ASSERT(sizeof(*is_profiling_flag) == 1);
2345  mov(r9, Operand(reinterpret_cast<int32_t>(is_profiling_flag)));
2346  ldrb(r9, MemOperand(r9, 0));
2347  cmp(r9, Operand(0));
2348  b(eq, &profiler_disabled);
2349 
2350  // Additional parameter is the address of the actual callback.
2351  mov(r3, Operand(thunk_ref));
2352  jmp(&end_profiler_check);
2353 
2354  bind(&profiler_disabled);
2355  Move(r3, function_address);
2356  bind(&end_profiler_check);
2357 
2358  // Allocate HandleScope in callee-save registers.
2359  mov(r9, Operand(next_address));
2360  ldr(r4, MemOperand(r9, kNextOffset));
2361  ldr(r5, MemOperand(r9, kLimitOffset));
2362  ldr(r6, MemOperand(r9, kLevelOffset));
2363  add(r6, r6, Operand(1));
2364  str(r6, MemOperand(r9, kLevelOffset));
2365 
2366  if (FLAG_log_timer_events) {
2367  FrameScope frame(this, StackFrame::MANUAL);
2368  PushSafepointRegisters();
2369  PrepareCallCFunction(1, r0);
2370  mov(r0, Operand(ExternalReference::isolate_address(isolate())));
2371  CallCFunction(ExternalReference::log_enter_external_function(isolate()), 1);
2372  PopSafepointRegisters();
2373  }
2374 
2375  // Native call returns to the DirectCEntry stub which redirects to the
2376  // return address pushed on stack (could have moved after GC).
2377  // DirectCEntry stub itself is generated early and never moves.
2378  DirectCEntryStub stub;
2379  stub.GenerateCall(this, r3);
2380 
2381  if (FLAG_log_timer_events) {
2382  FrameScope frame(this, StackFrame::MANUAL);
2383  PushSafepointRegisters();
2384  PrepareCallCFunction(1, r0);
2385  mov(r0, Operand(ExternalReference::isolate_address(isolate())));
2386  CallCFunction(ExternalReference::log_leave_external_function(isolate()), 1);
2387  PopSafepointRegisters();
2388  }
2389 
2390  Label promote_scheduled_exception;
2391  Label exception_handled;
2392  Label delete_allocated_handles;
2393  Label leave_exit_frame;
2394  Label return_value_loaded;
2395 
2396  // load value from ReturnValue
2397  ldr(r0, return_value_operand);
2398  bind(&return_value_loaded);
2399  // No more valid handles (the result handle was the last one). Restore
2400  // previous handle scope.
2401  str(r4, MemOperand(r9, kNextOffset));
2402  if (emit_debug_code()) {
2403  ldr(r1, MemOperand(r9, kLevelOffset));
2404  cmp(r1, r6);
2405  Check(eq, kUnexpectedLevelAfterReturnFromApiCall);
2406  }
2407  sub(r6, r6, Operand(1));
2408  str(r6, MemOperand(r9, kLevelOffset));
2409  ldr(ip, MemOperand(r9, kLimitOffset));
2410  cmp(r5, ip);
2411  b(ne, &delete_allocated_handles);
2412 
2413  // Check if the function scheduled an exception.
2414  bind(&leave_exit_frame);
2415  LoadRoot(r4, Heap::kTheHoleValueRootIndex);
2416  mov(ip, Operand(ExternalReference::scheduled_exception_address(isolate())));
2417  ldr(r5, MemOperand(ip));
2418  cmp(r4, r5);
2419  b(ne, &promote_scheduled_exception);
2420  bind(&exception_handled);
2421 
2422  bool restore_context = context_restore_operand != NULL;
2423  if (restore_context) {
2424  ldr(cp, *context_restore_operand);
2425  }
2426  // LeaveExitFrame expects unwind space to be in a register.
2427  mov(r4, Operand(stack_space));
2428  LeaveExitFrame(false, r4, !restore_context);
2429  mov(pc, lr);
2430 
2431  bind(&promote_scheduled_exception);
2432  {
2433  FrameScope frame(this, StackFrame::INTERNAL);
2434  CallExternalReference(
2435  ExternalReference(Runtime::kHiddenPromoteScheduledException, isolate()),
2436  0);
2437  }
2438  jmp(&exception_handled);
2439 
2440  // HandleScope limit has changed. Delete allocated extensions.
2441  bind(&delete_allocated_handles);
2442  str(r5, MemOperand(r9, kLimitOffset));
2443  mov(r4, r0);
2444  PrepareCallCFunction(1, r5);
2445  mov(r0, Operand(ExternalReference::isolate_address(isolate())));
2446  CallCFunction(
2447  ExternalReference::delete_handle_scope_extensions(isolate()), 1);
2448  mov(r0, r4);
2449  jmp(&leave_exit_frame);
2450 }
2451 
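// Illustration only (not part of this file): the handle-scope bookkeeping the
// code above performs in r4/r5/r6 around the API call. The struct and helpers
// are stand-ins for the next/limit/level slots addressed via 'next_address'.
struct HandleScopeDataSketch {
  void* next;   // saved in r4 and restored after the call
  void* limit;  // saved in r5; if it changed, allocated extensions are deleted
  int level;    // incremented before the call, decremented after it
};

static inline void EnterApiScopeSketch(HandleScopeDataSketch* data,
                                       HandleScopeDataSketch* saved) {
  *saved = *data;  // ldr r4/r5/r6
  data->level++;   // add + str
}

static inline bool LeaveApiScopeSketch(HandleScopeDataSketch* data,
                                       const HandleScopeDataSketch& saved) {
  data->next = saved.next;            // str(r4, MemOperand(r9, kNextOffset))
  data->level--;                      // sub + str
  return data->limit == saved.limit;  // false -> delete_allocated_handles
}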
2452 
2453 bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
2454  return has_frame_ || !stub->SometimesSetsUpAFrame();
2455 }
2456 
2457 
2458 void MacroAssembler::IllegalOperation(int num_arguments) {
2459  if (num_arguments > 0) {
2460  add(sp, sp, Operand(num_arguments * kPointerSize));
2461  }
2462  LoadRoot(r0, Heap::kUndefinedValueRootIndex);
2463 }
2464 
2465 
2466 void MacroAssembler::IndexFromHash(Register hash, Register index) {
2467  // If the hash field contains an array index, pick it out. The assert checks
2468  // that the constants for the maximum number of digits for an array index
2469  // cached in the hash field and the number of bits reserved for it do not
2470  // conflict.
2471  ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) <
2472  (1 << String::kArrayIndexValueBits));
2473  // We want the smi-tagged index in 'index'. kArrayIndexValueMask has zeros
2474  // in the low kHashShift bits.
2475  Ubfx(hash, hash, String::kHashShift, String::kArrayIndexValueBits);
2476  SmiTag(index, hash);
2477 }
2478 
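// Illustration only (not part of this file): the Ubfx/SmiTag pair above in C
// terms, assuming ARM's 1-bit smi tag. The shift and width are parameters
// here standing in for String::kHashShift and String::kArrayIndexValueBits.
#include <stdint.h>

static inline uint32_t IndexFromHashSketch(uint32_t hash_field,
                                           int hash_shift,
                                           int value_bits) {
  uint32_t index = (hash_field >> hash_shift) & ((1u << value_bits) - 1u);
  return index << 1;  // SmiTag
}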
2479 
2480 void MacroAssembler::SmiToDouble(LowDwVfpRegister value, Register smi) {
2481  if (CpuFeatures::IsSupported(VFP3)) {
2482  vmov(value.low(), smi);
2483  vcvt_f64_s32(value, 1);
2484  } else {
2485  SmiUntag(ip, smi);
2486  vmov(value.low(), ip);
2487  vcvt_f64_s32(value, value.low());
2488  }
2489 }
2490 
2491 
2492 void MacroAssembler::TestDoubleIsInt32(DwVfpRegister double_input,
2493  LowDwVfpRegister double_scratch) {
2494  ASSERT(!double_input.is(double_scratch));
2495  vcvt_s32_f64(double_scratch.low(), double_input);
2496  vcvt_f64_s32(double_scratch, double_scratch.low());
2497  VFPCompareAndSetFlags(double_input, double_scratch);
2498 }
2499 
2500 
2501 void MacroAssembler::TryDoubleToInt32Exact(Register result,
2502  DwVfpRegister double_input,
2503  LowDwVfpRegister double_scratch) {
2504  ASSERT(!double_input.is(double_scratch));
2505  vcvt_s32_f64(double_scratch.low(), double_input);
2506  vmov(result, double_scratch.low());
2507  vcvt_f64_s32(double_scratch, double_scratch.low());
2508  VFPCompareAndSetFlags(double_input, double_scratch);
2509 }
2510 
2511 
2512 void MacroAssembler::TryInt32Floor(Register result,
2513  DwVfpRegister double_input,
2514  Register input_high,
2515  LowDwVfpRegister double_scratch,
2516  Label* done,
2517  Label* exact) {
2518  ASSERT(!result.is(input_high));
2519  ASSERT(!double_input.is(double_scratch));
2520  Label negative, exception;
2521 
2522  VmovHigh(input_high, double_input);
2523 
2524  // Test for NaN and infinities.
2525  Sbfx(result, input_high,
2526  HeapNumber::kExponentShift, HeapNumber::kExponentBits);
2527  cmp(result, Operand(-1));
2528  b(eq, &exception);
2529  // Test for values that can be exactly represented as a
2530  // signed 32-bit integer.
2531  TryDoubleToInt32Exact(result, double_input, double_scratch);
2532  // If exact, return (result already fetched).
2533  b(eq, exact);
2534  cmp(input_high, Operand::Zero());
2535  b(mi, &negative);
2536 
2537  // Input is in ]+0, +inf[.
2538  // If result equals 0x7fffffff, the input was out of range or
2539  // in ]0x7fffffff, 0x80000000[. We ignore this last case, which
2540  // could fit into an int32; that means we always treat the input as
2541  // out of range and always go to exception.
2542  // If result < 0x7fffffff, go to done, result fetched.
2543  cmn(result, Operand(1));
2544  b(mi, &exception);
2545  b(done);
2546 
2547  // Input is in ]-inf, -0[.
2548  // If x is a non-integer negative number,
2549  // floor(x) <=> round_to_zero(x) - 1.
2550  bind(&negative);
2551  sub(result, result, Operand(1), SetCC);
2552  // If result is still negative, go to done, result fetched.
2553  // Else, we had an overflow and we fall through exception.
2554  b(mi, done);
2555  bind(&exception);
2556 }
2557 
2558 void MacroAssembler::TryInlineTruncateDoubleToI(Register result,
2559  DwVfpRegister double_input,
2560  Label* done) {
2561  LowDwVfpRegister double_scratch = kScratchDoubleReg;
2562  vcvt_s32_f64(double_scratch.low(), double_input);
2563  vmov(result, double_scratch.low());
2564 
2565  // If result is not saturated (0x7fffffff or 0x80000000), we are done.
2566  sub(ip, result, Operand(1));
2567  cmp(ip, Operand(0x7ffffffe));
2568  b(lt, done);
2569 }
2570 
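// Illustration only (not part of this file): what the sub/cmp saturation test
// above checks. vcvt_s32_f64 saturates out-of-range inputs to INT32_MAX or
// INT32_MIN, and the generated code folds both tests into a single comparison
// of (result - 1) against 0x7ffffffe.
#include <stdint.h>

static inline bool TruncationNotSaturatedSketch(int32_t result) {
  return result != INT32_MAX && result != INT32_MIN;  // b(lt, done)
}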
2571 
2572 void MacroAssembler::TruncateDoubleToI(Register result,
2573  DwVfpRegister double_input) {
2574  Label done;
2575 
2576  TryInlineTruncateDoubleToI(result, double_input, &done);
2577 
2578  // If we fell through then inline version didn't succeed - call stub instead.
2579  push(lr);
2580  sub(sp, sp, Operand(kDoubleSize)); // Put input on stack.
2581  vstr(double_input, MemOperand(sp, 0));
2582 
2583  DoubleToIStub stub(sp, result, 0, true, true);
2584  CallStub(&stub);
2585 
2586  add(sp, sp, Operand(kDoubleSize));
2587  pop(lr);
2588 
2589  bind(&done);
2590 }
2591 
2592 
2593 void MacroAssembler::TruncateHeapNumberToI(Register result,
2594  Register object) {
2595  Label done;
2596  LowDwVfpRegister double_scratch = kScratchDoubleReg;
2597  ASSERT(!result.is(object));
2598 
2599  vldr(double_scratch,
2600  MemOperand(object, HeapNumber::kValueOffset - kHeapObjectTag));
2601  TryInlineTruncateDoubleToI(result, double_scratch, &done);
2602 
2603  // If we fell through then inline version didn't succeed - call stub instead.
2604  push(lr);
2605  DoubleToIStub stub(object,
2606  result,
2607  HeapNumber::kValueOffset - kHeapObjectTag,
2608  true,
2609  true);
2610  CallStub(&stub);
2611  pop(lr);
2612 
2613  bind(&done);
2614 }
2615 
2616 
2617 void MacroAssembler::TruncateNumberToI(Register object,
2618  Register result,
2619  Register heap_number_map,
2620  Register scratch1,
2621  Label* not_number) {
2622  Label done;
2623  ASSERT(!result.is(object));
2624 
2625  UntagAndJumpIfSmi(result, object, &done);
2626  JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_number);
2627  TruncateHeapNumberToI(result, object);
2628 
2629  bind(&done);
2630 }
2631 
2632 
2633 void MacroAssembler::GetLeastBitsFromSmi(Register dst,
2634  Register src,
2635  int num_least_bits) {
2636  if (CpuFeatures::IsSupported(ARMv7) && !predictable_code_size()) {
2637  ubfx(dst, src, kSmiTagSize, num_least_bits);
2638  } else {
2639  SmiUntag(dst, src);
2640  and_(dst, dst, Operand((1 << num_least_bits) - 1));
2641  }
2642 }
2643 
2644 
2645 void MacroAssembler::GetLeastBitsFromInt32(Register dst,
2646  Register src,
2647  int num_least_bits) {
2648  and_(dst, src, Operand((1 << num_least_bits) - 1));
2649 }
2650 
2651 
2652 void MacroAssembler::CallRuntime(const Runtime::Function* f,
2653  int num_arguments,
2654  SaveFPRegsMode save_doubles) {
2655  // All parameters are on the stack. r0 has the return value after call.
2656 
2657  // If the expected number of arguments of the runtime function is
2658  // constant, we check that the actual number of arguments matches the
2659  // expectation.
2660  if (f->nargs >= 0 && f->nargs != num_arguments) {
2661  IllegalOperation(num_arguments);
2662  return;
2663  }
2664 
2665  // TODO(1236192): Most runtime routines don't need the number of
2666  // arguments passed in because it is constant. At some point we
2667  // should remove this need and make the runtime routine entry code
2668  // smarter.
2669  mov(r0, Operand(num_arguments));
2670  mov(r1, Operand(ExternalReference(f, isolate())));
2671  CEntryStub stub(1, save_doubles);
2672  CallStub(&stub);
2673 }
2674 
2675 
2676 void MacroAssembler::CallExternalReference(const ExternalReference& ext,
2677  int num_arguments) {
2678  mov(r0, Operand(num_arguments));
2679  mov(r1, Operand(ext));
2680 
2681  CEntryStub stub(1);
2682  CallStub(&stub);
2683 }
2684 
2685 
2686 void MacroAssembler::TailCallExternalReference(const ExternalReference& ext,
2687  int num_arguments,
2688  int result_size) {
2689  // TODO(1236192): Most runtime routines don't need the number of
2690  // arguments passed in because it is constant. At some point we
2691  // should remove this need and make the runtime routine entry code
2692  // smarter.
2693  mov(r0, Operand(num_arguments));
2694  JumpToExternalReference(ext);
2695 }
2696 
2697 
2698 void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
2699  int num_arguments,
2700  int result_size) {
2701  TailCallExternalReference(ExternalReference(fid, isolate()),
2702  num_arguments,
2703  result_size);
2704 }
2705 
2706 
2707 void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin) {
2708 #if defined(__thumb__)
2709  // Thumb mode builtin.
2710  ASSERT((reinterpret_cast<intptr_t>(builtin.address()) & 1) == 1);
2711 #endif
2712  mov(r1, Operand(builtin));
2713  CEntryStub stub(1);
2714  Jump(stub.GetCode(isolate()), RelocInfo::CODE_TARGET);
2715 }
2716 
2717 
2718 void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
2719  InvokeFlag flag,
2720  const CallWrapper& call_wrapper) {
2721  // You can't call a builtin without a valid frame.
2722  ASSERT(flag == JUMP_FUNCTION || has_frame());
2723 
2724  GetBuiltinEntry(r2, id);
2725  if (flag == CALL_FUNCTION) {
2726  call_wrapper.BeforeCall(CallSize(r2));
2727  Call(r2);
2728  call_wrapper.AfterCall();
2729  } else {
2730  ASSERT(flag == JUMP_FUNCTION);
2731  Jump(r2);
2732  }
2733 }
2734 
2735 
2736 void MacroAssembler::GetBuiltinFunction(Register target,
2737  Builtins::JavaScript id) {
2738  // Load the builtins object into target register.
2739  ldr(target,
2740  MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
2741  ldr(target, FieldMemOperand(target, GlobalObject::kBuiltinsOffset));
2742  // Load the JavaScript builtin function from the builtins object.
2743  ldr(target, FieldMemOperand(target,
2744  JSBuiltinsObject::OffsetOfFunctionWithId(id)));
2745 }
2746 
2747 
2748 void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
2749  ASSERT(!target.is(r1));
2750  GetBuiltinFunction(r1, id);
2751  // Load the code entry point from the builtins object.
2752  ldr(target, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
2753 }
2754 
2755 
2756 void MacroAssembler::SetCounter(StatsCounter* counter, int value,
2757  Register scratch1, Register scratch2) {
2758  if (FLAG_native_code_counters && counter->Enabled()) {
2759  mov(scratch1, Operand(value));
2760  mov(scratch2, Operand(ExternalReference(counter)));
2761  str(scratch1, MemOperand(scratch2));
2762  }
2763 }
2764 
2765 
2766 void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
2767  Register scratch1, Register scratch2) {
2768  ASSERT(value > 0);
2769  if (FLAG_native_code_counters && counter->Enabled()) {
2770  mov(scratch2, Operand(ExternalReference(counter)));
2771  ldr(scratch1, MemOperand(scratch2));
2772  add(scratch1, scratch1, Operand(value));
2773  str(scratch1, MemOperand(scratch2));
2774  }
2775 }
2776 
2777 
2778 void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
2779  Register scratch1, Register scratch2) {
2780  ASSERT(value > 0);
2781  if (FLAG_native_code_counters && counter->Enabled()) {
2782  mov(scratch2, Operand(ExternalReference(counter)));
2783  ldr(scratch1, MemOperand(scratch2));
2784  sub(scratch1, scratch1, Operand(value));
2785  str(scratch1, MemOperand(scratch2));
2786  }
2787 }
2788 
2789 
2790 void MacroAssembler::Assert(Condition cond, BailoutReason reason) {
2791  if (emit_debug_code())
2792  Check(cond, reason);
2793 }
2794 
2795 
2796 void MacroAssembler::AssertFastElements(Register elements) {
2797  if (emit_debug_code()) {
2798  ASSERT(!elements.is(ip));
2799  Label ok;
2800  push(elements);
2801  ldr(elements, FieldMemOperand(elements, HeapObject::kMapOffset));
2802  LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
2803  cmp(elements, ip);
2804  b(eq, &ok);
2805  LoadRoot(ip, Heap::kFixedDoubleArrayMapRootIndex);
2806  cmp(elements, ip);
2807  b(eq, &ok);
2808  LoadRoot(ip, Heap::kFixedCOWArrayMapRootIndex);
2809  cmp(elements, ip);
2810  b(eq, &ok);
2811  Abort(kJSObjectWithFastElementsMapHasSlowElements);
2812  bind(&ok);
2813  pop(elements);
2814  }
2815 }
2816 
2817 
2818 void MacroAssembler::Check(Condition cond, BailoutReason reason) {
2819  Label L;
2820  b(cond, &L);
2821  Abort(reason);
2822  // will not return here
2823  bind(&L);
2824 }
2825 
2826 
2827 void MacroAssembler::Abort(BailoutReason reason) {
2828  Label abort_start;
2829  bind(&abort_start);
2830 #ifdef DEBUG
2831  const char* msg = GetBailoutReason(reason);
2832  if (msg != NULL) {
2833  RecordComment("Abort message: ");
2834  RecordComment(msg);
2835  }
2836 
2837  if (FLAG_trap_on_abort) {
2838  stop(msg);
2839  return;
2840  }
2841 #endif
2842 
2843  mov(r0, Operand(Smi::FromInt(reason)));
2844  push(r0);
2845 
2846  // Disable stub call restrictions to always allow calls to abort.
2847  if (!has_frame_) {
2848  // We don't actually want to generate a pile of code for this, so just
2849  // claim there is a stack frame, without generating one.
2850  FrameScope scope(this, StackFrame::NONE);
2851  CallRuntime(Runtime::kAbort, 1);
2852  } else {
2853  CallRuntime(Runtime::kAbort, 1);
2854  }
2855  // will not return here
2856  if (is_const_pool_blocked()) {
2857  // If the calling code cares about the exact number of
2858  // instructions generated, we insert padding here to keep the size
2859  // of the Abort macro constant.
2860  static const int kExpectedAbortInstructions = 7;
2861  int abort_instructions = InstructionsGeneratedSince(&abort_start);
2862  ASSERT(abort_instructions <= kExpectedAbortInstructions);
2863  while (abort_instructions++ < kExpectedAbortInstructions) {
2864  nop();
2865  }
2866  }
2867 }
2868 
2869 
2870 void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
2871  if (context_chain_length > 0) {
2872  // Move up the chain of contexts to the context containing the slot.
2873  ldr(dst, MemOperand(cp, Context::SlotOffset(Context::PREVIOUS_INDEX)));
2874  for (int i = 1; i < context_chain_length; i++) {
2875  ldr(dst, MemOperand(dst, Context::SlotOffset(Context::PREVIOUS_INDEX)));
2876  }
2877  } else {
2878  // Slot is in the current function context. Move it into the
2879  // destination register in case we store into it (the write barrier
2880  // cannot be allowed to destroy the context register cp).
2881  mov(dst, cp);
2882  }
2883 }
2884 
2885 
2886 void MacroAssembler::LoadTransitionedArrayMapConditional(
2887  ElementsKind expected_kind,
2888  ElementsKind transitioned_kind,
2889  Register map_in_out,
2890  Register scratch,
2891  Label* no_map_match) {
2892  // Load the global or builtins object from the current context.
2893  ldr(scratch,
2894  MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
2895  ldr(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset));
2896 
2897  // Check that the function's map is the same as the expected cached map.
2898  ldr(scratch,
2899  MemOperand(scratch,
2900  Context::SlotOffset(Context::JS_ARRAY_MAPS_INDEX)));
2901  size_t offset = expected_kind * kPointerSize +
2902  FixedArrayBase::kHeaderSize;
2903  ldr(ip, FieldMemOperand(scratch, offset));
2904  cmp(map_in_out, ip);
2905  b(ne, no_map_match);
2906 
2907  // Use the transitioned cached map.
2908  offset = transitioned_kind * kPointerSize +
2909  FixedArrayBase::kHeaderSize;
2910  ldr(map_in_out, FieldMemOperand(scratch, offset));
2911 }
2912 
2913 
2914 void MacroAssembler::LoadGlobalFunction(int index, Register function) {
2915  // Load the global or builtins object from the current context.
2916  ldr(function,
2917  MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
2918  // Load the native context from the global or builtins object.
2919  ldr(function, FieldMemOperand(function,
2920  GlobalObject::kNativeContextOffset));
2921  // Load the function from the native context.
2922  ldr(function, MemOperand(function, Context::SlotOffset(index)));
2923 }
2924 
2925 
2926 void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
2927  Register map,
2928  Register scratch) {
2929  // Load the initial map. The global functions all have initial maps.
2930  ldr(map, FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
2931  if (emit_debug_code()) {
2932  Label ok, fail;
2933  CheckMap(map, scratch, Heap::kMetaMapRootIndex, &fail, DO_SMI_CHECK);
2934  b(&ok);
2935  bind(&fail);
2936  Abort(kGlobalFunctionsMustHaveInitialMap);
2937  bind(&ok);
2938  }
2939 }
2940 
2941 
2942 void MacroAssembler::JumpIfNotPowerOfTwoOrZero(
2943  Register reg,
2944  Register scratch,
2945  Label* not_power_of_two_or_zero) {
2946  sub(scratch, reg, Operand(1), SetCC);
2947  b(mi, not_power_of_two_or_zero);
2948  tst(scratch, reg);
2949  b(ne, not_power_of_two_or_zero);
2950 }
2951 
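// Illustration only (not part of this file): the sub/tst pair above is the
// standard x & (x - 1) power-of-two test, with the sign flag of (reg - 1)
// filtering out zero and negative values first.
static inline bool IsPositivePowerOfTwoSketch(int reg) {
  if (reg <= 0) return false;     // b(mi, not_power_of_two_or_zero)
  return (reg & (reg - 1)) == 0;  // tst(scratch, reg): no bits in common
}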
2952 
2953 void MacroAssembler::JumpIfNotPowerOfTwoOrZeroAndNeg(
2954  Register reg,
2955  Register scratch,
2956  Label* zero_and_neg,
2957  Label* not_power_of_two) {
2958  sub(scratch, reg, Operand(1), SetCC);
2959  b(mi, zero_and_neg);
2960  tst(scratch, reg);
2961  b(ne, not_power_of_two);
2962 }
2963 
2964 
2965 void MacroAssembler::JumpIfNotBothSmi(Register reg1,
2966  Register reg2,
2967  Label* on_not_both_smi) {
2968  STATIC_ASSERT(kSmiTag == 0);
2969  tst(reg1, Operand(kSmiTagMask));
2970  tst(reg2, Operand(kSmiTagMask), eq);
2971  b(ne, on_not_both_smi);
2972 }
2973 
2974 
2975 void MacroAssembler::UntagAndJumpIfSmi(
2976  Register dst, Register src, Label* smi_case) {
2977  STATIC_ASSERT(kSmiTag == 0);
2978  SmiUntag(dst, src, SetCC);
2979  b(cc, smi_case); // Shifter carry is not set for a smi.
2980 }
2981 
2982 
2983 void MacroAssembler::UntagAndJumpIfNotSmi(
2984  Register dst, Register src, Label* non_smi_case) {
2985  STATIC_ASSERT(kSmiTag == 0);
2986  SmiUntag(dst, src, SetCC);
2987  b(cs, non_smi_case); // Shifter carry is set for a non-smi.
2988 }
2989 
2990 
2991 void MacroAssembler::JumpIfEitherSmi(Register reg1,
2992  Register reg2,
2993  Label* on_either_smi) {
2994  STATIC_ASSERT(kSmiTag == 0);
2995  tst(reg1, Operand(kSmiTagMask));
2996  tst(reg2, Operand(kSmiTagMask), ne);
2997  b(eq, on_either_smi);
2998 }
2999 
3000 
3001 void MacroAssembler::AssertNotSmi(Register object) {
3002  if (emit_debug_code()) {
3003  STATIC_ASSERT(kSmiTag == 0);
3004  tst(object, Operand(kSmiTagMask));
3005  Check(ne, kOperandIsASmi);
3006  }
3007 }
3008 
3009 
3010 void MacroAssembler::AssertSmi(Register object) {
3011  if (emit_debug_code()) {
3012  STATIC_ASSERT(kSmiTag == 0);
3013  tst(object, Operand(kSmiTagMask));
3014  Check(eq, kOperandIsNotSmi);
3015  }
3016 }
3017 
3018 
3019 void MacroAssembler::AssertString(Register object) {
3020  if (emit_debug_code()) {
3021  STATIC_ASSERT(kSmiTag == 0);
3022  tst(object, Operand(kSmiTagMask));
3023  Check(ne, kOperandIsASmiAndNotAString);
3024  push(object);
3025  ldr(object, FieldMemOperand(object, HeapObject::kMapOffset));
3026  CompareInstanceType(object, object, FIRST_NONSTRING_TYPE);
3027  pop(object);
3028  Check(lo, kOperandIsNotAString);
3029  }
3030 }
3031 
3032 
3033 void MacroAssembler::AssertName(Register object) {
3034  if (emit_debug_code()) {
3035  STATIC_ASSERT(kSmiTag == 0);
3036  tst(object, Operand(kSmiTagMask));
3037  Check(ne, kOperandIsASmiAndNotAName);
3038  push(object);
3039  ldr(object, FieldMemOperand(object, HeapObject::kMapOffset));
3040  CompareInstanceType(object, object, LAST_NAME_TYPE);
3041  pop(object);
3042  Check(le, kOperandIsNotAName);
3043  }
3044 }
3045 
3046 
3047 void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
3048  Register scratch) {
3049  if (emit_debug_code()) {
3050  Label done_checking;
3051  AssertNotSmi(object);
3052  CompareRoot(object, Heap::kUndefinedValueRootIndex);
3053  b(eq, &done_checking);
3054  ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
3055  CompareRoot(scratch, Heap::kAllocationSiteMapRootIndex);
3056  Assert(eq, kExpectedUndefinedOrCell);
3057  bind(&done_checking);
3058  }
3059 }
3060 
3061 
3062 void MacroAssembler::AssertIsRoot(Register reg, Heap::RootListIndex index) {
3063  if (emit_debug_code()) {
3064  CompareRoot(reg, index);
3065  Check(eq, kHeapNumberMapRegisterClobbered);
3066  }
3067 }
3068 
3069 
3070 void MacroAssembler::JumpIfNotHeapNumber(Register object,
3071  Register heap_number_map,
3072  Register scratch,
3073  Label* on_not_heap_number) {
3074  ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
3075  AssertIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
3076  cmp(scratch, heap_number_map);
3077  b(ne, on_not_heap_number);
3078 }
3079 
3080 
3081 void MacroAssembler::LookupNumberStringCache(Register object,
3082  Register result,
3083  Register scratch1,
3084  Register scratch2,
3085  Register scratch3,
3086  Label* not_found) {
3087  // Use of registers. Register result is used as a temporary.
3088  Register number_string_cache = result;
3089  Register mask = scratch3;
3090 
3091  // Load the number string cache.
3092  LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);
3093 
3094  // Make the hash mask from the length of the number string cache. It
3095  // contains two elements (number and string) for each cache entry.
3096  ldr(mask, FieldMemOperand(number_string_cache, FixedArray::kLengthOffset));
3097  // Divide length by two (length is a smi).
3098  mov(mask, Operand(mask, ASR, kSmiTagSize + 1));
3099  sub(mask, mask, Operand(1)); // Make mask.
3100 
3101  // Calculate the entry in the number string cache. The hash value in the
3102  // number string cache for smis is just the smi value, and the hash for
3103  // doubles is the xor of the upper and lower words. See
3104  // Heap::GetNumberStringCache.
3105  Label is_smi;
3106  Label load_result_from_cache;
3107  JumpIfSmi(object, &is_smi);
3108  CheckMap(object,
3109  scratch1,
3110  Heap::kHeapNumberMapRootIndex,
3111  not_found,
3112  DONT_DO_SMI_CHECK);
3113 
3113 
3114  STATIC_ASSERT(8 == kDoubleSize);
3115  add(scratch1,
3116  object,
3117  Operand(HeapNumber::kValueOffset - kHeapObjectTag));
3118  ldm(ia, scratch1, scratch1.bit() | scratch2.bit());
3119  eor(scratch1, scratch1, Operand(scratch2));
3120  and_(scratch1, scratch1, Operand(mask));
3121 
3122  // Calculate address of entry in string cache: each entry consists
3123  // of two pointer sized fields.
3124  add(scratch1,
3125  number_string_cache,
3126  Operand(scratch1, LSL, kPointerSizeLog2 + 1));
3127 
3128  Register probe = mask;
3129  ldr(probe, FieldMemOperand(scratch1, FixedArray::kHeaderSize));
3130  JumpIfSmi(probe, not_found);
3131  sub(scratch2, object, Operand(kHeapObjectTag));
3132  vldr(d0, scratch2, HeapNumber::kValueOffset);
3133  sub(probe, probe, Operand(kHeapObjectTag));
3134  vldr(d1, probe, HeapNumber::kValueOffset);
3135  VFPCompareAndSetFlags(d0, d1);
3136  b(ne, not_found); // The cache did not contain this value.
3137  b(&load_result_from_cache);
3138 
3139  bind(&is_smi);
3140  Register scratch = scratch1;
3141  and_(scratch, mask, Operand(object, ASR, 1));
3142  // Calculate address of entry in string cache: each entry consists
3143  // of two pointer sized fields.
3144  add(scratch,
3145  number_string_cache,
3146  Operand(scratch, LSL, kPointerSizeLog2 + 1));
3147 
3148  // Check if the entry is the smi we are looking for.
3149  ldr(probe, FieldMemOperand(scratch, FixedArray::kHeaderSize));
3150  cmp(object, probe);
3151  b(ne, not_found);
3152 
3153  // Get the result from the cache.
3154  bind(&load_result_from_cache);
3155  ldr(result, FieldMemOperand(scratch, FixedArray::kHeaderSize + kPointerSize));
3156  IncrementCounter(isolate()->counters()->number_to_string_native(),
3157  1,
3158  scratch1,
3159  scratch2);
3160 }
3161 
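// Illustration only (not part of this file): the cache probe above in C
// terms. The cache is a FixedArray of (number, string) pairs, so the mask is
// capacity / 2 - 1; smis hash to their value, heap numbers to the xor of the
// two words of the double, as described in the comments above.
#include <stdint.h>

static inline uint32_t NumberStringCacheKeyIndexSketch(bool is_smi,
                                                       int32_t smi_value,
                                                       uint32_t double_lo,
                                                       uint32_t double_hi,
                                                       uint32_t mask) {
  uint32_t hash = is_smi ? static_cast<uint32_t>(smi_value)
                         : (double_lo ^ double_hi);
  return (hash & mask) * 2;  // key element index; the cached string is at +1
}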
3162 
3163 void MacroAssembler::JumpIfNonSmisNotBothSequentialAsciiStrings(
3164  Register first,
3165  Register second,
3166  Register scratch1,
3167  Register scratch2,
3168  Label* failure) {
3169  // Test that both first and second are sequential ASCII strings.
3170  // Assume that they are non-smis.
3171  ldr(scratch1, FieldMemOperand(first, HeapObject::kMapOffset));
3172  ldr(scratch2, FieldMemOperand(second, HeapObject::kMapOffset));
3173  ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
3174  ldrb(scratch2, FieldMemOperand(scratch2, Map::kInstanceTypeOffset));
3175 
3176  JumpIfBothInstanceTypesAreNotSequentialAscii(scratch1,
3177  scratch2,
3178  scratch1,
3179  scratch2,
3180  failure);
3181 }
3182 
3183 void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(Register first,
3184  Register second,
3185  Register scratch1,
3186  Register scratch2,
3187  Label* failure) {
3188  // Check that neither is a smi.
3189  and_(scratch1, first, Operand(second));
3190  JumpIfSmi(scratch1, failure);
3191  JumpIfNonSmisNotBothSequentialAsciiStrings(first,
3192  second,
3193  scratch1,
3194  scratch2,
3195  failure);
3196 }
3197 
3198 
3199 void MacroAssembler::JumpIfNotUniqueName(Register reg,
3200  Label* not_unique_name) {
3201  STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
3202  Label succeed;
3203  tst(reg, Operand(kIsNotStringMask | kIsNotInternalizedMask));
3204  b(eq, &succeed);
3205  cmp(reg, Operand(SYMBOL_TYPE));
3206  b(ne, not_unique_name);
3207 
3208  bind(&succeed);
3209 }
3210 
3211 
3212 // Allocates a heap number or jumps to the gc_required label if the young
3213 // space is full and a scavenge is needed.
3214 void MacroAssembler::AllocateHeapNumber(Register result,
3215  Register scratch1,
3216  Register scratch2,
3217  Register heap_number_map,
3218  Label* gc_required,
3219  TaggingMode tagging_mode) {
3220  // Allocate an object in the heap for the heap number and tag it as a heap
3221  // object.
3222  Allocate(HeapNumber::kSize, result, scratch1, scratch2, gc_required,
3223  tagging_mode == TAG_RESULT ? TAG_OBJECT : NO_ALLOCATION_FLAGS);
3224 
3225  // Store heap number map in the allocated object.
3226  AssertIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
3227  if (tagging_mode == TAG_RESULT) {
3228  str(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset));
3229  } else {
3230  str(heap_number_map, MemOperand(result, HeapObject::kMapOffset));
3231  }
3232 }
3233 
3234 
3235 void MacroAssembler::AllocateHeapNumberWithValue(Register result,
3236  DwVfpRegister value,
3237  Register scratch1,
3238  Register scratch2,
3239  Register heap_number_map,
3240  Label* gc_required) {
3241  AllocateHeapNumber(result, scratch1, scratch2, heap_number_map, gc_required);
3242  sub(scratch1, result, Operand(kHeapObjectTag));
3243  vstr(value, scratch1, HeapNumber::kValueOffset);
3244 }
3245 
3246 
3247 // Copies a fixed number of fields of heap objects from src to dst.
3248 void MacroAssembler::CopyFields(Register dst,
3249  Register src,
3250  LowDwVfpRegister double_scratch,
3251  int field_count) {
3252  int double_count = field_count / (DwVfpRegister::kSizeInBytes / kPointerSize);
3253  for (int i = 0; i < double_count; i++) {
3254  vldr(double_scratch, FieldMemOperand(src, i * DwVfpRegister::kSizeInBytes));
3255  vstr(double_scratch, FieldMemOperand(dst, i * DwVfpRegister::kSizeInBytes));
3256  }
3257 
3258  STATIC_ASSERT(SwVfpRegister::kSizeInBytes == kPointerSize);
3259  STATIC_ASSERT(2 * SwVfpRegister::kSizeInBytes == DwVfpRegister::kSizeInBytes);
3260 
3261  int remain = field_count % (DwVfpRegister::kSizeInBytes / kPointerSize);
3262  if (remain != 0) {
3263  vldr(double_scratch.low(),
3264  FieldMemOperand(src, (field_count - 1) * kPointerSize));
3265  vstr(double_scratch.low(),
3266  FieldMemOperand(dst, (field_count - 1) * kPointerSize));
3267  }
3268 }
3269 
3270 
3271 void MacroAssembler::CopyBytes(Register src,
3272  Register dst,
3273  Register length,
3274  Register scratch) {
3275  Label align_loop_1, word_loop, byte_loop, byte_loop_1, done;
3276 
3277  // Align src before copying in word size chunks.
3278  cmp(length, Operand(kPointerSize));
3279  b(le, &byte_loop);
3280 
3281  bind(&align_loop_1);
3282  tst(src, Operand(kPointerSize - 1));
3283  b(eq, &word_loop);
3284  ldrb(scratch, MemOperand(src, 1, PostIndex));
3285  strb(scratch, MemOperand(dst, 1, PostIndex));
3286  sub(length, length, Operand(1), SetCC);
3287  b(&align_loop_1);
3288  // Copy bytes in word size chunks.
3289  bind(&word_loop);
3290  if (emit_debug_code()) {
3291  tst(src, Operand(kPointerSize - 1));
3292  Assert(eq, kExpectingAlignmentForCopyBytes);
3293  }
3294  cmp(length, Operand(kPointerSize));
3295  b(lt, &byte_loop);
3296  ldr(scratch, MemOperand(src, kPointerSize, PostIndex));
3297  if (CpuFeatures::IsSupported(UNALIGNED_ACCESSES)) {
3298  str(scratch, MemOperand(dst, kPointerSize, PostIndex));
3299  } else {
3300  strb(scratch, MemOperand(dst, 1, PostIndex));
3301  mov(scratch, Operand(scratch, LSR, 8));
3302  strb(scratch, MemOperand(dst, 1, PostIndex));
3303  mov(scratch, Operand(scratch, LSR, 8));
3304  strb(scratch, MemOperand(dst, 1, PostIndex));
3305  mov(scratch, Operand(scratch, LSR, 8));
3306  strb(scratch, MemOperand(dst, 1, PostIndex));
3307  }
3308  sub(length, length, Operand(kPointerSize));
3309  b(&word_loop);
3310 
3311  // Copy the last bytes if any left.
3312  bind(&byte_loop);
3313  cmp(length, Operand::Zero());
3314  b(eq, &done);
3315  bind(&byte_loop_1);
3316  ldrb(scratch, MemOperand(src, 1, PostIndex));
3317  strb(scratch, MemOperand(dst, 1, PostIndex));
3318  sub(length, length, Operand(1), SetCC);
3319  b(ne, &byte_loop_1);
3320  bind(&done);
3321 }
3322 
3323 
3324 void MacroAssembler::InitializeFieldsWithFiller(Register start_offset,
3325  Register end_offset,
3326  Register filler) {
3327  Label loop, entry;
3328  b(&entry);
3329  bind(&loop);
3330  str(filler, MemOperand(start_offset, kPointerSize, PostIndex));
3331  bind(&entry);
3332  cmp(start_offset, end_offset);
3333  b(lt, &loop);
3334 }
3335 
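// Illustration only (not part of this file): the loop above in C terms,
// stepping one pointer-sized field at a time from start_offset up to (but not
// including) end_offset and storing the filler value into each slot.
static inline void InitializeFieldsWithFillerSketch(void** start,
                                                    void** end,
                                                    void* filler) {
  for (void** p = start; p < end; ++p) {
    *p = filler;
  }
}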
3336 
3337 void MacroAssembler::CheckFor32DRegs(Register scratch) {
3338  mov(scratch, Operand(ExternalReference::cpu_features()));
3339  ldr(scratch, MemOperand(scratch));
3340  tst(scratch, Operand(1u << VFP32DREGS));
3341 }
3342 
3343 
3344 void MacroAssembler::SaveFPRegs(Register location, Register scratch) {
3345  CheckFor32DRegs(scratch);
3346  vstm(db_w, location, d16, d31, ne);
3347  sub(location, location, Operand(16 * kDoubleSize), LeaveCC, eq);
3348  vstm(db_w, location, d0, d15);
3349 }
3350 
3351 
3352 void MacroAssembler::RestoreFPRegs(Register location, Register scratch) {
3353  CheckFor32DRegs(scratch);
3354  vldm(ia_w, location, d0, d15);
3355  vldm(ia_w, location, d16, d31, ne);
3356  add(location, location, Operand(16 * kDoubleSize), LeaveCC, eq);
3357 }
3358 
3359 
3360 void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialAscii(
3361  Register first,
3362  Register second,
3363  Register scratch1,
3364  Register scratch2,
3365  Label* failure) {
3366  const int kFlatAsciiStringMask =
3367  kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
3368  const int kFlatAsciiStringTag =
3369  kStringTag | kOneByteStringTag | kSeqStringTag;
3370  and_(scratch1, first, Operand(kFlatAsciiStringMask));
3371  and_(scratch2, second, Operand(kFlatAsciiStringMask));
3372  cmp(scratch1, Operand(kFlatAsciiStringTag));
3373  // Ignore second test if first test failed.
3374  cmp(scratch2, Operand(kFlatAsciiStringTag), eq);
3375  b(ne, failure);
3376 }
3377 
3378 
3379 void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii(Register type,
3380  Register scratch,
3381  Label* failure) {
3382  const int kFlatAsciiStringMask =
3383      kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
3384  const int kFlatAsciiStringTag =
3385      kStringTag | kOneByteStringTag | kSeqStringTag;
3386  and_(scratch, type, Operand(kFlatAsciiStringMask));
3387  cmp(scratch, Operand(kFlatAsciiStringTag));
3388  b(ne, failure);
3389 }
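Both of the checks above reduce to a single mask-and-compare on the instance type. A hedged sketch of that predicate, with the mask and tag passed in rather than built from V8's string-type constants; the two-register variant simply applies the same predicate to both instance types and branches to the failure label if either comparison fails.

#include <cstdint>

// Sketch of the sequential-ASCII test: mask the representation, encoding
// and "not a string" bits out of the instance type and compare what is
// left with the expected flat one-byte tag.
static bool IsSequentialAsciiSketch(uint32_t instance_type,
                                    uint32_t flat_ascii_mask,
                                    uint32_t flat_ascii_tag) {
  return (instance_type & flat_ascii_mask) == flat_ascii_tag;
}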
3390 
3391 static const int kRegisterPassedArguments = 4;
3392 
3393 
3394 int MacroAssembler::CalculateStackPassedWords(int num_reg_arguments,
3395  int num_double_arguments) {
3396  int stack_passed_words = 0;
3397  if (use_eabi_hardfloat()) {
3398  // In the hard floating point calling convention, we can use
3399  // all double registers to pass doubles.
3400  if (num_double_arguments > DoubleRegister::NumRegisters()) {
3401  stack_passed_words +=
3402  2 * (num_double_arguments - DoubleRegister::NumRegisters());
3403  }
3404  } else {
3405  // In the soft floating point calling convention, every double
3406  // argument is passed using two registers.
3407  num_reg_arguments += 2 * num_double_arguments;
3408  }
3409  // Up to four simple arguments are passed in registers r0..r3.
3410  if (num_reg_arguments > kRegisterPassedArguments) {
3411  stack_passed_words += num_reg_arguments - kRegisterPassedArguments;
3412  }
3413  return stack_passed_words;
3414 }
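As a worked example: under the soft-float convention, a call with 2 integer and 3 double arguments occupies 2 + 2*3 = 8 argument slots, of which only the first 4 fit in r0-r3, so 4 words go on the stack. A stand-alone sketch of the same arithmetic, with the register counts taken as parameters rather than V8's constants:

// Sketch of the stack-word computation above (4 core registers r0-r3,
// and a pool of double registers under hard-float).
static int StackPassedWordsSketch(int num_reg_args, int num_double_args,
                                  bool hardfloat, int num_double_registers) {
  const int kRegisterPassedArgs = 4;  // r0..r3
  int stack_words = 0;
  if (hardfloat) {
    // Doubles beyond the available double registers spill to the stack,
    // two words apiece.
    if (num_double_args > num_double_registers) {
      stack_words += 2 * (num_double_args - num_double_registers);
    }
  } else {
    // Soft-float: every double consumes two core-register slots.
    num_reg_args += 2 * num_double_args;
  }
  if (num_reg_args > kRegisterPassedArgs) {
    stack_words += num_reg_args - kRegisterPassedArgs;
  }
  return stack_words;
}

With hardfloat == false, num_reg_args == 2 and num_double_args == 3 this returns 4, matching the worked example above.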
3415 
3416 
3417 void MacroAssembler::EmitSeqStringSetCharCheck(Register string,
3418  Register index,
3419  Register value,
3420  uint32_t encoding_mask) {
3421  Label is_object;
3422  SmiTst(string);
3423  Check(ne, kNonObject);
3424 
3425  ldr(ip, FieldMemOperand(string, HeapObject::kMapOffset));
3426  ldrb(ip, FieldMemOperand(ip, Map::kInstanceTypeOffset));
3427 
3428  and_(ip, ip, Operand(kStringRepresentationMask | kStringEncodingMask));
3429  cmp(ip, Operand(encoding_mask));
3430  Check(eq, kUnexpectedStringType);
3431 
3432  // The index is assumed to be untagged coming in; tag it to compare with the
3433  // string length without using a temp register. It is restored at the end of
3434  // this function.
3435  Label index_tag_ok, index_tag_bad;
3436  TrySmiTag(index, index, &index_tag_bad);
3437  b(&index_tag_ok);
3438  bind(&index_tag_bad);
3439  Abort(kIndexIsTooLarge);
3440  bind(&index_tag_ok);
3441 
3442  ldr(ip, FieldMemOperand(string, String::kLengthOffset));
3443  cmp(index, ip);
3444  Check(lt, kIndexIsTooLarge);
3445 
3446  cmp(index, Operand(Smi::FromInt(0)));
3447  Check(ge, kIndexIsNegative);
3448 
3449  SmiUntag(index, index);
3450 }
3451 
3452 
3453 void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
3454  int num_double_arguments,
3455  Register scratch) {
3456  int frame_alignment = ActivationFrameAlignment();
3457  int stack_passed_arguments = CalculateStackPassedWords(
3458  num_reg_arguments, num_double_arguments);
3459  if (frame_alignment > kPointerSize) {
3460  // Make stack end at alignment and make room for num_arguments - 4 words
3461  // and the original value of sp.
3462  mov(scratch, sp);
3463  sub(sp, sp, Operand((stack_passed_arguments + 1) * kPointerSize));
3464  ASSERT(IsPowerOf2(frame_alignment));
3465  and_(sp, sp, Operand(-frame_alignment));
3466  str(scratch, MemOperand(sp, stack_passed_arguments * kPointerSize));
3467  } else {
3468  sub(sp, sp, Operand(stack_passed_arguments * kPointerSize));
3469  }
3470 }
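The pointer arithmetic in the aligned branch above can be summarised as: reserve the stack-passed slots plus one extra word, round sp down to the frame alignment, and keep the original sp in that extra word so it can be reloaded after the call. A sketch of just that arithmetic (no memory is touched; the caller would store the saved sp at sp + stack_passed_words * pointer_size, which is exactly where CallCFunctionHelper reloads it from):

#include <cassert>
#include <cstdint>

// Sketch of the sp adjustment in PrepareCallCFunction for the case where
// the frame alignment exceeds the pointer size.
static uintptr_t AlignedSpSketch(uintptr_t sp, int stack_passed_words,
                                 uintptr_t frame_alignment,
                                 uintptr_t pointer_size) {
  assert((frame_alignment & (frame_alignment - 1)) == 0);  // power of two
  sp -= (static_cast<uintptr_t>(stack_passed_words) + 1) * pointer_size;
  sp &= ~(frame_alignment - 1);  // round down to the alignment
  return sp;
}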
3471 
3472 
3473 void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
3474  Register scratch) {
3475  PrepareCallCFunction(num_reg_arguments, 0, scratch);
3476 }
3477 
3478 
3479 void MacroAssembler::MovToFloatParameter(DwVfpRegister src) {
3480  ASSERT(src.is(d0));
3481  if (!use_eabi_hardfloat()) {
3482  vmov(r0, r1, src);
3483  }
3484 }
3485 
3486 
3487 // On ARM this is just a synonym to make the purpose clear.
3488 void MacroAssembler::MovToFloatResult(DwVfpRegister src) {
3489  MovToFloatParameter(src);
3490 }
3491 
3492 
3493 void MacroAssembler::MovToFloatParameters(DwVfpRegister src1,
3494  DwVfpRegister src2) {
3495  ASSERT(src1.is(d0));
3496  ASSERT(src2.is(d1));
3497  if (!use_eabi_hardfloat()) {
3498  vmov(r0, r1, src1);
3499  vmov(r2, r3, src2);
3500  }
3501 }
3502 
3503 
3504 void MacroAssembler::CallCFunction(ExternalReference function,
3505  int num_reg_arguments,
3506  int num_double_arguments) {
3507  mov(ip, Operand(function));
3508  CallCFunctionHelper(ip, num_reg_arguments, num_double_arguments);
3509 }
3510 
3511 
3512 void MacroAssembler::CallCFunction(Register function,
3513  int num_reg_arguments,
3514  int num_double_arguments) {
3515  CallCFunctionHelper(function, num_reg_arguments, num_double_arguments);
3516 }
3517 
3518 
3519 void MacroAssembler::CallCFunction(ExternalReference function,
3520  int num_arguments) {
3521  CallCFunction(function, num_arguments, 0);
3522 }
3523 
3524 
3525 void MacroAssembler::CallCFunction(Register function,
3526  int num_arguments) {
3527  CallCFunction(function, num_arguments, 0);
3528 }
3529 
3530 
3531 void MacroAssembler::CallCFunctionHelper(Register function,
3532  int num_reg_arguments,
3533  int num_double_arguments) {
3534  ASSERT(has_frame());
3535  // Make sure that the stack is aligned before calling a C function unless
3536  // running in the simulator. The simulator has its own alignment check which
3537  // provides more information.
3538 #if V8_HOST_ARCH_ARM
3539  if (emit_debug_code()) {
3540  int frame_alignment = OS::ActivationFrameAlignment();
3541  int frame_alignment_mask = frame_alignment - 1;
3542  if (frame_alignment > kPointerSize) {
3543  ASSERT(IsPowerOf2(frame_alignment));
3544  Label alignment_as_expected;
3545  tst(sp, Operand(frame_alignment_mask));
3546  b(eq, &alignment_as_expected);
3547  // Don't use Check here, as it will call Runtime_Abort, possibly
3548  // re-entering here.
3549  stop("Unexpected alignment");
3550  bind(&alignment_as_expected);
3551  }
3552  }
3553 #endif
3554 
3555  // Just call directly. The function called cannot cause a GC, or
3556  // allow preemption, so the return address in the link register
3557  // stays correct.
3558  Call(function);
3559  int stack_passed_arguments = CalculateStackPassedWords(
3560  num_reg_arguments, num_double_arguments);
3561  if (ActivationFrameAlignment() > kPointerSize) {
3562  ldr(sp, MemOperand(sp, stack_passed_arguments * kPointerSize));
3563  } else {
3564  add(sp, sp, Operand(stack_passed_arguments * sizeof(kPointerSize)));
3565  }
3566 }
3567 
3568 
3569 void MacroAssembler::GetRelocatedValueLocation(Register ldr_location,
3570  Register result) {
3571  const uint32_t kLdrOffsetMask = (1 << 12) - 1;
3572  ldr(result, MemOperand(ldr_location));
3573  if (emit_debug_code()) {
3574  // Check that the instruction is a ldr reg, [<pc or pp> + offset].
3575  if (FLAG_enable_ool_constant_pool) {
3576  and_(result, result, Operand(kLdrPpPattern));
3577  cmp(result, Operand(kLdrPpPattern));
3578  Check(eq, kTheInstructionToPatchShouldBeALoadFromPp);
3579  } else {
3580  and_(result, result, Operand(kLdrPCPattern));
3581  cmp(result, Operand(kLdrPCPattern));
3582  Check(eq, kTheInstructionToPatchShouldBeALoadFromPc);
3583  }
3584  // Result was clobbered. Restore it.
3585  ldr(result, MemOperand(ldr_location));
3586  }
3587  // Get the address of the constant.
3588  and_(result, result, Operand(kLdrOffsetMask));
3589  if (FLAG_enable_ool_constant_pool) {
3590  add(result, pp, Operand(result));
3591  } else {
3592  add(result, ldr_location, Operand(result));
3593  add(result, result, Operand(Instruction::kPCReadOffset));
3594  }
3595 }
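The address computation above relies on the ARM encoding of ldr with a 12-bit immediate offset: the offset lives in the low bits of the instruction, and a pc-relative load reads the pc as the instruction address plus 8. A sketch of the decoding, with the base value and pc bias supplied by the caller; like the code above, it ignores the sign (U) bit and assumes an add-form offset:

#include <cstdint>

// Sketch of the offset decoding in GetRelocatedValueLocation: an ARM
// "ldr rd, [pc, #imm]" / "ldr rd, [pp, #imm]" keeps a 12-bit byte offset
// in the low bits of the instruction.
static uintptr_t ConstantAddressSketch(uint32_t ldr_instruction,
                                       uintptr_t base_register_value,
                                       int pc_read_offset /* 8 for pc, 0 for pp */) {
  const uint32_t kLdrOffsetMask = (1u << 12) - 1;
  uint32_t offset = ldr_instruction & kLdrOffsetMask;
  return base_register_value + offset + pc_read_offset;
}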
3596 
3597 
3598 void MacroAssembler::CheckPageFlag(
3599  Register object,
3600  Register scratch,
3601  int mask,
3602  Condition cc,
3603  Label* condition_met) {
3604  Bfc(scratch, object, 0, kPageSizeBits);
3605  ldr(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
3606  tst(scratch, Operand(mask));
3607  b(cc, condition_met);
3608 }
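Because MemoryChunk headers start at page-aligned addresses, clearing the low kPageSizeBits of any object address lands on the header of its page, where a flags word can be tested. A sketch under an assumed layout; page_size_bits and flags_offset are illustrative parameters, not V8's MemoryChunk constants:

#include <cstddef>
#include <cstdint>

// Sketch of CheckPageFlag: mask the object address down to its page
// start, read a 32-bit flags word at a fixed offset, and test the mask.
static bool PageFlagSetSketch(uintptr_t object_address, uint32_t mask,
                              int page_size_bits, size_t flags_offset) {
  uintptr_t page_start =
      object_address & ~((uintptr_t{1} << page_size_bits) - 1);
  const uint32_t* flags =
      reinterpret_cast<const uint32_t*>(page_start + flags_offset);
  return (*flags & mask) != 0;
}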
3609 
3610 
3611 void MacroAssembler::CheckMapDeprecated(Handle<Map> map,
3612  Register scratch,
3613  Label* if_deprecated) {
3614  if (map->CanBeDeprecated()) {
3615  mov(scratch, Operand(map));
3616  ldr(scratch, FieldMemOperand(scratch, Map::kBitField3Offset));
3617  tst(scratch, Operand(Smi::FromInt(Map::Deprecated::kMask)));
3618  b(ne, if_deprecated);
3619  }
3620 }
3621 
3622 
3623 void MacroAssembler::JumpIfBlack(Register object,
3624  Register scratch0,
3625  Register scratch1,
3626  Label* on_black) {
3627  HasColor(object, scratch0, scratch1, on_black, 1, 0); // kBlackBitPattern.
3628  ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
3629 }
3630 
3631 
3632 void MacroAssembler::HasColor(Register object,
3633  Register bitmap_scratch,
3634  Register mask_scratch,
3635  Label* has_color,
3636  int first_bit,
3637  int second_bit) {
3638  ASSERT(!AreAliased(object, bitmap_scratch, mask_scratch, no_reg));
3639 
3640  GetMarkBits(object, bitmap_scratch, mask_scratch);
3641 
3642  Label other_color, word_boundary;
3643  ldr(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
3644  tst(ip, Operand(mask_scratch));
3645  b(first_bit == 1 ? eq : ne, &other_color);
3646  // Shift left 1 by adding.
3647  add(mask_scratch, mask_scratch, Operand(mask_scratch), SetCC);
3648  b(eq, &word_boundary);
3649  tst(ip, Operand(mask_scratch));
3650  b(second_bit == 1 ? ne : eq, has_color);
3651  jmp(&other_color);
3652 
3653  bind(&word_boundary);
3654  ldr(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize + kPointerSize));
3655  tst(ip, Operand(1));
3656  b(second_bit == 1 ? ne : eq, has_color);
3657  bind(&other_color);
3658 }
3659 
3660 
3661 // Detect some, but not all, common pointer-free objects. This is used by the
3662 // incremental write barrier which doesn't care about oddballs (they are always
3663 // marked black immediately so this code is not hit).
3664 void MacroAssembler::JumpIfDataObject(Register value,
3665  Register scratch,
3666  Label* not_data_object) {
3667  Label is_data_object;
3668  ldr(scratch, FieldMemOperand(value, HeapObject::kMapOffset));
3669  CompareRoot(scratch, Heap::kHeapNumberMapRootIndex);
3670  b(eq, &is_data_object);
3672  ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
3673  // If it's a string and it's not a cons string then it's an object containing
3674  // no GC pointers.
3675  ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
3676  tst(scratch, Operand(kIsIndirectStringMask | kIsNotStringMask));
3677  b(ne, not_data_object);
3678  bind(&is_data_object);
3679 }
3680 
3681 
3682 void MacroAssembler::GetMarkBits(Register addr_reg,
3683  Register bitmap_reg,
3684  Register mask_reg) {
3685  ASSERT(!AreAliased(addr_reg, bitmap_reg, mask_reg, no_reg));
3686  and_(bitmap_reg, addr_reg, Operand(~Page::kPageAlignmentMask));
3687  Ubfx(mask_reg, addr_reg, kPointerSizeLog2, Bitmap::kBitsPerCellLog2);
3688  const int kLowBits = kPointerSizeLog2 + Bitmap::kBitsPerCellLog2;
3689  Ubfx(ip, addr_reg, kLowBits, kPageSizeBits - kLowBits);
3690  add(bitmap_reg, bitmap_reg, Operand(ip, LSL, kPointerSizeLog2));
3691  mov(ip, Operand(1));
3692  mov(mask_reg, Operand(ip, LSL, mask_reg));
3693 }
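In other words, the object address is split into a page start, a bitmap cell index within the page, and a single bit within that cell (one mark bit per pointer-sized word). A stand-alone sketch of that arithmetic, with the shift amounts passed in as parameters rather than V8's Page and Bitmap constants:

#include <cstdint>

// Sketch of GetMarkBits: compute (page start, cell index, bit mask) for
// a marking bitmap with one bit per pointer-sized word. The cell address
// would be page_start + bitmap_offset + cell_index * pointer_size.
struct MarkBitLocationSketch {
  uintptr_t page_start;   // page-aligned base of the object's page
  uintptr_t cell_index;   // which bitmap cell inside the page
  uint32_t mask;          // single bit inside that cell
};

static MarkBitLocationSketch GetMarkBitsSketch(uintptr_t addr,
                                               int page_size_bits,      // e.g. 20
                                               int pointer_size_log2,   // e.g. 2
                                               int bits_per_cell_log2)  // e.g. 5
{
  MarkBitLocationSketch loc;
  loc.page_start = addr & ~((uintptr_t{1} << page_size_bits) - 1);
  // The low address bits (above the word size) select the bit in a cell...
  uint32_t bit = static_cast<uint32_t>(
      (addr >> pointer_size_log2) & ((uintptr_t{1} << bits_per_cell_log2) - 1));
  loc.mask = uint32_t{1} << bit;
  // ...and the remaining in-page bits select the cell.
  int low_bits = pointer_size_log2 + bits_per_cell_log2;
  loc.cell_index = (addr >> low_bits) &
                   ((uintptr_t{1} << (page_size_bits - low_bits)) - 1);
  return loc;
}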
3694 
3695 
3696 void MacroAssembler::EnsureNotWhite(
3697  Register value,
3698  Register bitmap_scratch,
3699  Register mask_scratch,
3700  Register load_scratch,
3701  Label* value_is_white_and_not_data) {
3702  ASSERT(!AreAliased(value, bitmap_scratch, mask_scratch, ip));
3703  GetMarkBits(value, bitmap_scratch, mask_scratch);
3704 
3705  // If the value is black or grey we don't need to do anything.
3706  ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0);
3707  ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
3708  ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0);
3709  ASSERT(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
3710 
3711  Label done;
3712 
3713  // Since both black and grey have a 1 in the first position and white does
3714  // not have a 1 there we only need to check one bit.
3715  ldr(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
3716  tst(mask_scratch, load_scratch);
3717  b(ne, &done);
3718 
3719  if (emit_debug_code()) {
3720  // Check for impossible bit pattern.
3721  Label ok;
3722  // LSL may overflow, making the check conservative.
3723  tst(load_scratch, Operand(mask_scratch, LSL, 1));
3724  b(eq, &ok);
3725  stop("Impossible marking bit pattern");
3726  bind(&ok);
3727  }
3728 
3729  // Value is white. We check whether it is data that doesn't need scanning.
3730  // Currently only checks for HeapNumber and non-cons strings.
3731  Register map = load_scratch; // Holds map while checking type.
3732  Register length = load_scratch; // Holds length of object after testing type.
3733  Label is_data_object;
3734 
3735  // Check for heap-number
3736  ldr(map, FieldMemOperand(value, HeapObject::kMapOffset));
3737  CompareRoot(map, Heap::kHeapNumberMapRootIndex);
3738  mov(length, Operand(HeapNumber::kSize), LeaveCC, eq);
3739  b(eq, &is_data_object);
3740 
3741  // Check for strings.
3743  ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
3744  // If it's a string and it's not a cons string then it's an object containing
3745  // no GC pointers.
3746  Register instance_type = load_scratch;
3747  ldrb(instance_type, FieldMemOperand(map, Map::kInstanceTypeOffset));
3748  tst(instance_type, Operand(kIsIndirectStringMask | kIsNotStringMask));
3749  b(ne, value_is_white_and_not_data);
3750  // It's a non-indirect (non-cons and non-slice) string.
3751  // If it's external, the length is just ExternalString::kSize.
3752  // Otherwise it's String::kHeaderSize + string->length() * (1 or 2).
3753  // External strings are the only ones with the kExternalStringTag bit
3754  // set.
3757  tst(instance_type, Operand(kExternalStringTag));
3758  mov(length, Operand(ExternalString::kSize), LeaveCC, ne);
3759  b(ne, &is_data_object);
3760 
3761  // Sequential string, either ASCII or UC16.
3762  // For ASCII (char-size of 1) we shift the smi tag away to get the length.
3763  // For UC16 (char-size of 2) we just leave the smi tag in place, thereby
3764  // getting the length multiplied by 2.
3766  ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
3767  ldr(ip, FieldMemOperand(value, String::kLengthOffset));
3768  tst(instance_type, Operand(kStringEncodingMask));
3769  mov(ip, Operand(ip, LSR, 1), LeaveCC, ne);
3770  add(length, ip, Operand(SeqString::kHeaderSize + kObjectAlignmentMask));
3771  and_(length, length, Operand(~kObjectAlignmentMask));
3772 
3773  bind(&is_data_object);
3774  // Value is a data object, and it is white. Mark it black. Since we know
3775  // that the object is white we can make it black by flipping one bit.
3776  ldr(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
3777  orr(ip, ip, Operand(mask_scratch));
3778  str(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
3779 
3780  and_(bitmap_scratch, bitmap_scratch, Operand(~Page::kPageAlignmentMask));
3781  ldr(ip, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
3782  add(ip, ip, Operand(length));
3783  str(ip, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
3784 
3785  bind(&done);
3786 }
3787 
3788 
3789 void MacroAssembler::ClampUint8(Register output_reg, Register input_reg) {
3790  Usat(output_reg, 8, Operand(input_reg));
3791 }
3792 
3793 
3794 void MacroAssembler::ClampDoubleToUint8(Register result_reg,
3795  DwVfpRegister input_reg,
3796  LowDwVfpRegister double_scratch) {
3797  Label above_zero;
3798  Label done;
3799  Label in_bounds;
3800 
3801  VFPCompareAndSetFlags(input_reg, 0.0);
3802  b(gt, &above_zero);
3803 
3804  // Double value is less than zero, NaN or Inf, return 0.
3805  mov(result_reg, Operand::Zero());
3806  b(al, &done);
3807 
3808  // Double value is >= 255, return 255.
3809  bind(&above_zero);
3810  Vmov(double_scratch, 255.0, result_reg);
3811  VFPCompareAndSetFlags(input_reg, double_scratch);
3812  b(le, &in_bounds);
3813  mov(result_reg, Operand(255));
3814  b(al, &done);
3815 
3816  // In 0-255 range, round and truncate.
3817  bind(&in_bounds);
3818  // Save FPSCR.
3819  vmrs(ip);
3820  // Set rounding mode to round to the nearest integer by clearing bits[23:22].
3821  bic(result_reg, ip, Operand(kVFPRoundingModeMask));
3822  vmsr(result_reg);
3823  vcvt_s32_f64(double_scratch.low(), input_reg, kFPSCRRounding);
3824  vmov(result_reg, double_scratch.low());
3825  // Restore FPSCR.
3826  vmsr(ip);
3827  bind(&done);
3828 }
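The same clamp in portable C++ terms: NaN and anything at or below zero map to 0, anything at or above 255 maps to 255, and values in between are rounded to nearest. This is a sketch, not the code V8 emits; it assumes the default FE_TONEAREST rounding mode, which matches the round-to-nearest mode the assembly above selects in the FPSCR.

#include <cmath>
#include <cstdint>

// Sketch of ClampDoubleToUint8.
static uint8_t ClampDoubleToUint8Sketch(double value) {
  if (!(value > 0.0)) return 0;   // catches NaN, zero and negatives
  if (value >= 255.0) return 255;
  return static_cast<uint8_t>(std::nearbyint(value));
}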
3829 
3830 
3831 void MacroAssembler::Throw(BailoutReason reason) {
3832  Label throw_start;
3833  bind(&throw_start);
3834 #ifdef DEBUG
3835  const char* msg = GetBailoutReason(reason);
3836  if (msg != NULL) {
3837  RecordComment("Throw message: ");
3838  RecordComment(msg);
3839  }
3840 #endif
3841 
3842  mov(r0, Operand(Smi::FromInt(reason)));
3843  push(r0);
3844  // Disable stub call restrictions to always allow calls to throw.
3845  if (!has_frame_) {
3846  // We don't actually want to generate a pile of code for this, so just
3847  // claim there is a stack frame, without generating one.
3848  FrameScope scope(this, StackFrame::NONE);
3849  CallRuntime(Runtime::kHiddenThrowMessage, 1);
3850  } else {
3851  CallRuntime(Runtime::kHiddenThrowMessage, 1);
3852  }
3853  // will not return here
3854  if (is_const_pool_blocked()) {
3855  // If the calling code cares about the exact number of
3856  // instructions generated, we insert padding here to keep the size
3857  // of the ThrowMessage macro constant.
3858  static const int kExpectedThrowMessageInstructions = 10;
3859  int throw_instructions = InstructionsGeneratedSince(&throw_start);
3860  ASSERT(throw_instructions <= kExpectedThrowMessageInstructions);
3861  while (throw_instructions++ < kExpectedThrowMessageInstructions) {
3862  nop();
3863  }
3864  }
3865 }
3866 
3867 
3868 void MacroAssembler::ThrowIf(Condition cc, BailoutReason reason) {
3869  Label L;
3870  b(NegateCondition(cc), &L);
3871  Throw(reason);
3872  // will not return here
3873  bind(&L);
3874 }
3875 
3876 
3877 void MacroAssembler::LoadInstanceDescriptors(Register map,
3878  Register descriptors) {
3879  ldr(descriptors, FieldMemOperand(map, Map::kDescriptorsOffset));
3880 }
3881 
3882 
3883 void MacroAssembler::NumberOfOwnDescriptors(Register dst, Register map) {
3884  ldr(dst, FieldMemOperand(map, Map::kBitField3Offset));
3885  DecodeField<Map::NumberOfOwnDescriptorsBits>(dst);
3886 }
3887 
3888 
3889 void MacroAssembler::EnumLength(Register dst, Register map) {
3890  STATIC_ASSERT(Map::EnumLengthBits::kShift == 0);
3891  ldr(dst, FieldMemOperand(map, Map::kBitField3Offset));
3892  and_(dst, dst, Operand(Smi::FromInt(Map::EnumLengthBits::kMask)));
3893 }
3894 
3895 
3896 void MacroAssembler::CheckEnumCache(Register null_value, Label* call_runtime) {
3897  Register empty_fixed_array_value = r6;
3898  LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
3899  Label next, start;
3900  mov(r2, r0);
3901 
3902  // Check if the enum length field is properly initialized, indicating that
3903  // there is an enum cache.
3904  ldr(r1, FieldMemOperand(r2, HeapObject::kMapOffset));
3905 
3906  EnumLength(r3, r1);
3907  cmp(r3, Operand(Smi::FromInt(kInvalidEnumCacheSentinel)));
3908  b(eq, call_runtime);
3909 
3910  jmp(&start);
3911 
3912  bind(&next);
3913  ldr(r1, FieldMemOperand(r2, HeapObject::kMapOffset));
3914 
3915  // For all objects but the receiver, check that the cache is empty.
3916  EnumLength(r3, r1);
3917  cmp(r3, Operand(Smi::FromInt(0)));
3918  b(ne, call_runtime);
3919 
3920  bind(&start);
3921 
3922  // Check that there are no elements. Register r2 contains the current JS
3923  // object we've reached through the prototype chain.
3924  Label no_elements;
3925  ldr(r2, FieldMemOperand(r2, JSObject::kElementsOffset));
3926  cmp(r2, empty_fixed_array_value);
3927  b(eq, &no_elements);
3928 
3929  // Second chance: the object may be using the empty slow element dictionary.
3930  CompareRoot(r2, Heap::kEmptySlowElementDictionaryRootIndex);
3931  b(ne, call_runtime);
3932 
3933  bind(&no_elements);
3934  ldr(r2, FieldMemOperand(r1, Map::kPrototypeOffset));
3935  cmp(r2, null_value);
3936  b(ne, &next);
3937 }
3938 
3939 
3940 void MacroAssembler::TestJSArrayForAllocationMemento(
3941  Register receiver_reg,
3942  Register scratch_reg,
3943  Label* no_memento_found) {
3944  ExternalReference new_space_start =
3945  ExternalReference::new_space_start(isolate());
3946  ExternalReference new_space_allocation_top =
3947  ExternalReference::new_space_allocation_top_address(isolate());
3948  add(scratch_reg, receiver_reg,
3949  Operand(JSArray::kSize + AllocationMemento::kSize - kHeapObjectTag));
3950  cmp(scratch_reg, Operand(new_space_start));
3951  b(lt, no_memento_found);
3952  mov(ip, Operand(new_space_allocation_top));
3953  ldr(ip, MemOperand(ip));
3954  cmp(scratch_reg, ip);
3955  b(gt, no_memento_found);
3956  ldr(scratch_reg, MemOperand(scratch_reg, -AllocationMemento::kSize));
3957  cmp(scratch_reg,
3958  Operand(isolate()->factory()->allocation_memento_map()));
3959 }
3960 
3961 
3962 Register GetRegisterThatIsNotOneOf(Register reg1,
3963  Register reg2,
3964  Register reg3,
3965  Register reg4,
3966  Register reg5,
3967  Register reg6) {
3968  RegList regs = 0;
3969  if (reg1.is_valid()) regs |= reg1.bit();
3970  if (reg2.is_valid()) regs |= reg2.bit();
3971  if (reg3.is_valid()) regs |= reg3.bit();
3972  if (reg4.is_valid()) regs |= reg4.bit();
3973  if (reg5.is_valid()) regs |= reg5.bit();
3974  if (reg6.is_valid()) regs |= reg6.bit();
3975 
3976  for (int i = 0; i < Register::NumAllocatableRegisters(); i++) {
3977  Register candidate = Register::FromAllocationIndex(i);
3978  if (regs & candidate.bit()) continue;
3979  return candidate;
3980  }
3981  UNREACHABLE();
3982  return no_reg;
3983 }
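The lookup above is just "first allocatable register whose bit is not set in the taken mask". A sketch with plain integer register codes instead of Register objects:

#include <cstdint>

// Sketch of GetRegisterThatIsNotOneOf: return the code of the first
// allocatable register whose bit is clear in the taken mask.
static int FirstFreeRegisterSketch(uint32_t taken_mask, int num_allocatable) {
  for (int code = 0; code < num_allocatable; ++code) {
    if ((taken_mask & (1u << code)) == 0) return code;
  }
  return -1;  // no free register (the real code hits UNREACHABLE())
}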
3984 
3985 
3986 void MacroAssembler::JumpIfDictionaryInPrototypeChain(
3987  Register object,
3988  Register scratch0,
3989  Register scratch1,
3990  Label* found) {
3991  ASSERT(!scratch1.is(scratch0));
3992  Factory* factory = isolate()->factory();
3993  Register current = scratch0;
3994  Label loop_again;
3995 
3996  // current (scratch0) starts at the object before walking the prototype chain.
3997  mov(current, object);
3998 
3999  // Loop based on the map going up the prototype chain.
4000  bind(&loop_again);
4001  ldr(current, FieldMemOperand(current, HeapObject::kMapOffset));
4002  ldr(scratch1, FieldMemOperand(current, Map::kBitField2Offset));
4003  Ubfx(scratch1, scratch1, Map::kElementsKindShift, Map::kElementsKindBitCount);
4004  cmp(scratch1, Operand(DICTIONARY_ELEMENTS));
4005  b(eq, found);
4006  ldr(current, FieldMemOperand(current, Map::kPrototypeOffset));
4007  cmp(current, Operand(factory->null_value()));
4008  b(ne, &loop_again);
4009 }
4010 
4011 
4012 #ifdef DEBUG
4013 bool AreAliased(Register reg1,
4014  Register reg2,
4015  Register reg3,
4016  Register reg4,
4017  Register reg5,
4018  Register reg6) {
4019  int n_of_valid_regs = reg1.is_valid() + reg2.is_valid() +
4020  reg3.is_valid() + reg4.is_valid() + reg5.is_valid() + reg6.is_valid();
4021 
4022  RegList regs = 0;
4023  if (reg1.is_valid()) regs |= reg1.bit();
4024  if (reg2.is_valid()) regs |= reg2.bit();
4025  if (reg3.is_valid()) regs |= reg3.bit();
4026  if (reg4.is_valid()) regs |= reg4.bit();
4027  if (reg5.is_valid()) regs |= reg5.bit();
4028  if (reg6.is_valid()) regs |= reg6.bit();
4029  int n_of_non_aliasing_regs = NumRegs(regs);
4030 
4031  return n_of_valid_regs != n_of_non_aliasing_regs;
4032 }
4033 #endif
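The aliasing test boils down to comparing the number of valid registers with the number of distinct bits they contribute to a combined mask; any duplicated register collapses into a single bit. A sketch with plain integer register codes (negative meaning "no register"):

#include <bitset>
#include <cstdint>

// Sketch of AreAliased: registers alias exactly when the count of valid
// registers differs from the count of distinct bits in their combined mask.
static bool AreAliasedSketch(const int* reg_codes, int count) {
  uint32_t mask = 0;
  int valid = 0;
  for (int i = 0; i < count; ++i) {
    if (reg_codes[i] < 0) continue;  // treat a negative code as no_reg
    ++valid;
    mask |= 1u << reg_codes[i];
  }
  int distinct = static_cast<int>(std::bitset<32>(mask).count());
  return valid != distinct;
}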
4034 
4035 
4036 CodePatcher::CodePatcher(byte* address,
4037  int instructions,
4038  FlushICache flush_cache)
4039  : address_(address),
4040  size_(instructions * Assembler::kInstrSize),
4041  masm_(NULL, address, size_ + Assembler::kGap),
4042  flush_cache_(flush_cache) {
4043  // Create a new macro assembler pointing to the address of the code to patch.
4044  // The size is adjusted with kGap in order for the assembler to generate size
4045  // bytes of instructions without failing with buffer size constraints.
4046  ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
4047 }
4048 
4049 
4050 CodePatcher::~CodePatcher() {
4051  // Indicate that code has changed.
4052  if (flush_cache_ == FLUSH) {
4053  CPU::FlushICache(address_, size_);
4054  }
4055 
4056  // Check that the code was patched as expected.
4057  ASSERT(masm_.pc_ == address_ + size_);
4058  ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
4059 }
4060 
4061 
4062 void CodePatcher::Emit(Instr instr) {
4063  masm()->emit(instr);
4064 }
4065 
4066 
4067 void CodePatcher::Emit(Address addr) {
4068  masm()->emit(reinterpret_cast<Instr>(addr));
4069 }
4070 
4071 
4072 void CodePatcher::EmitCondition(Condition cond) {
4073  Instr instr = Assembler::instr_at(masm_.pc_);
4074  instr = (instr & ~kCondMask) | cond;
4075  masm_.emit(instr);
4076 }
4077 
4078 
4079 void MacroAssembler::TruncatingDiv(Register result,
4080  Register dividend,
4081  int32_t divisor) {
4082  ASSERT(!dividend.is(result));
4083  ASSERT(!dividend.is(ip));
4084  ASSERT(!result.is(ip));
4085  MultiplierAndShift ms(divisor);
4086  mov(ip, Operand(ms.multiplier()));
4087  smull(ip, result, dividend, ip);
4088  if (divisor > 0 && ms.multiplier() < 0) {
4089  add(result, result, Operand(dividend));
4090  }
4091  if (divisor < 0 && ms.multiplier() > 0) {
4092  sub(result, result, Operand(dividend));
4093  }
4094  if (ms.shift() > 0) mov(result, Operand(result, ASR, ms.shift()));
4095  add(result, result, Operand(dividend, LSR, 31));
4096 }
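TruncatingDiv divides by a constant via a precomputed multiplier-and-shift pair; the sign-dependent correction terms and the final addition of the dividend's sign bit recover C-style truncation toward zero. A stand-alone sketch of the same sequence, with the multiplier and shift taken as given (in V8 they come from the MultiplierAndShift helper; here they are plain parameters):

#include <cstdint>

// Sketch of TruncatingDiv: signed division by a constant, mirroring the
// smull/add/sub/asr sequence above.
static int32_t TruncatingDivSketch(int32_t dividend, int32_t divisor,
                                   int32_t multiplier, int shift) {
  // High 32 bits of the 64-bit signed product (what smull leaves in 'result').
  int32_t result = static_cast<int32_t>(
      (static_cast<int64_t>(dividend) * multiplier) >> 32);
  // Correction terms for the sign of the multiplier vs. the divisor.
  if (divisor > 0 && multiplier < 0) result += dividend;
  if (divisor < 0 && multiplier > 0) result -= dividend;
  if (shift > 0) result >>= shift;  // arithmetic shift of the quotient estimate
  // Add the dividend's sign bit to truncate toward zero.
  result += static_cast<int32_t>(static_cast<uint32_t>(dividend) >> 31);
  return result;
}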
4097 
4098 
4099 } } // namespace v8::internal
4100 
4101 #endif // V8_TARGET_ARCH_ARM