v8 3.14.5 (node 0.10.28)
V8 is Google's open source JavaScript engine
macro-assembler-arm.cc
1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
4 // met:
5 //
6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided
11 // with the distribution.
12 // * Neither the name of Google Inc. nor the names of its
13 // contributors may be used to endorse or promote products derived
14 // from this software without specific prior written permission.
15 //
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 
28 #include <limits.h> // For LONG_MIN, LONG_MAX.
29 
30 #include "v8.h"
31 
32 #if defined(V8_TARGET_ARCH_ARM)
33 
34 #include "bootstrapper.h"
35 #include "codegen.h"
36 #include "debug.h"
37 #include "runtime.h"
38 
39 namespace v8 {
40 namespace internal {
41 
42 MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
43  : Assembler(arg_isolate, buffer, size),
44  generating_stub_(false),
45  allow_stub_calls_(true),
46  has_frame_(false) {
47  if (isolate() != NULL) {
48  code_object_ = Handle<Object>(isolate()->heap()->undefined_value(),
49  isolate());
50  }
51 }
52 
53 
54 // We always generate arm code, never thumb code, even if V8 is compiled to
55 // thumb, so we require inter-working support.
56 #if defined(__thumb__) && !defined(USE_THUMB_INTERWORK)
57 #error "flag -mthumb-interwork missing"
58 #endif
59 
60 
61 // We do not support thumb inter-working with an arm architecture not supporting
62 // the blx instruction (below v5t). If you know what CPU you are compiling for
63 // you can use -march=armv7 or similar.
64 #if defined(USE_THUMB_INTERWORK) && !defined(CAN_USE_THUMB_INSTRUCTIONS)
65 # error "For thumb inter-working we require an architecture which supports blx"
66 #endif
67 
68 
69 // Using bx does not yield better code, so use it only when required
70 #if defined(USE_THUMB_INTERWORK)
71 #define USE_BX 1
72 #endif
73 
74 
75 void MacroAssembler::Jump(Register target, Condition cond) {
76 #if USE_BX
77  bx(target, cond);
78 #else
79  mov(pc, Operand(target), LeaveCC, cond);
80 #endif
81 }
82 
83 
84 void MacroAssembler::Jump(intptr_t target, RelocInfo::Mode rmode,
85  Condition cond) {
86 #if USE_BX
87  mov(ip, Operand(target, rmode));
88  bx(ip, cond);
89 #else
90  mov(pc, Operand(target, rmode), LeaveCC, cond);
91 #endif
92 }
93 
94 
95 void MacroAssembler::Jump(Address target, RelocInfo::Mode rmode,
96  Condition cond) {
97  ASSERT(!RelocInfo::IsCodeTarget(rmode));
98  Jump(reinterpret_cast<intptr_t>(target), rmode, cond);
99 }
100 
101 
102 void MacroAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
103  Condition cond) {
104  ASSERT(RelocInfo::IsCodeTarget(rmode));
105  // 'code' is always generated ARM code, never THUMB code
106  Jump(reinterpret_cast<intptr_t>(code.location()), rmode, cond);
107 }
108 
109 
110 int MacroAssembler::CallSize(Register target, Condition cond) {
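  // A register call is a single blx when BLX is available; otherwise it is a
  // mov to lr followed by a mov to pc (see Call(Register) below).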
111 #ifdef USE_BLX
112  return kInstrSize;
113 #else
114  return 2 * kInstrSize;
115 #endif
116 }
117 
118 
119 void MacroAssembler::Call(Register target, Condition cond) {
120  // Block constant pool for the call instruction sequence.
121  BlockConstPoolScope block_const_pool(this);
122  Label start;
123  bind(&start);
124 #ifdef USE_BLX
125  blx(target, cond);
126 #else
127  // set lr for return at current pc + 8
128  mov(lr, Operand(pc), LeaveCC, cond);
129  mov(pc, Operand(target), LeaveCC, cond);
130 #endif
131  ASSERT_EQ(CallSize(target, cond), SizeOfCodeGeneratedSince(&start));
132 }
133 
134 
135 int MacroAssembler::CallSize(
136  Address target, RelocInfo::Mode rmode, Condition cond) {
137  int size = 2 * kInstrSize;
138  Instr mov_instr = cond | MOV | LeaveCC;
139  intptr_t immediate = reinterpret_cast<intptr_t>(target);
140  if (!Operand(immediate, rmode).is_single_instruction(this, mov_instr)) {
141  size += kInstrSize;
142  }
143  return size;
144 }
145 
146 
147 int MacroAssembler::CallSizeNotPredictableCodeSize(
148  Address target, RelocInfo::Mode rmode, Condition cond) {
149  int size = 2 * kInstrSize;
150  Instr mov_instr = cond | MOV | LeaveCC;
151  intptr_t immediate = reinterpret_cast<intptr_t>(target);
152  if (!Operand(immediate, rmode).is_single_instruction(NULL, mov_instr)) {
153  size += kInstrSize;
154  }
155  return size;
156 }
157 
158 
159 void MacroAssembler::Call(Address target,
160  RelocInfo::Mode rmode,
161  Condition cond,
162  TargetAddressStorageMode mode) {
163  // Block constant pool for the call instruction sequence.
164  BlockConstPoolScope block_const_pool(this);
165  Label start;
166  bind(&start);
167 
168  bool old_predictable_code_size = predictable_code_size();
169  if (mode == NEVER_INLINE_TARGET_ADDRESS) {
170  set_predictable_code_size(true);
171  }
172 
173 #ifdef USE_BLX
174  // Call sequence on V7 or later may be:
175  // movw ip, #... @ call address low 16
176  // movt ip, #... @ call address high 16
177  // blx ip
178  // @ return address
179  // Or for pre-V7 or values that may be back-patched
180  // to avoid ICache flushes:
181  // ldr ip, [pc, #...] @ call address
182  // blx ip
183  // @ return address
184 
185  // Statement positions are expected to be recorded when the target
186  // address is loaded. The mov method will automatically record
187  // positions when pc is the target. Since this is not the case here,
188  // we have to do it explicitly.
189  positions_recorder()->WriteRecordedPositions();
190 
191  mov(ip, Operand(reinterpret_cast<int32_t>(target), rmode));
192  blx(ip, cond);
193 
194 #else
195  // Set lr for return at current pc + 8.
196  mov(lr, Operand(pc), LeaveCC, cond);
197  // Emit a ldr<cond> pc, [pc + offset of target in constant pool].
198  mov(pc, Operand(reinterpret_cast<int32_t>(target), rmode), LeaveCC, cond);
199 #endif
200  ASSERT_EQ(CallSize(target, rmode, cond), SizeOfCodeGeneratedSince(&start));
201  if (mode == NEVER_INLINE_TARGET_ADDRESS) {
202  set_predictable_code_size(old_predictable_code_size);
203  }
204 }
205 
206 
207 int MacroAssembler::CallSize(Handle<Code> code,
208  RelocInfo::Mode rmode,
209  TypeFeedbackId ast_id,
210  Condition cond) {
211  return CallSize(reinterpret_cast<Address>(code.location()), rmode, cond);
212 }
213 
214 
215 void MacroAssembler::Call(Handle<Code> code,
216  RelocInfo::Mode rmode,
217  TypeFeedbackId ast_id,
218  Condition cond,
219  TargetAddressStorageMode mode) {
220  Label start;
221  bind(&start);
222  ASSERT(RelocInfo::IsCodeTarget(rmode));
223  if (rmode == RelocInfo::CODE_TARGET && !ast_id.IsNone()) {
224  SetRecordedAstId(ast_id);
225  rmode = RelocInfo::CODE_TARGET_WITH_ID;
226  }
227  // 'code' is always generated ARM code, never THUMB code
228  Call(reinterpret_cast<Address>(code.location()), rmode, cond, mode);
229 }
230 
231 
232 void MacroAssembler::Ret(Condition cond) {
233 #if USE_BX
234  bx(lr, cond);
235 #else
236  mov(pc, Operand(lr), LeaveCC, cond);
237 #endif
238 }
239 
240 
241 void MacroAssembler::Drop(int count, Condition cond) {
242  if (count > 0) {
243  add(sp, sp, Operand(count * kPointerSize), LeaveCC, cond);
244  }
245 }
246 
247 
248 void MacroAssembler::Ret(int drop, Condition cond) {
249  Drop(drop, cond);
250  Ret(cond);
251 }
252 
253 
254 void MacroAssembler::Swap(Register reg1,
255  Register reg2,
256  Register scratch,
257  Condition cond) {
258  if (scratch.is(no_reg)) {
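  // No scratch register available: swap the two registers in place with the
  // three-XOR trick.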
259  eor(reg1, reg1, Operand(reg2), LeaveCC, cond);
260  eor(reg2, reg2, Operand(reg1), LeaveCC, cond);
261  eor(reg1, reg1, Operand(reg2), LeaveCC, cond);
262  } else {
263  mov(scratch, reg1, LeaveCC, cond);
264  mov(reg1, reg2, LeaveCC, cond);
265  mov(reg2, scratch, LeaveCC, cond);
266  }
267 }
268 
269 
270 void MacroAssembler::Call(Label* target) {
271  bl(target);
272 }
273 
274 
275 void MacroAssembler::Push(Handle<Object> handle) {
276  mov(ip, Operand(handle));
277  push(ip);
278 }
279 
280 
281 void MacroAssembler::Move(Register dst, Handle<Object> value) {
282  mov(dst, Operand(value));
283 }
284 
285 
286 void MacroAssembler::Move(Register dst, Register src, Condition cond) {
287  if (!dst.is(src)) {
288  mov(dst, src, LeaveCC, cond);
289  }
290 }
291 
292 
293 void MacroAssembler::Move(DoubleRegister dst, DoubleRegister src) {
294  ASSERT(CpuFeatures::IsSupported(VFP2));
295  CpuFeatures::Scope scope(VFP2);
296  if (!dst.is(src)) {
297  vmov(dst, src);
298  }
299 }
300 
301 
302 void MacroAssembler::And(Register dst, Register src1, const Operand& src2,
303  Condition cond) {
304  if (!src2.is_reg() &&
305  !src2.must_output_reloc_info(this) &&
306  src2.immediate() == 0) {
307  mov(dst, Operand(0, RelocInfo::NONE), LeaveCC, cond);
308  } else if (!src2.is_single_instruction(this) &&
309  !src2.must_output_reloc_info(this) &&
310  CpuFeatures::IsSupported(ARMv7) &&
311  IsPowerOf2(src2.immediate() + 1)) {
312  ubfx(dst, src1, 0,
313  WhichPowerOf2(static_cast<uint32_t>(src2.immediate()) + 1), cond);
314  } else {
315  and_(dst, src1, src2, LeaveCC, cond);
316  }
317 }
318 
319 
320 void MacroAssembler::Ubfx(Register dst, Register src1, int lsb, int width,
321  Condition cond) {
322  ASSERT(lsb < 32);
323  if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) {
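  // Emulate ubfx with a bit mask followed by a logical shift right.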
324  int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
325  and_(dst, src1, Operand(mask), LeaveCC, cond);
326  if (lsb != 0) {
327  mov(dst, Operand(dst, LSR, lsb), LeaveCC, cond);
328  }
329  } else {
330  ubfx(dst, src1, lsb, width, cond);
331  }
332 }
333 
334 
335 void MacroAssembler::Sbfx(Register dst, Register src1, int lsb, int width,
336  Condition cond) {
337  ASSERT(lsb < 32);
338  if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) {
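  // Emulate sbfx: mask out the field, shift it up to the top bits with LSL,
  // then shift back down with ASR so the result is sign-extended.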
339  int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
340  and_(dst, src1, Operand(mask), LeaveCC, cond);
341  int shift_up = 32 - lsb - width;
342  int shift_down = lsb + shift_up;
343  if (shift_up != 0) {
344  mov(dst, Operand(dst, LSL, shift_up), LeaveCC, cond);
345  }
346  if (shift_down != 0) {
347  mov(dst, Operand(dst, ASR, shift_down), LeaveCC, cond);
348  }
349  } else {
350  sbfx(dst, src1, lsb, width, cond);
351  }
352 }
353 
354 
355 void MacroAssembler::Bfi(Register dst,
356  Register src,
357  Register scratch,
358  int lsb,
359  int width,
360  Condition cond) {
361  ASSERT(0 <= lsb && lsb < 32);
362  ASSERT(0 <= width && width < 32);
363  ASSERT(lsb + width < 32);
364  ASSERT(!scratch.is(dst));
365  if (width == 0) return;
366  if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) {
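  // Emulate bfi: clear the destination field, mask the source down to
  // 'width' bits, shift it into position and OR it in.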
367  int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
368  bic(dst, dst, Operand(mask));
369  and_(scratch, src, Operand((1 << width) - 1));
370  mov(scratch, Operand(scratch, LSL, lsb));
371  orr(dst, dst, scratch);
372  } else {
373  bfi(dst, src, lsb, width, cond);
374  }
375 }
376 
377 
378 void MacroAssembler::Bfc(Register dst, Register src, int lsb, int width,
379  Condition cond) {
380  ASSERT(lsb < 32);
381  if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) {
382  int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
383  bic(dst, src, Operand(mask));
384  } else {
385  Move(dst, src, cond);
386  bfc(dst, lsb, width, cond);
387  }
388 }
389 
390 
391 void MacroAssembler::Usat(Register dst, int satpos, const Operand& src,
392  Condition cond) {
393  if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) {
394  ASSERT(!dst.is(pc) && !src.rm().is(pc));
395  ASSERT((satpos >= 0) && (satpos <= 31));
396 
397  // These asserts are required to ensure compatibility with the ARMv7
398  // implementation.
399  ASSERT((src.shift_op() == ASR) || (src.shift_op() == LSL));
400  ASSERT(src.rs().is(no_reg));
401 
402  Label done;
403  int satval = (1 << satpos) - 1;
404 
405  if (cond != al) {
406  b(NegateCondition(cond), &done); // Skip saturate if !condition.
407  }
408  if (!(src.is_reg() && dst.is(src.rm()))) {
409  mov(dst, src);
410  }
411  tst(dst, Operand(~satval));
412  b(eq, &done);
413  mov(dst, Operand(0, RelocInfo::NONE), LeaveCC, mi); // 0 if negative.
414  mov(dst, Operand(satval), LeaveCC, pl); // satval if positive.
415  bind(&done);
416  } else {
417  usat(dst, satpos, src, cond);
418  }
419 }
420 
421 
422 void MacroAssembler::LoadRoot(Register destination,
423  Heap::RootListIndex index,
424  Condition cond) {
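  // Roots are loaded relative to kRootRegister; the index is scaled to a
  // byte offset into the root list.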
425  ldr(destination, MemOperand(kRootRegister, index << kPointerSizeLog2), cond);
426 }
427 
428 
429 void MacroAssembler::StoreRoot(Register source,
430  Heap::RootListIndex index,
431  Condition cond) {
432  str(source, MemOperand(kRootRegister, index << kPointerSizeLog2), cond);
433 }
434 
435 
436 void MacroAssembler::LoadHeapObject(Register result,
437  Handle<HeapObject> object) {
438  if (isolate()->heap()->InNewSpace(*object)) {
439  Handle<JSGlobalPropertyCell> cell =
440  isolate()->factory()->NewJSGlobalPropertyCell(object);
441  mov(result, Operand(cell));
442  ldr(result, FieldMemOperand(result, JSGlobalPropertyCell::kValueOffset));
443  } else {
444  mov(result, Operand(object));
445  }
446 }
447 
448 
449 void MacroAssembler::InNewSpace(Register object,
450  Register scratch,
451  Condition cond,
452  Label* branch) {
453  ASSERT(cond == eq || cond == ne);
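  // An address lies in new space exactly when masking it with the new space
  // mask yields the new space start.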
454  and_(scratch, object, Operand(ExternalReference::new_space_mask(isolate())));
455  cmp(scratch, Operand(ExternalReference::new_space_start(isolate())));
456  b(cond, branch);
457 }
458 
459 
460 void MacroAssembler::RecordWriteField(
461  Register object,
462  int offset,
463  Register value,
464  Register dst,
465  LinkRegisterStatus lr_status,
466  SaveFPRegsMode save_fp,
467  RememberedSetAction remembered_set_action,
468  SmiCheck smi_check) {
469  // First, check if a write barrier is even needed. The tests below
470  // catch stores of Smis.
471  Label done;
472 
473  // Skip barrier if writing a smi.
474  if (smi_check == INLINE_SMI_CHECK) {
475  JumpIfSmi(value, &done);
476  }
477 
478  // Although the object register is tagged, the offset is relative to the start
479  // of the object, so the offset must be a multiple of kPointerSize.
480  ASSERT(IsAligned(offset, kPointerSize));
481 
482  add(dst, object, Operand(offset - kHeapObjectTag));
483  if (emit_debug_code()) {
484  Label ok;
485  tst(dst, Operand((1 << kPointerSizeLog2) - 1));
486  b(eq, &ok);
487  stop("Unaligned cell in write barrier");
488  bind(&ok);
489  }
490 
491  RecordWrite(object,
492  dst,
493  value,
494  lr_status,
495  save_fp,
496  remembered_set_action,
497  OMIT_SMI_CHECK);
498 
499  bind(&done);
500 
501  // Clobber clobbered input registers when running with the debug-code flag
502  // turned on to provoke errors.
503  if (emit_debug_code()) {
504  mov(value, Operand(BitCast<int32_t>(kZapValue + 4)));
505  mov(dst, Operand(BitCast<int32_t>(kZapValue + 8)));
506  }
507 }
508 
509 
510 // Will clobber 4 registers: object, address, scratch, ip. The
511 // register 'object' contains a heap object pointer. The heap object
512 // tag is shifted away.
513 void MacroAssembler::RecordWrite(Register object,
514  Register address,
515  Register value,
516  LinkRegisterStatus lr_status,
517  SaveFPRegsMode fp_mode,
518  RememberedSetAction remembered_set_action,
519  SmiCheck smi_check) {
520  // The compiled code assumes that record write doesn't change the
521  // context register, so we check that none of the clobbered
522  // registers are cp.
523  ASSERT(!address.is(cp) && !value.is(cp));
524 
525  if (emit_debug_code()) {
526  ldr(ip, MemOperand(address));
527  cmp(ip, value);
528  Check(eq, "Wrong address or value passed to RecordWrite");
529  }
530 
531  Label done;
532 
533  if (smi_check == INLINE_SMI_CHECK) {
534  ASSERT_EQ(0, kSmiTag);
535  tst(value, Operand(kSmiTagMask));
536  b(eq, &done);
537  }
538 
539  CheckPageFlag(value,
540  value, // Used as scratch.
541  MemoryChunk::kPointersToHereAreInterestingMask,
542  eq,
543  &done);
544  CheckPageFlag(object,
545  value, // Used as scratch.
546  MemoryChunk::kPointersFromHereAreInterestingMask,
547  eq,
548  &done);
549 
550  // Record the actual write.
551  if (lr_status == kLRHasNotBeenSaved) {
552  push(lr);
553  }
554  RecordWriteStub stub(object, value, address, remembered_set_action, fp_mode);
555  CallStub(&stub);
556  if (lr_status == kLRHasNotBeenSaved) {
557  pop(lr);
558  }
559 
560  bind(&done);
561 
562  // Clobber clobbered registers when running with the debug-code flag
563  // turned on to provoke errors.
564  if (emit_debug_code()) {
565  mov(address, Operand(BitCast<int32_t>(kZapValue + 12)));
566  mov(value, Operand(BitCast<int32_t>(kZapValue + 16)));
567  }
568 }
569 
570 
571 void MacroAssembler::RememberedSetHelper(Register object, // For debug tests.
572  Register address,
573  Register scratch,
574  SaveFPRegsMode fp_mode,
575  RememberedSetFinalAction and_then) {
576  Label done;
577  if (emit_debug_code()) {
578  Label ok;
579  JumpIfNotInNewSpace(object, scratch, &ok);
580  stop("Remembered set pointer is in new space");
581  bind(&ok);
582  }
583  // Load store buffer top.
584  ExternalReference store_buffer =
585  ExternalReference::store_buffer_top(isolate());
586  mov(ip, Operand(store_buffer));
587  ldr(scratch, MemOperand(ip));
588  // Store pointer to buffer and increment buffer top.
589  str(address, MemOperand(scratch, kPointerSize, PostIndex));
590  // Write back new top of buffer.
591  str(scratch, MemOperand(ip));
592  // Call stub on end of buffer.
593  // Check for end of buffer.
594  tst(scratch, Operand(StoreBuffer::kStoreBufferOverflowBit));
595  if (and_then == kFallThroughAtEnd) {
596  b(eq, &done);
597  } else {
598  ASSERT(and_then == kReturnAtEnd);
599  Ret(eq);
600  }
601  push(lr);
602  StoreBufferOverflowStub store_buffer_overflow =
603  StoreBufferOverflowStub(fp_mode);
604  CallStub(&store_buffer_overflow);
605  pop(lr);
606  bind(&done);
607  if (and_then == kReturnAtEnd) {
608  Ret();
609  }
610 }
611 
612 
613 // Push and pop all registers that can hold pointers.
614 void MacroAssembler::PushSafepointRegisters() {
615  // Safepoints expect a block of contiguous register values starting with r0:
616  ASSERT(((1 << kNumSafepointSavedRegisters) - 1) == kSafepointSavedRegisters);
617  // Safepoints expect a block of kNumSafepointRegisters values on the
618  // stack, so adjust the stack for unsaved registers.
619  const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
620  ASSERT(num_unsaved >= 0);
621  sub(sp, sp, Operand(num_unsaved * kPointerSize));
622  stm(db_w, sp, kSafepointSavedRegisters);
623 }
624 
625 
626 void MacroAssembler::PopSafepointRegisters() {
627  const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
628  ldm(ia_w, sp, kSafepointSavedRegisters);
629  add(sp, sp, Operand(num_unsaved * kPointerSize));
630 }
631 
632 
633 void MacroAssembler::PushSafepointRegistersAndDoubles() {
634  PushSafepointRegisters();
635  sub(sp, sp, Operand(DwVfpRegister::kNumAllocatableRegisters *
636  kDoubleSize));
637  for (int i = 0; i < DwVfpRegister::kNumAllocatableRegisters; i++) {
638  vstr(DwVfpRegister::FromAllocationIndex(i), sp, i * kDoubleSize);
639  }
640 }
641 
642 
643 void MacroAssembler::PopSafepointRegistersAndDoubles() {
644  for (int i = 0; i < DwVfpRegister::kNumAllocatableRegisters; i++) {
645  vldr(DwVfpRegister::FromAllocationIndex(i), sp, i * kDoubleSize);
646  }
647  add(sp, sp, Operand(DwVfpRegister::kNumAllocatableRegisters *
648  kDoubleSize));
649  PopSafepointRegisters();
650 }
651 
652 void MacroAssembler::StoreToSafepointRegistersAndDoublesSlot(Register src,
653  Register dst) {
654  str(src, SafepointRegistersAndDoublesSlot(dst));
655 }
656 
657 
658 void MacroAssembler::StoreToSafepointRegisterSlot(Register src, Register dst) {
659  str(src, SafepointRegisterSlot(dst));
660 }
661 
662 
663 void MacroAssembler::LoadFromSafepointRegisterSlot(Register dst, Register src) {
664  ldr(dst, SafepointRegisterSlot(src));
665 }
666 
667 
668 int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
669  // The registers are pushed starting with the highest encoding,
670  // which means that lowest encodings are closest to the stack pointer.
671  ASSERT(reg_code >= 0 && reg_code < kNumSafepointRegisters);
672  return reg_code;
673 }
674 
675 
676 MemOperand MacroAssembler::SafepointRegisterSlot(Register reg) {
677  return MemOperand(sp, SafepointRegisterStackIndex(reg.code()) * kPointerSize);
678 }
679 
680 
681 MemOperand MacroAssembler::SafepointRegistersAndDoublesSlot(Register reg) {
682  // General purpose registers are pushed last on the stack.
683  int doubles_size = DwVfpRegister::kNumAllocatableRegisters * kDoubleSize;
684  int register_offset = SafepointRegisterStackIndex(reg.code()) * kPointerSize;
685  return MemOperand(sp, doubles_size + register_offset);
686 }
687 
688 
689 void MacroAssembler::Ldrd(Register dst1, Register dst2,
690  const MemOperand& src, Condition cond) {
691  ASSERT(src.rm().is(no_reg));
692  ASSERT(!dst1.is(lr)); // r14.
693  ASSERT_EQ(0, dst1.code() % 2);
694  ASSERT_EQ(dst1.code() + 1, dst2.code());
695 
696  // V8 does not use this addressing mode, so the fallback code
697  // below doesn't support it yet.
698  ASSERT((src.am() != PreIndex) && (src.am() != NegPreIndex));
699 
700  // Generate two ldr instructions if ldrd is not available.
701  if (CpuFeatures::IsSupported(ARMv7) && !predictable_code_size()) {
702  CpuFeatures::Scope scope(ARMv7);
703  ldrd(dst1, dst2, src, cond);
704  } else {
705  if ((src.am() == Offset) || (src.am() == NegOffset)) {
706  MemOperand src2(src);
707  src2.set_offset(src2.offset() + 4);
708  if (dst1.is(src.rn())) {
709  ldr(dst2, src2, cond);
710  ldr(dst1, src, cond);
711  } else {
712  ldr(dst1, src, cond);
713  ldr(dst2, src2, cond);
714  }
715  } else { // PostIndex or NegPostIndex.
716  ASSERT((src.am() == PostIndex) || (src.am() == NegPostIndex));
717  if (dst1.is(src.rn())) {
718  ldr(dst2, MemOperand(src.rn(), 4, Offset), cond);
719  ldr(dst1, src, cond);
720  } else {
721  MemOperand src2(src);
722  src2.set_offset(src2.offset() - 4);
723  ldr(dst1, MemOperand(src.rn(), 4, PostIndex), cond);
724  ldr(dst2, src2, cond);
725  }
726  }
727  }
728 }
729 
730 
731 void MacroAssembler::Strd(Register src1, Register src2,
732  const MemOperand& dst, Condition cond) {
733  ASSERT(dst.rm().is(no_reg));
734  ASSERT(!src1.is(lr)); // r14.
735  ASSERT_EQ(0, src1.code() % 2);
736  ASSERT_EQ(src1.code() + 1, src2.code());
737 
738  // V8 does not use this addressing mode, so the fallback code
739  // below doesn't support it yet.
740  ASSERT((dst.am() != PreIndex) && (dst.am() != NegPreIndex));
741 
742  // Generate two str instructions if strd is not available.
743  if (CpuFeatures::IsSupported(ARMv7) && !predictable_code_size()) {
744  CpuFeatures::Scope scope(ARMv7);
745  strd(src1, src2, dst, cond);
746  } else {
747  MemOperand dst2(dst);
748  if ((dst.am() == Offset) || (dst.am() == NegOffset)) {
749  dst2.set_offset(dst2.offset() + 4);
750  str(src1, dst, cond);
751  str(src2, dst2, cond);
752  } else { // PostIndex or NegPostIndex.
753  ASSERT((dst.am() == PostIndex) || (dst.am() == NegPostIndex));
754  dst2.set_offset(dst2.offset() - 4);
755  str(src1, MemOperand(dst.rn(), 4, PostIndex), cond);
756  str(src2, dst2, cond);
757  }
758  }
759 }
760 
761 
762 void MacroAssembler::ClearFPSCRBits(const uint32_t bits_to_clear,
763  const Register scratch,
764  const Condition cond) {
765  vmrs(scratch, cond);
766  bic(scratch, scratch, Operand(bits_to_clear), LeaveCC, cond);
767  vmsr(scratch, cond);
768 }
769 
770 
771 void MacroAssembler::VFPCompareAndSetFlags(const DwVfpRegister src1,
772  const DwVfpRegister src2,
773  const Condition cond) {
774  // Compare and move FPSCR flags to the normal condition flags.
775  VFPCompareAndLoadFlags(src1, src2, pc, cond);
776 }
777 
778 void MacroAssembler::VFPCompareAndSetFlags(const DwVfpRegister src1,
779  const double src2,
780  const Condition cond) {
781  // Compare and move FPSCR flags to the normal condition flags.
782  VFPCompareAndLoadFlags(src1, src2, pc, cond);
783 }
784 
785 
786 void MacroAssembler::VFPCompareAndLoadFlags(const DwVfpRegister src1,
787  const DwVfpRegister src2,
788  const Register fpscr_flags,
789  const Condition cond) {
790  // Compare and load FPSCR.
791  vcmp(src1, src2, cond);
792  vmrs(fpscr_flags, cond);
793 }
794 
795 void MacroAssembler::VFPCompareAndLoadFlags(const DwVfpRegister src1,
796  const double src2,
797  const Register fpscr_flags,
798  const Condition cond) {
799  // Compare and load FPSCR.
800  vcmp(src1, src2, cond);
801  vmrs(fpscr_flags, cond);
802 }
803 
804 void MacroAssembler::Vmov(const DwVfpRegister dst,
805  const double imm,
806  const Register scratch,
807  const Condition cond) {
808  ASSERT(CpuFeatures::IsEnabled(VFP2));
809  static const DoubleRepresentation minus_zero(-0.0);
810  static const DoubleRepresentation zero(0.0);
811  DoubleRepresentation value(imm);
812  // Handle special values first.
813  if (value.bits == zero.bits) {
814  vmov(dst, kDoubleRegZero, cond);
815  } else if (value.bits == minus_zero.bits) {
816  vneg(dst, kDoubleRegZero, cond);
817  } else {
818  vmov(dst, imm, scratch, cond);
819  }
820 }
821 
822 
823 void MacroAssembler::EnterFrame(StackFrame::Type type) {
824  // r0-r3: preserved
825  stm(db_w, sp, cp.bit() | fp.bit() | lr.bit());
826  mov(ip, Operand(Smi::FromInt(type)));
827  push(ip);
828  mov(ip, Operand(CodeObject()));
829  push(ip);
830  add(fp, sp, Operand(3 * kPointerSize)); // Adjust FP to point to saved FP.
831 }
832 
833 
834 void MacroAssembler::LeaveFrame(StackFrame::Type type) {
835  // r0: preserved
836  // r1: preserved
837  // r2: preserved
838 
839  // Drop the execution stack down to the frame pointer and restore
840  // the caller frame pointer and return address.
841  mov(sp, fp);
842  ldm(ia_w, sp, fp.bit() | lr.bit());
843 }
844 
845 
846 void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space) {
847  // Set up the frame structure on the stack.
848  ASSERT_EQ(2 * kPointerSize, ExitFrameConstants::kCallerSPDisplacement);
849  ASSERT_EQ(1 * kPointerSize, ExitFrameConstants::kCallerPCOffset);
850  ASSERT_EQ(0 * kPointerSize, ExitFrameConstants::kCallerFPOffset);
851  Push(lr, fp);
852  mov(fp, Operand(sp)); // Set up new frame pointer.
853  // Reserve room for saved entry sp and code object.
854  sub(sp, sp, Operand(2 * kPointerSize));
855  if (emit_debug_code()) {
856  mov(ip, Operand(0));
857  str(ip, MemOperand(fp, ExitFrameConstants::kSPOffset));
858  }
859  mov(ip, Operand(CodeObject()));
860  str(ip, MemOperand(fp, ExitFrameConstants::kCodeOffset));
861 
862  // Save the frame pointer and the context in top.
863  mov(ip, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
864  str(fp, MemOperand(ip));
865  mov(ip, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
866  str(cp, MemOperand(ip));
867 
868  // Optionally save all double registers.
869  if (save_doubles) {
870  DwVfpRegister first = d0;
871  DwVfpRegister last =
872  DwVfpRegister::from_code(DwVfpRegister::kNumRegisters - 1);
873  vstm(db_w, sp, first, last);
874  // Note that d0 will be accessible at
875  // fp - 2 * kPointerSize - DwVfpRegister::kNumRegisters * kDoubleSize,
876  // since the sp slot and code slot were pushed after the fp.
877  }
878 
879  // Reserve place for the return address and stack space and align the frame
880  // preparing for calling the runtime function.
881  const int frame_alignment = MacroAssembler::ActivationFrameAlignment();
882  sub(sp, sp, Operand((stack_space + 1) * kPointerSize));
883  if (frame_alignment > 0) {
884  ASSERT(IsPowerOf2(frame_alignment));
885  and_(sp, sp, Operand(-frame_alignment));
886  }
887 
888  // Set the exit frame sp value to point just before the return address
889  // location.
890  add(ip, sp, Operand(kPointerSize));
891  str(ip, MemOperand(fp, ExitFrameConstants::kSPOffset));
892 }
893 
894 
895 void MacroAssembler::InitializeNewString(Register string,
896  Register length,
897  Heap::RootListIndex map_index,
898  Register scratch1,
899  Register scratch2) {
900  mov(scratch1, Operand(length, LSL, kSmiTagSize));
901  LoadRoot(scratch2, map_index);
902  str(scratch1, FieldMemOperand(string, String::kLengthOffset));
903  mov(scratch1, Operand(String::kEmptyHashField));
904  str(scratch2, FieldMemOperand(string, HeapObject::kMapOffset));
905  str(scratch1, FieldMemOperand(string, String::kHashFieldOffset));
906 }
907 
908 
909 int MacroAssembler::ActivationFrameAlignment() {
910 #if defined(V8_HOST_ARCH_ARM)
911  // Running on the real platform. Use the alignment as mandated by the local
912  // environment.
913  // Note: This will break if we ever start generating snapshots on one ARM
914  // platform for another ARM platform with a different alignment.
915  return OS::ActivationFrameAlignment();
916 #else // defined(V8_HOST_ARCH_ARM)
917  // If we are using the simulator then we should always align to the expected
918  // alignment. As the simulator is used to generate snapshots we do not know
919  // if the target platform will need alignment, so this is controlled from a
920  // flag.
921  return FLAG_sim_stack_alignment;
922 #endif // defined(V8_HOST_ARCH_ARM)
923 }
924 
925 
926 void MacroAssembler::LeaveExitFrame(bool save_doubles,
927  Register argument_count) {
928  // Optionally restore all double registers.
929  if (save_doubles) {
930  // Calculate the stack location of the saved doubles and restore them.
931  const int offset = 2 * kPointerSize;
932  sub(r3, fp, Operand(offset + DwVfpRegister::kNumRegisters * kDoubleSize));
933  DwVfpRegister first = d0;
934  DwVfpRegister last =
935  DwVfpRegister::from_code(DwVfpRegister::kNumRegisters - 1);
936  vldm(ia, r3, first, last);
937  }
938 
939  // Clear top frame.
940  mov(r3, Operand(0, RelocInfo::NONE));
941  mov(ip, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
942  str(r3, MemOperand(ip));
943 
944  // Restore current context from top and clear it in debug mode.
945  mov(ip, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
946  ldr(cp, MemOperand(ip));
947 #ifdef DEBUG
948  str(r3, MemOperand(ip));
949 #endif
950 
951  // Tear down the exit frame, pop the arguments, and return.
952  mov(sp, Operand(fp));
953  ldm(ia_w, sp, fp.bit() | lr.bit());
954  if (argument_count.is_valid()) {
955  add(sp, sp, Operand(argument_count, LSL, kPointerSizeLog2));
956  }
957 }
958 
959 void MacroAssembler::GetCFunctionDoubleResult(const DoubleRegister dst) {
960  ASSERT(CpuFeatures::IsSupported(VFP2));
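  // With the EABI hard-float convention the double result is already in d0;
  // with soft-float it is returned in the r0/r1 register pair.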
961  if (use_eabi_hardfloat()) {
962  Move(dst, d0);
963  } else {
964  vmov(dst, r0, r1);
965  }
966 }
967 
968 
969 void MacroAssembler::SetCallKind(Register dst, CallKind call_kind) {
970  // This macro takes the dst register to make the code more readable
971  // at the call sites. However, the dst register has to be r5 to
972  // follow the calling convention which requires the call type to be
973  // in r5.
974  ASSERT(dst.is(r5));
975  if (call_kind == CALL_AS_FUNCTION) {
976  mov(dst, Operand(Smi::FromInt(1)));
977  } else {
978  mov(dst, Operand(Smi::FromInt(0)));
979  }
980 }
981 
982 
983 void MacroAssembler::InvokePrologue(const ParameterCount& expected,
984  const ParameterCount& actual,
985  Handle<Code> code_constant,
986  Register code_reg,
987  Label* done,
988  bool* definitely_mismatches,
989  InvokeFlag flag,
990  const CallWrapper& call_wrapper,
991  CallKind call_kind) {
992  bool definitely_matches = false;
993  *definitely_mismatches = false;
994  Label regular_invoke;
995 
996  // Check whether the expected and actual arguments count match. If not,
997  // setup registers according to contract with ArgumentsAdaptorTrampoline:
998  // r0: actual arguments count
999  // r1: function (passed through to callee)
1000  // r2: expected arguments count
1001  // r3: callee code entry
1002 
1003  // The code below is made a lot easier because the calling code already sets
1004  // up actual and expected registers according to the contract if values are
1005  // passed in registers.
1006  ASSERT(actual.is_immediate() || actual.reg().is(r0));
1007  ASSERT(expected.is_immediate() || expected.reg().is(r2));
1008  ASSERT((!code_constant.is_null() && code_reg.is(no_reg)) || code_reg.is(r3));
1009 
1010  if (expected.is_immediate()) {
1011  ASSERT(actual.is_immediate());
1012  if (expected.immediate() == actual.immediate()) {
1013  definitely_matches = true;
1014  } else {
1015  mov(r0, Operand(actual.immediate()));
1016  const int sentinel = SharedFunctionInfo::kDontAdaptArgumentsSentinel;
1017  if (expected.immediate() == sentinel) {
1018  // Don't worry about adapting arguments for builtins that
1019  // don't want that done. Skip adaptation code by making it look
1020  // like we have a match between expected and actual number of
1021  // arguments.
1022  definitely_matches = true;
1023  } else {
1024  *definitely_mismatches = true;
1025  mov(r2, Operand(expected.immediate()));
1026  }
1027  }
1028  } else {
1029  if (actual.is_immediate()) {
1030  cmp(expected.reg(), Operand(actual.immediate()));
1031  b(eq, &regular_invoke);
1032  mov(r0, Operand(actual.immediate()));
1033  } else {
1034  cmp(expected.reg(), Operand(actual.reg()));
1035  b(eq, &regular_invoke);
1036  }
1037  }
1038 
1039  if (!definitely_matches) {
1040  if (!code_constant.is_null()) {
1041  mov(r3, Operand(code_constant));
1042  add(r3, r3, Operand(Code::kHeaderSize - kHeapObjectTag));
1043  }
1044 
1045  Handle<Code> adaptor =
1046  isolate()->builtins()->ArgumentsAdaptorTrampoline();
1047  if (flag == CALL_FUNCTION) {
1048  call_wrapper.BeforeCall(CallSize(adaptor));
1049  SetCallKind(r5, call_kind);
1050  Call(adaptor);
1051  call_wrapper.AfterCall();
1052  if (!*definitely_mismatches) {
1053  b(done);
1054  }
1055  } else {
1056  SetCallKind(r5, call_kind);
1057  Jump(adaptor, RelocInfo::CODE_TARGET);
1058  }
1059  bind(&regular_invoke);
1060  }
1061 }
1062 
1063 
1064 void MacroAssembler::InvokeCode(Register code,
1065  const ParameterCount& expected,
1066  const ParameterCount& actual,
1067  InvokeFlag flag,
1068  const CallWrapper& call_wrapper,
1069  CallKind call_kind) {
1070  // You can't call a function without a valid frame.
1071  ASSERT(flag == JUMP_FUNCTION || has_frame());
1072 
1073  Label done;
1074  bool definitely_mismatches = false;
1075  InvokePrologue(expected, actual, Handle<Code>::null(), code,
1076  &done, &definitely_mismatches, flag,
1077  call_wrapper, call_kind);
1078  if (!definitely_mismatches) {
1079  if (flag == CALL_FUNCTION) {
1080  call_wrapper.BeforeCall(CallSize(code));
1081  SetCallKind(r5, call_kind);
1082  Call(code);
1083  call_wrapper.AfterCall();
1084  } else {
1085  ASSERT(flag == JUMP_FUNCTION);
1086  SetCallKind(r5, call_kind);
1087  Jump(code);
1088  }
1089 
1090  // Continue here if InvokePrologue does handle the invocation due to
1091  // mismatched parameter counts.
1092  bind(&done);
1093  }
1094 }
1095 
1096 
1097 void MacroAssembler::InvokeCode(Handle<Code> code,
1098  const ParameterCount& expected,
1099  const ParameterCount& actual,
1100  RelocInfo::Mode rmode,
1101  InvokeFlag flag,
1102  CallKind call_kind) {
1103  // You can't call a function without a valid frame.
1104  ASSERT(flag == JUMP_FUNCTION || has_frame());
1105 
1106  Label done;
1107  bool definitely_mismatches = false;
1108  InvokePrologue(expected, actual, code, no_reg,
1109  &done, &definitely_mismatches, flag,
1110  NullCallWrapper(), call_kind);
1111  if (!definitely_mismatches) {
1112  if (flag == CALL_FUNCTION) {
1113  SetCallKind(r5, call_kind);
1114  Call(code, rmode);
1115  } else {
1116  SetCallKind(r5, call_kind);
1117  Jump(code, rmode);
1118  }
1119 
1120  // Continue here if InvokePrologue does handle the invocation due to
1121  // mismatched parameter counts.
1122  bind(&done);
1123  }
1124 }
1125 
1126 
1127 void MacroAssembler::InvokeFunction(Register fun,
1128  const ParameterCount& actual,
1129  InvokeFlag flag,
1130  const CallWrapper& call_wrapper,
1131  CallKind call_kind) {
1132  // You can't call a function without a valid frame.
1133  ASSERT(flag == JUMP_FUNCTION || has_frame());
1134 
1135  // Contract with called JS functions requires that function is passed in r1.
1136  ASSERT(fun.is(r1));
1137 
1138  Register expected_reg = r2;
1139  Register code_reg = r3;
1140 
1141  ldr(code_reg, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
1142  ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
1143  ldr(expected_reg,
1144  FieldMemOperand(code_reg,
1145  SharedFunctionInfo::kFormalParameterCountOffset));
1146  mov(expected_reg, Operand(expected_reg, ASR, kSmiTagSize));
1147  ldr(code_reg,
1148  FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
1149 
1150  ParameterCount expected(expected_reg);
1151  InvokeCode(code_reg, expected, actual, flag, call_wrapper, call_kind);
1152 }
1153 
1154 
1155 void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
1156  const ParameterCount& actual,
1157  InvokeFlag flag,
1158  const CallWrapper& call_wrapper,
1159  CallKind call_kind) {
1160  // You can't call a function without a valid frame.
1161  ASSERT(flag == JUMP_FUNCTION || has_frame());
1162 
1163  // Get the function and setup the context.
1164  LoadHeapObject(r1, function);
1165  ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
1166 
1167  ParameterCount expected(function->shared()->formal_parameter_count());
1168  // We call indirectly through the code field in the function to
1169  // allow recompilation to take effect without changing any of the
1170  // call sites.
1171  ldr(r3, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
1172  InvokeCode(r3, expected, actual, flag, call_wrapper, call_kind);
1173 }
1174 
1175 
1176 void MacroAssembler::IsObjectJSObjectType(Register heap_object,
1177  Register map,
1178  Register scratch,
1179  Label* fail) {
1180  ldr(map, FieldMemOperand(heap_object, HeapObject::kMapOffset));
1181  IsInstanceJSObjectType(map, scratch, fail);
1182 }
1183 
1184 
1185 void MacroAssembler::IsInstanceJSObjectType(Register map,
1186  Register scratch,
1187  Label* fail) {
1188  ldrb(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
1189  cmp(scratch, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
1190  b(lt, fail);
1191  cmp(scratch, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
1192  b(gt, fail);
1193 }
1194 
1195 
1196 void MacroAssembler::IsObjectJSStringType(Register object,
1197  Register scratch,
1198  Label* fail) {
1199  ASSERT(kNotStringTag != 0);
1200 
1201  ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
1202  ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
1203  tst(scratch, Operand(kIsNotStringMask));
1204  b(ne, fail);
1205 }
1206 
1207 
1208 #ifdef ENABLE_DEBUGGER_SUPPORT
1209 void MacroAssembler::DebugBreak() {
1210  mov(r0, Operand(0, RelocInfo::NONE));
1211  mov(r1, Operand(ExternalReference(Runtime::kDebugBreak, isolate())));
1212  CEntryStub ces(1);
1213  ASSERT(AllowThisStubCall(&ces));
1214  Call(ces.GetCode(), RelocInfo::DEBUG_BREAK);
1215 }
1216 #endif
1217 
1218 
1219 void MacroAssembler::PushTryHandler(StackHandler::Kind kind,
1220  int handler_index) {
1221  // Adjust this code if not the case.
1222  STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
1223  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
1224  STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
1225  STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
1226  STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
1227  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
1228 
1229  // For the JSEntry handler, we must preserve r0-r4; r5-r7 are available.
1230  // We will build up the handler from the bottom by pushing on the stack.
1231  // Set up the code object (r5) and the state (r6) for pushing.
1232  unsigned state =
1233  StackHandler::IndexField::encode(handler_index) |
1234  StackHandler::KindField::encode(kind);
1235  mov(r5, Operand(CodeObject()));
1236  mov(r6, Operand(state));
1237 
1238  // Push the frame pointer, context, state, and code object.
1239  if (kind == StackHandler::JS_ENTRY) {
1240  mov(r7, Operand(Smi::FromInt(0))); // Indicates no context.
1241  mov(ip, Operand(0, RelocInfo::NONE)); // NULL frame pointer.
1242  stm(db_w, sp, r5.bit() | r6.bit() | r7.bit() | ip.bit());
1243  } else {
1244  stm(db_w, sp, r5.bit() | r6.bit() | cp.bit() | fp.bit());
1245  }
1246 
1247  // Link the current handler as the next handler.
1248  mov(r6, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
1249  ldr(r5, MemOperand(r6));
1250  push(r5);
1251  // Set this new handler as the current one.
1252  str(sp, MemOperand(r6));
1253 }
1254 
1255 
1256 void MacroAssembler::PopTryHandler() {
1257  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
1258  pop(r1);
1259  mov(ip, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
1260  add(sp, sp, Operand(StackHandlerConstants::kSize - kPointerSize));
1261  str(r1, MemOperand(ip));
1262 }
1263 
1264 
1265 void MacroAssembler::JumpToHandlerEntry() {
1266  // Compute the handler entry address and jump to it. The handler table is
1267  // a fixed array of (smi-tagged) code offsets.
1268  // r0 = exception, r1 = code object, r2 = state.
1269  ldr(r3, FieldMemOperand(r1, Code::kHandlerTableOffset)); // Handler table.
1270  add(r3, r3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
1271  mov(r2, Operand(r2, LSR, StackHandler::kKindWidth)); // Handler index.
1272  ldr(r2, MemOperand(r3, r2, LSL, kPointerSizeLog2)); // Smi-tagged offset.
1273  add(r1, r1, Operand(Code::kHeaderSize - kHeapObjectTag)); // Code start.
1274  add(pc, r1, Operand(r2, ASR, kSmiTagSize)); // Jump.
1275 }
1276 
1277 
1278 void MacroAssembler::Throw(Register value) {
1279  // Adjust this code if not the case.
1280  STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
1281  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
1282  STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
1283  STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
1284  STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
1285  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
1286 
1287  // The exception is expected in r0.
1288  if (!value.is(r0)) {
1289  mov(r0, value);
1290  }
1291  // Drop the stack pointer to the top of the top handler.
1292  mov(r3, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
1293  ldr(sp, MemOperand(r3));
1294  // Restore the next handler.
1295  pop(r2);
1296  str(r2, MemOperand(r3));
1297 
1298  // Get the code object (r1) and state (r2). Restore the context and frame
1299  // pointer.
1300  ldm(ia_w, sp, r1.bit() | r2.bit() | cp.bit() | fp.bit());
1301 
1302  // If the handler is a JS frame, restore the context to the frame.
1303  // (kind == ENTRY) == (fp == 0) == (cp == 0), so we could test either fp
1304  // or cp.
1305  tst(cp, cp);
1306  str(cp, MemOperand(fp, StandardFrameConstants::kContextOffset), ne);
1307 
1308  JumpToHandlerEntry();
1309 }
1310 
1311 
1312 void MacroAssembler::ThrowUncatchable(Register value) {
1313  // Adjust this code if not the case.
1314  STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
1315  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
1316  STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
1317  STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
1318  STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
1319  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
1320 
1321  // The exception is expected in r0.
1322  if (!value.is(r0)) {
1323  mov(r0, value);
1324  }
1325  // Drop the stack pointer to the top of the top stack handler.
1326  mov(r3, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
1327  ldr(sp, MemOperand(r3));
1328 
1329  // Unwind the handlers until the ENTRY handler is found.
1330  Label fetch_next, check_kind;
1331  jmp(&check_kind);
1332  bind(&fetch_next);
1333  ldr(sp, MemOperand(sp, StackHandlerConstants::kNextOffset));
1334 
1335  bind(&check_kind);
1336  STATIC_ASSERT(StackHandler::JS_ENTRY == 0);
1337  ldr(r2, MemOperand(sp, StackHandlerConstants::kStateOffset));
1338  tst(r2, Operand(StackHandler::KindField::kMask));
1339  b(ne, &fetch_next);
1340 
1341  // Set the top handler address to next handler past the top ENTRY handler.
1342  pop(r2);
1343  str(r2, MemOperand(r3));
1344  // Get the code object (r1) and state (r2). Clear the context and frame
1345  // pointer (0 was saved in the handler).
1346  ldm(ia_w, sp, r1.bit() | r2.bit() | cp.bit() | fp.bit());
1347 
1348  JumpToHandlerEntry();
1349 }
1350 
1351 
1352 void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
1353  Register scratch,
1354  Label* miss) {
1355  Label same_contexts;
1356 
1357  ASSERT(!holder_reg.is(scratch));
1358  ASSERT(!holder_reg.is(ip));
1359  ASSERT(!scratch.is(ip));
1360 
1361  // Load current lexical context from the stack frame.
1362  ldr(scratch, MemOperand(fp, StandardFrameConstants::kContextOffset));
1363  // In debug mode, make sure the lexical context is set.
1364 #ifdef DEBUG
1365  cmp(scratch, Operand(0, RelocInfo::NONE));
1366  Check(ne, "we should not have an empty lexical context");
1367 #endif
1368 
1369  // Load the native context of the current context.
1370  int offset =
1371  Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize;
1372  ldr(scratch, FieldMemOperand(scratch, offset));
1373  ldr(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset));
1374 
1375  // Check the context is a native context.
1376  if (emit_debug_code()) {
1377  // TODO(119): avoid push(holder_reg)/pop(holder_reg)
1378  // Cannot use ip as a temporary in this verification code, because ip is
1379  // clobbered as part of cmp with an object Operand.
1380  push(holder_reg); // Temporarily save holder on the stack.
1381  // Read the first word and compare to the native_context_map.
1382  ldr(holder_reg, FieldMemOperand(scratch, HeapObject::kMapOffset));
1383  LoadRoot(ip, Heap::kNativeContextMapRootIndex);
1384  cmp(holder_reg, ip);
1385  Check(eq, "JSGlobalObject::native_context should be a native context.");
1386  pop(holder_reg); // Restore holder.
1387  }
1388 
1389  // Check if both contexts are the same.
1390  ldr(ip, FieldMemOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
1391  cmp(scratch, Operand(ip));
1392  b(eq, &same_contexts);
1393 
1394  // Check the context is a native context.
1395  if (emit_debug_code()) {
1396  // TODO(119): avoid push(holder_reg)/pop(holder_reg)
1397  // Cannot use ip as a temporary in this verification code, because ip is
1398  // clobbered as part of cmp with an object Operand.
1399  push(holder_reg); // Temporarily save holder on the stack.
1400  mov(holder_reg, ip); // Move ip to its holding place.
1401  LoadRoot(ip, Heap::kNullValueRootIndex);
1402  cmp(holder_reg, ip);
1403  Check(ne, "JSGlobalProxy::context() should not be null.");
1404 
1405  ldr(holder_reg, FieldMemOperand(holder_reg, HeapObject::kMapOffset));
1406  LoadRoot(ip, Heap::kNativeContextMapRootIndex);
1407  cmp(holder_reg, ip);
1408  Check(eq, "JSGlobalObject::native_context should be a native context.");
1409  // Restoring ip is not needed; it is reloaded below.
1410  pop(holder_reg); // Restore holder.
1411  // Restore ip to holder's context.
1412  ldr(ip, FieldMemOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
1413  }
1414 
1415  // Check that the security token in the calling global object is
1416  // compatible with the security token in the receiving global
1417  // object.
1418  int token_offset = Context::kHeaderSize +
1419  Context::SECURITY_TOKEN_INDEX * kPointerSize;
1420 
1421  ldr(scratch, FieldMemOperand(scratch, token_offset));
1422  ldr(ip, FieldMemOperand(ip, token_offset));
1423  cmp(scratch, Operand(ip));
1424  b(ne, miss);
1425 
1426  bind(&same_contexts);
1427 }
1428 
1429 
1430 void MacroAssembler::GetNumberHash(Register t0, Register scratch) {
1431  // First of all we assign the hash seed to scratch.
1432  LoadRoot(scratch, Heap::kHashSeedRootIndex);
1433  SmiUntag(scratch);
1434 
1435  // Xor original key with a seed.
1436  eor(t0, t0, Operand(scratch));
1437 
1438  // Compute the hash code from the untagged key. This must be kept in sync
1439  // with ComputeIntegerHash in utils.h.
1440  //
1441  // hash = ~hash + (hash << 15);
1442  mvn(scratch, Operand(t0));
1443  add(t0, scratch, Operand(t0, LSL, 15));
1444  // hash = hash ^ (hash >> 12);
1445  eor(t0, t0, Operand(t0, LSR, 12));
1446  // hash = hash + (hash << 2);
1447  add(t0, t0, Operand(t0, LSL, 2));
1448  // hash = hash ^ (hash >> 4);
1449  eor(t0, t0, Operand(t0, LSR, 4));
1450  // hash = hash * 2057;
1451  mov(scratch, Operand(t0, LSL, 11));
1452  add(t0, t0, Operand(t0, LSL, 3));
1453  add(t0, t0, scratch);
1454  // hash = hash ^ (hash >> 16);
1455  eor(t0, t0, Operand(t0, LSR, 16));
1456 }
1457 
1458 
1459 void MacroAssembler::LoadFromNumberDictionary(Label* miss,
1460  Register elements,
1461  Register key,
1462  Register result,
1463  Register t0,
1464  Register t1,
1465  Register t2) {
1466  // Register use:
1467  //
1468  // elements - holds the slow-case elements of the receiver on entry.
1469  // Unchanged unless 'result' is the same register.
1470  //
1471  // key - holds the smi key on entry.
1472  // Unchanged unless 'result' is the same register.
1473  //
1474  // result - holds the result on exit if the load succeeded.
1475  // Allowed to be the same as 'key' or 'result'.
1476  // Unchanged on bailout so 'key' or 'result' can be used
1477  // in further computation.
1478  //
1479  // Scratch registers:
1480  //
1481  // t0 - holds the untagged key on entry and holds the hash once computed.
1482  //
1483  // t1 - used to hold the capacity mask of the dictionary
1484  //
1485  // t2 - used for the index into the dictionary.
1486  Label done;
1487 
1488  GetNumberHash(t0, t1);
1489 
1490  // Compute the capacity mask.
1491  ldr(t1, FieldMemOperand(elements, SeededNumberDictionary::kCapacityOffset));
1492  mov(t1, Operand(t1, ASR, kSmiTagSize)); // convert smi to int
1493  sub(t1, t1, Operand(1));
1494 
1495  // Generate an unrolled loop that performs a few probes before giving up.
1496  static const int kProbes = 4;
1497  for (int i = 0; i < kProbes; i++) {
1498  // Use t2 for index calculations and keep the hash intact in t0.
1499  mov(t2, t0);
1500  // Compute the masked index: (hash + i + i * i) & mask.
1501  if (i > 0) {
1502  add(t2, t2, Operand(SeededNumberDictionary::GetProbeOffset(i)));
1503  }
1504  and_(t2, t2, Operand(t1));
1505 
1506  // Scale the index by multiplying by the element size.
1507  ASSERT(SeededNumberDictionary::kEntrySize == 3);
1508  add(t2, t2, Operand(t2, LSL, 1)); // t2 = t2 * 3
1509 
1510  // Check if the key is identical to the name.
1511  add(t2, elements, Operand(t2, LSL, kPointerSizeLog2));
1512  ldr(ip, FieldMemOperand(t2, SeededNumberDictionary::kElementsStartOffset));
1513  cmp(key, Operand(ip));
1514  if (i != kProbes - 1) {
1515  b(eq, &done);
1516  } else {
1517  b(ne, miss);
1518  }
1519  }
1520 
1521  bind(&done);
1522  // Check that the value is a normal property.
1523  // t2: elements + (index * kPointerSize)
1524  const int kDetailsOffset =
1525  SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize;
1526  ldr(t1, FieldMemOperand(t2, kDetailsOffset));
1527  tst(t1, Operand(Smi::FromInt(PropertyDetails::TypeField::kMask)));
1528  b(ne, miss);
1529 
1530  // Get the value at the masked, scaled index and return.
1531  const int kValueOffset =
1532  SeededNumberDictionary::kElementsStartOffset + kPointerSize;
1533  ldr(result, FieldMemOperand(t2, kValueOffset));
1534 }
1535 
1536 
1537 void MacroAssembler::AllocateInNewSpace(int object_size,
1538  Register result,
1539  Register scratch1,
1540  Register scratch2,
1541  Label* gc_required,
1542  AllocationFlags flags) {
1543  if (!FLAG_inline_new) {
1544  if (emit_debug_code()) {
1545  // Trash the registers to simulate an allocation failure.
1546  mov(result, Operand(0x7091));
1547  mov(scratch1, Operand(0x7191));
1548  mov(scratch2, Operand(0x7291));
1549  }
1550  jmp(gc_required);
1551  return;
1552  }
1553 
1554  ASSERT(!result.is(scratch1));
1555  ASSERT(!result.is(scratch2));
1556  ASSERT(!scratch1.is(scratch2));
1557  ASSERT(!scratch1.is(ip));
1558  ASSERT(!scratch2.is(ip));
1559 
1560  // Make object size into bytes.
1561  if ((flags & SIZE_IN_WORDS) != 0) {
1562  object_size *= kPointerSize;
1563  }
1564  ASSERT_EQ(0, object_size & kObjectAlignmentMask);
1565 
1566  // Check relative positions of allocation top and limit addresses.
1567  // The values must be adjacent in memory to allow the use of LDM.
1568  // Also, assert that the registers are numbered such that the values
1569  // are loaded in the correct order.
1570  ExternalReference new_space_allocation_top =
1571  ExternalReference::new_space_allocation_top_address(isolate());
1572  ExternalReference new_space_allocation_limit =
1573  ExternalReference::new_space_allocation_limit_address(isolate());
1574  intptr_t top =
1575  reinterpret_cast<intptr_t>(new_space_allocation_top.address());
1576  intptr_t limit =
1577  reinterpret_cast<intptr_t>(new_space_allocation_limit.address());
1578  ASSERT((limit - top) == kPointerSize);
1579  ASSERT(result.code() < ip.code());
1580 
1581  // Set up allocation top address and object size registers.
1582  Register topaddr = scratch1;
1583  Register obj_size_reg = scratch2;
1584  mov(topaddr, Operand(new_space_allocation_top));
1585  Operand obj_size_operand = Operand(object_size);
1586  if (!obj_size_operand.is_single_instruction(this)) {
1587  // We are about to steal IP, so we need to load this value first
1588  mov(obj_size_reg, obj_size_operand);
1589  }
1590 
1591  // This code stores a temporary value in ip. This is OK, as the code below
1592  // does not need ip for implicit literal generation.
1593  if ((flags & RESULT_CONTAINS_TOP) == 0) {
1594  // Load allocation top into result and allocation limit into ip.
1595  ldm(ia, topaddr, result.bit() | ip.bit());
1596  } else {
1597  if (emit_debug_code()) {
1598  // Assert that result actually contains top on entry. ip is used
1599  // immediately below, so this use of ip does not make the register contents
1600  // differ between debug and release mode.
1601  ldr(ip, MemOperand(topaddr));
1602  cmp(result, ip);
1603  Check(eq, "Unexpected allocation top");
1604  }
1605  // Load allocation limit into ip. Result already contains allocation top.
1606  ldr(ip, MemOperand(topaddr, limit - top));
1607  }
1608 
1609  // Calculate new top and bail out if new space is exhausted. Use result
1610  // to calculate the new top.
1611  if (obj_size_operand.is_single_instruction(this)) {
1612  // We can add the size as an immediate
1613  add(scratch2, result, obj_size_operand, SetCC);
1614  } else {
1615  // Doesn't fit in an immediate, we have to use the register
1616  add(scratch2, result, obj_size_reg, SetCC);
1617  }
1618  b(cs, gc_required);
1619  cmp(scratch2, Operand(ip));
1620  b(hi, gc_required);
1621  str(scratch2, MemOperand(topaddr));
1622 
1623  // Tag object if requested.
1624  if ((flags & TAG_OBJECT) != 0) {
1625  add(result, result, Operand(kHeapObjectTag));
1626  }
1627 }
1628 
1629 
1630 void MacroAssembler::AllocateInNewSpace(Register object_size,
1631  Register result,
1632  Register scratch1,
1633  Register scratch2,
1634  Label* gc_required,
1635  AllocationFlags flags) {
1636  if (!FLAG_inline_new) {
1637  if (emit_debug_code()) {
1638  // Trash the registers to simulate an allocation failure.
1639  mov(result, Operand(0x7091));
1640  mov(scratch1, Operand(0x7191));
1641  mov(scratch2, Operand(0x7291));
1642  }
1643  jmp(gc_required);
1644  return;
1645  }
1646 
1647  // Assert that the register arguments are different and that none of
1648  // them are ip. ip is used explicitly in the code generated below.
1649  ASSERT(!result.is(scratch1));
1650  ASSERT(!result.is(scratch2));
1651  ASSERT(!scratch1.is(scratch2));
1652  ASSERT(!object_size.is(ip));
1653  ASSERT(!result.is(ip));
1654  ASSERT(!scratch1.is(ip));
1655  ASSERT(!scratch2.is(ip));
1656 
1657  // Check relative positions of allocation top and limit addresses.
1658  // The values must be adjacent in memory to allow the use of LDM.
1659  // Also, assert that the registers are numbered such that the values
1660  // are loaded in the correct order.
1661  ExternalReference new_space_allocation_top =
1662  ExternalReference::new_space_allocation_top_address(isolate());
1663  ExternalReference new_space_allocation_limit =
1664  ExternalReference::new_space_allocation_limit_address(isolate());
1665  intptr_t top =
1666  reinterpret_cast<intptr_t>(new_space_allocation_top.address());
1667  intptr_t limit =
1668  reinterpret_cast<intptr_t>(new_space_allocation_limit.address());
1669  ASSERT((limit - top) == kPointerSize);
1670  ASSERT(result.code() < ip.code());
1671 
1672  // Set up allocation top address.
1673  Register topaddr = scratch1;
1674  mov(topaddr, Operand(new_space_allocation_top));
1675 
1676  // This code stores a temporary value in ip. This is OK, as the code below
1677  // does not need ip for implicit literal generation.
1678  if ((flags & RESULT_CONTAINS_TOP) == 0) {
1679  // Load allocation top into result and allocation limit into ip.
1680  ldm(ia, topaddr, result.bit() | ip.bit());
1681  } else {
1682  if (emit_debug_code()) {
1683  // Assert that result actually contains top on entry. ip is used
1684  // immediately below, so this use of ip does not cause a difference in
1685  // register content between debug and release mode.
1686  ldr(ip, MemOperand(topaddr));
1687  cmp(result, ip);
1688  Check(eq, "Unexpected allocation top");
1689  }
1690  // Load allocation limit into ip. Result already contains allocation top.
1691  ldr(ip, MemOperand(topaddr, limit - top));
1692  }
1693 
1694  // Calculate new top and bail out if new space is exhausted. Use result
1695  // to calculate the new top. Object size may be in words so a shift is
1696  // required to get the number of bytes.
1697  if ((flags & SIZE_IN_WORDS) != 0) {
1698  add(scratch2, result, Operand(object_size, LSL, kPointerSizeLog2), SetCC);
1699  } else {
1700  add(scratch2, result, Operand(object_size), SetCC);
1701  }
1702  b(cs, gc_required);
1703  cmp(scratch2, Operand(ip));
1704  b(hi, gc_required);
1705 
1706  // Update allocation top. result temporarily holds the new top.
1707  if (emit_debug_code()) {
1708  tst(scratch2, Operand(kObjectAlignmentMask));
1709  Check(eq, "Unaligned allocation in new space");
1710  }
1711  str(scratch2, MemOperand(topaddr));
1712 
1713  // Tag object if requested.
1714  if ((flags & TAG_OBJECT) != 0) {
1715  add(result, result, Operand(kHeapObjectTag));
1716  }
1717 }
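// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the original file): the C++ equivalent of
// the new-space fast path generated by the two AllocateInNewSpace variants
// above. NewSpaceSketch is a stand-in for the real allocation-top/limit cells;
// kHeapObjectTag (1) corresponds to the tagging step guarded by TAG_OBJECT.
#include <stdint.h>
struct NewSpaceSketch {
  uintptr_t top;    // Next free address. Kept adjacent to |limit| so that a
  uintptr_t limit;  // single LDM can load both values at once.
};
// Returns 0 where the generated code would branch to gc_required.
static uintptr_t AllocateInNewSpaceSketch(NewSpaceSketch* space,
                                          uintptr_t size_in_bytes,
                                          bool tag_object) {
  uintptr_t result = space->top;
  uintptr_t new_top = result + size_in_bytes;  // add(scratch2, result, ..., SetCC).
  if (new_top < result) return 0;              // b(cs, gc_required): unsigned overflow.
  if (new_top > space->limit) return 0;        // cmp(scratch2, ip); b(hi, gc_required).
  space->top = new_top;                        // str(scratch2, MemOperand(topaddr)).
  return tag_object ? result + 1 : result;     // add(result, result, kHeapObjectTag).
}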
1718 
1719 
1720 void MacroAssembler::UndoAllocationInNewSpace(Register object,
1721  Register scratch) {
1722  ExternalReference new_space_allocation_top =
1723  ExternalReference::new_space_allocation_top_address(isolate());
1724 
1725  // Make sure the object has no tag before resetting top.
1726  and_(object, object, Operand(~kHeapObjectTagMask));
1727 #ifdef DEBUG
1728  // Check that the object un-allocated is below the current top.
1729  mov(scratch, Operand(new_space_allocation_top));
1730  ldr(scratch, MemOperand(scratch));
1731  cmp(object, scratch);
1732  Check(lt, "Undo allocation of non allocated memory");
1733 #endif
1734  // Write the address of the object to un-allocate as the current top.
1735  mov(scratch, Operand(new_space_allocation_top));
1736  str(object, MemOperand(scratch));
1737 }
1738 
1739 
1740 void MacroAssembler::AllocateTwoByteString(Register result,
1741  Register length,
1742  Register scratch1,
1743  Register scratch2,
1744  Register scratch3,
1745  Label* gc_required) {
1746  // Calculate the number of bytes needed for the characters in the string while
1747  // observing object alignment.
1748  ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
1749  mov(scratch1, Operand(length, LSL, 1)); // Length in bytes, not chars.
1750  add(scratch1, scratch1,
1751  Operand(kObjectAlignmentMask + SeqTwoByteString::kHeaderSize));
1752  and_(scratch1, scratch1, Operand(~kObjectAlignmentMask));
1753 
1754  // Allocate two-byte string in new space.
1755  AllocateInNewSpace(scratch1,
1756  result,
1757  scratch2,
1758  scratch3,
1759  gc_required,
1760  TAG_OBJECT);
1761 
1762  // Set the map, length and hash field.
1763  InitializeNewString(result,
1764  length,
1765  Heap::kStringMapRootIndex,
1766  scratch1,
1767  scratch2);
1768 }
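// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the original file): the size computation
// that AllocateTwoByteString performs before calling AllocateInNewSpace. The
// header_size and alignment_mask parameters stand in for
// SeqTwoByteString::kHeaderSize and kObjectAlignmentMask.
#include <stddef.h>
static size_t TwoByteStringSizeSketch(size_t length,
                                      size_t header_size,
                                      size_t alignment_mask) {
  size_t size = length * 2;              // mov(scratch1, Operand(length, LSL, 1)).
  size += alignment_mask + header_size;  // add(scratch1, scratch1, ...).
  return size & ~alignment_mask;         // and_(scratch1, ..., ~mask): round up.
}
// AllocateAsciiString below does the same, except the character count already
// is the byte count (kCharSize == 1), so no shift is needed.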
1769 
1770 
1771 void MacroAssembler::AllocateAsciiString(Register result,
1772  Register length,
1773  Register scratch1,
1774  Register scratch2,
1775  Register scratch3,
1776  Label* gc_required) {
1777  // Calculate the number of bytes needed for the characters in the string while
1778  // observing object alignment.
1779  ASSERT((SeqAsciiString::kHeaderSize & kObjectAlignmentMask) == 0);
1780  ASSERT(kCharSize == 1);
1781  add(scratch1, length,
1782  Operand(kObjectAlignmentMask + SeqAsciiString::kHeaderSize));
1783  and_(scratch1, scratch1, Operand(~kObjectAlignmentMask));
1784 
1785  // Allocate ASCII string in new space.
1786  AllocateInNewSpace(scratch1,
1787  result,
1788  scratch2,
1789  scratch3,
1790  gc_required,
1791  TAG_OBJECT);
1792 
1793  // Set the map, length and hash field.
1794  InitializeNewString(result,
1795  length,
1796  Heap::kAsciiStringMapRootIndex,
1797  scratch1,
1798  scratch2);
1799 }
1800 
1801 
1802 void MacroAssembler::AllocateTwoByteConsString(Register result,
1803  Register length,
1804  Register scratch1,
1805  Register scratch2,
1806  Label* gc_required) {
1807  AllocateInNewSpace(ConsString::kSize,
1808  result,
1809  scratch1,
1810  scratch2,
1811  gc_required,
1812  TAG_OBJECT);
1813 
1814  InitializeNewString(result,
1815  length,
1816  Heap::kConsStringMapRootIndex,
1817  scratch1,
1818  scratch2);
1819 }
1820 
1821 
1822 void MacroAssembler::AllocateAsciiConsString(Register result,
1823  Register length,
1824  Register scratch1,
1825  Register scratch2,
1826  Label* gc_required) {
1827  AllocateInNewSpace(ConsString::kSize,
1828  result,
1829  scratch1,
1830  scratch2,
1831  gc_required,
1832  TAG_OBJECT);
1833 
1834  InitializeNewString(result,
1835  length,
1836  Heap::kConsAsciiStringMapRootIndex,
1837  scratch1,
1838  scratch2);
1839 }
1840 
1841 
1842 void MacroAssembler::AllocateTwoByteSlicedString(Register result,
1843  Register length,
1844  Register scratch1,
1845  Register scratch2,
1846  Label* gc_required) {
1847  AllocateInNewSpace(SlicedString::kSize,
1848  result,
1849  scratch1,
1850  scratch2,
1851  gc_required,
1852  TAG_OBJECT);
1853 
1854  InitializeNewString(result,
1855  length,
1856  Heap::kSlicedStringMapRootIndex,
1857  scratch1,
1858  scratch2);
1859 }
1860 
1861 
1862 void MacroAssembler::AllocateAsciiSlicedString(Register result,
1863  Register length,
1864  Register scratch1,
1865  Register scratch2,
1866  Label* gc_required) {
1867  AllocateInNewSpace(SlicedString::kSize,
1868  result,
1869  scratch1,
1870  scratch2,
1871  gc_required,
1872  TAG_OBJECT);
1873 
1874  InitializeNewString(result,
1875  length,
1876  Heap::kSlicedAsciiStringMapRootIndex,
1877  scratch1,
1878  scratch2);
1879 }
1880 
1881 
1882 void MacroAssembler::CompareObjectType(Register object,
1883  Register map,
1884  Register type_reg,
1885  InstanceType type) {
1886  ldr(map, FieldMemOperand(object, HeapObject::kMapOffset));
1887  CompareInstanceType(map, type_reg, type);
1888 }
1889 
1890 
1891 void MacroAssembler::CompareInstanceType(Register map,
1892  Register type_reg,
1893  InstanceType type) {
1894  ldrb(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
1895  cmp(type_reg, Operand(type));
1896 }
1897 
1898 
1899 void MacroAssembler::CompareRoot(Register obj,
1900  Heap::RootListIndex index) {
1901  ASSERT(!obj.is(ip));
1902  LoadRoot(ip, index);
1903  cmp(obj, ip);
1904 }
1905 
1906 
1907 void MacroAssembler::CheckFastElements(Register map,
1908  Register scratch,
1909  Label* fail) {
1910  STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
1911  STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
1912  STATIC_ASSERT(FAST_ELEMENTS == 2);
1913  STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
1914  ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset));
1915  cmp(scratch, Operand(Map::kMaximumBitField2FastHoleyElementValue));
1916  b(hi, fail);
1917 }
1918 
1919 
1920 void MacroAssembler::CheckFastObjectElements(Register map,
1921  Register scratch,
1922  Label* fail) {
1923  STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
1924  STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
1925  STATIC_ASSERT(FAST_ELEMENTS == 2);
1926  STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
1927  ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset));
1928  cmp(scratch, Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
1929  b(ls, fail);
1930  cmp(scratch, Operand(Map::kMaximumBitField2FastHoleyElementValue));
1931  b(hi, fail);
1932 }
1933 
1934 
1935 void MacroAssembler::CheckFastSmiElements(Register map,
1936  Register scratch,
1937  Label* fail) {
1938  STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
1939  STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
1940  ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset));
1941  cmp(scratch, Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
1942  b(hi, fail);
1943 }
1944 
1945 
1946 void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
1947  Register key_reg,
1948  Register receiver_reg,
1949  Register elements_reg,
1950  Register scratch1,
1951  Register scratch2,
1952  Register scratch3,
1953  Register scratch4,
1954  Label* fail) {
1955  Label smi_value, maybe_nan, have_double_value, is_nan, done;
1956  Register mantissa_reg = scratch2;
1957  Register exponent_reg = scratch3;
1958 
1959  // Handle smi values specially.
1960  JumpIfSmi(value_reg, &smi_value);
1961 
1962  // Ensure that the object is a heap number.
1963  CheckMap(value_reg,
1964  scratch1,
1965  isolate()->factory()->heap_number_map(),
1966  fail,
1967  DONT_DO_SMI_CHECK);
1968 
1969  // Check for NaN: all NaN values have a value greater (signed) than 0x7ff00000
1970  // in the exponent.
1971  mov(scratch1, Operand(kNaNOrInfinityLowerBoundUpper32));
1972  ldr(exponent_reg, FieldMemOperand(value_reg, HeapNumber::kExponentOffset));
1973  cmp(exponent_reg, scratch1);
1974  b(ge, &maybe_nan);
1975 
1976  ldr(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset));
1977 
1978  bind(&have_double_value);
1979  add(scratch1, elements_reg,
1980  Operand(key_reg, LSL, kDoubleSizeLog2 - kSmiTagSize));
1981  str(mantissa_reg, FieldMemOperand(scratch1, FixedDoubleArray::kHeaderSize));
1982  uint32_t offset = FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32);
1983  str(exponent_reg, FieldMemOperand(scratch1, offset));
1984  jmp(&done);
1985 
1986  bind(&maybe_nan);
1987  // Could be NaN or Infinity. If fraction is not zero, it's NaN, otherwise
1988  // it's an Infinity, and the non-NaN code path applies.
1989  b(gt, &is_nan);
1990  ldr(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset));
1991  cmp(mantissa_reg, Operand(0));
1992  b(eq, &have_double_value);
1993  bind(&is_nan);
1994  // Load canonical NaN for storing into the double array.
1995  uint64_t nan_int64 = BitCast<uint64_t>(
1996  FixedDoubleArray::canonical_not_the_hole_nan_as_double());
1997  mov(mantissa_reg, Operand(static_cast<uint32_t>(nan_int64)));
1998  mov(exponent_reg, Operand(static_cast<uint32_t>(nan_int64 >> 32)));
1999  jmp(&have_double_value);
2000 
2001  bind(&smi_value);
2002  add(scratch1, elements_reg,
2003  Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
2004  add(scratch1, scratch1,
2005  Operand(key_reg, LSL, kDoubleSizeLog2 - kSmiTagSize));
2006  // scratch1 now holds the effective address of the double element.
2007 
2008  FloatingPointHelper::Destination destination;
2009  if (CpuFeatures::IsSupported(VFP2)) {
2010  destination = FloatingPointHelper::kVFPRegisters;
2011  } else {
2012  destination = FloatingPointHelper::kCoreRegisters;
2013  }
2014 
2015  Register untagged_value = elements_reg;
2016  SmiUntag(untagged_value, value_reg);
2017  FloatingPointHelper::ConvertIntToDouble(this,
2018  untagged_value,
2019  destination,
2020  d0,
2021  mantissa_reg,
2022  exponent_reg,
2023  scratch4,
2024  s2);
2025  if (destination == FloatingPointHelper::kVFPRegisters) {
2026  CpuFeatures::Scope scope(VFP2);
2027  vstr(d0, scratch1, 0);
2028  } else {
2029  str(mantissa_reg, MemOperand(scratch1, 0));
2030  str(exponent_reg, MemOperand(scratch1, Register::kSizeInBytes));
2031  }
2032  bind(&done);
2033 }
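// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the original file): the classification made
// by the exponent and mantissa comparisons in StoreNumberToDoubleElements
// above. The exponent word is the upper 32 bits of the IEEE-754 double, and
// 0x7ff00000 is the assumed value of kNaNOrInfinityLowerBoundUpper32; the
// signed comparison mirrors the cmp/b(ge)/b(gt) sequence in the code.
#include <stdint.h>
enum HeapNumberKindSketch { kOrdinarySketch, kInfinitySketch, kNaNSketch };
static HeapNumberKindSketch ClassifySketch(int32_t exponent_word,
                                           uint32_t mantissa_word) {
  if (exponent_word < 0x7ff00000) return kOrdinarySketch;    // b(ge, &maybe_nan) not taken.
  if (exponent_word > 0x7ff00000) return kNaNSketch;         // b(gt, &is_nan).
  return mantissa_word == 0 ? kInfinitySketch : kNaNSketch;  // cmp(mantissa_reg, 0).
}
// NaNs are then replaced by canonical_not_the_hole_nan_as_double(), so an
// arbitrary NaN payload can never collide with the hole marker used by
// FixedDoubleArray.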
2034 
2035 
2036 void MacroAssembler::CompareMap(Register obj,
2037  Register scratch,
2038  Handle<Map> map,
2039  Label* early_success,
2040  CompareMapMode mode) {
2041  ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
2042  CompareMap(scratch, map, early_success, mode);
2043 }
2044 
2045 
2046 void MacroAssembler::CompareMap(Register obj_map,
2047  Handle<Map> map,
2048  Label* early_success,
2049  CompareMapMode mode) {
2050  cmp(obj_map, Operand(map));
2051  if (mode == ALLOW_ELEMENT_TRANSITION_MAPS) {
2052  ElementsKind kind = map->elements_kind();
2053  if (IsFastElementsKind(kind)) {
2054  bool packed = IsFastPackedElementsKind(kind);
2055  Map* current_map = *map;
2056  while (CanTransitionToMoreGeneralFastElementsKind(kind, packed)) {
2057  kind = GetNextMoreGeneralFastElementsKind(kind, packed);
2058  current_map = current_map->LookupElementsTransitionMap(kind);
2059  if (!current_map) break;
2060  b(eq, early_success);
2061  cmp(obj_map, Operand(Handle<Map>(current_map)));
2062  }
2063  }
2064  }
2065 }
2066 
2067 
2068 void MacroAssembler::CheckMap(Register obj,
2069  Register scratch,
2070  Handle<Map> map,
2071  Label* fail,
2072  SmiCheckType smi_check_type,
2073  CompareMapMode mode) {
2074  if (smi_check_type == DO_SMI_CHECK) {
2075  JumpIfSmi(obj, fail);
2076  }
2077 
2078  Label success;
2079  CompareMap(obj, scratch, map, &success, mode);
2080  b(ne, fail);
2081  bind(&success);
2082 }
2083 
2084 
2085 void MacroAssembler::CheckMap(Register obj,
2086  Register scratch,
2087  Heap::RootListIndex index,
2088  Label* fail,
2089  SmiCheckType smi_check_type) {
2090  if (smi_check_type == DO_SMI_CHECK) {
2091  JumpIfSmi(obj, fail);
2092  }
2093  ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
2094  LoadRoot(ip, index);
2095  cmp(scratch, ip);
2096  b(ne, fail);
2097 }
2098 
2099 
2100 void MacroAssembler::DispatchMap(Register obj,
2101  Register scratch,
2102  Handle<Map> map,
2103  Handle<Code> success,
2104  SmiCheckType smi_check_type) {
2105  Label fail;
2106  if (smi_check_type == DO_SMI_CHECK) {
2107  JumpIfSmi(obj, &fail);
2108  }
2109  ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
2110  mov(ip, Operand(map));
2111  cmp(scratch, ip);
2112  Jump(success, RelocInfo::CODE_TARGET, eq);
2113  bind(&fail);
2114 }
2115 
2116 
2117 void MacroAssembler::TryGetFunctionPrototype(Register function,
2118  Register result,
2119  Register scratch,
2120  Label* miss,
2121  bool miss_on_bound_function) {
2122  // Check that the receiver isn't a smi.
2123  JumpIfSmi(function, miss);
2124 
2125  // Check that the function really is a function. Load map into result reg.
2126  CompareObjectType(function, result, scratch, JS_FUNCTION_TYPE);
2127  b(ne, miss);
2128 
2129  if (miss_on_bound_function) {
2130  ldr(scratch,
2131  FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
2132  ldr(scratch,
2133  FieldMemOperand(scratch, SharedFunctionInfo::kCompilerHintsOffset));
2134  tst(scratch,
2135  Operand(Smi::FromInt(1 << SharedFunctionInfo::kBoundFunction)));
2136  b(ne, miss);
2137  }
2138 
2139  // Make sure that the function has an instance prototype.
2140  Label non_instance;
2141  ldrb(scratch, FieldMemOperand(result, Map::kBitFieldOffset));
2142  tst(scratch, Operand(1 << Map::kHasNonInstancePrototype));
2143  b(ne, &non_instance);
2144 
2145  // Get the prototype or initial map from the function.
2146  ldr(result,
2147  FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
2148 
2149  // If the prototype or initial map is the hole, don't return it and
2150  // simply miss the cache instead. This will allow us to allocate a
2151  // prototype object on-demand in the runtime system.
2152  LoadRoot(ip, Heap::kTheHoleValueRootIndex);
2153  cmp(result, ip);
2154  b(eq, miss);
2155 
2156  // If the function does not have an initial map, we're done.
2157  Label done;
2158  CompareObjectType(result, scratch, scratch, MAP_TYPE);
2159  b(ne, &done);
2160 
2161  // Get the prototype from the initial map.
2162  ldr(result, FieldMemOperand(result, Map::kPrototypeOffset));
2163  jmp(&done);
2164 
2165  // Non-instance prototype: Fetch prototype from constructor field
2166  // in initial map.
2167  bind(&non_instance);
2168  ldr(result, FieldMemOperand(result, Map::kConstructorOffset));
2169 
2170  // All done.
2171  bind(&done);
2172 }
2173 
2174 
2175 void MacroAssembler::CallStub(CodeStub* stub, Condition cond) {
2176  ASSERT(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs.
2177  Call(stub->GetCode(), RelocInfo::CODE_TARGET, TypeFeedbackId::None(), cond);
2178 }
2179 
2180 
2181 void MacroAssembler::TailCallStub(CodeStub* stub, Condition cond) {
2182  ASSERT(allow_stub_calls_ || stub->CompilingCallsToThisStubIsGCSafe());
2183  Jump(stub->GetCode(), RelocInfo::CODE_TARGET, cond);
2184 }
2185 
2186 
2187 static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
2188  return ref0.address() - ref1.address();
2189 }
2190 
2191 
2192 void MacroAssembler::CallApiFunctionAndReturn(ExternalReference function,
2193  int stack_space) {
2194  ExternalReference next_address =
2195  ExternalReference::handle_scope_next_address();
2196  const int kNextOffset = 0;
2197  const int kLimitOffset = AddressOffset(
2198  ExternalReference::handle_scope_limit_address(),
2199  next_address);
2200  const int kLevelOffset = AddressOffset(
2201  ExternalReference::handle_scope_level_address(),
2202  next_address);
2203 
2204  // Allocate HandleScope in callee-save registers.
2205  mov(r7, Operand(next_address));
2206  ldr(r4, MemOperand(r7, kNextOffset));
2207  ldr(r5, MemOperand(r7, kLimitOffset));
2208  ldr(r6, MemOperand(r7, kLevelOffset));
2209  add(r6, r6, Operand(1));
2210  str(r6, MemOperand(r7, kLevelOffset));
2211 
2212  // Native call returns to the DirectCEntry stub which redirects to the
2213  // return address pushed on stack (could have moved after GC).
2214  // DirectCEntry stub itself is generated early and never moves.
2215  DirectCEntryStub stub;
2216  stub.GenerateCall(this, function);
2217 
2218  Label promote_scheduled_exception;
2219  Label delete_allocated_handles;
2220  Label leave_exit_frame;
2221 
2222  // If result is non-zero, dereference it to get the result value;
2223  // otherwise set it to undefined.
2224  cmp(r0, Operand(0));
2225  LoadRoot(r0, Heap::kUndefinedValueRootIndex, eq);
2226  ldr(r0, MemOperand(r0), ne);
2227 
2228  // No more valid handles (the result handle was the last one). Restore
2229  // previous handle scope.
2230  str(r4, MemOperand(r7, kNextOffset));
2231  if (emit_debug_code()) {
2232  ldr(r1, MemOperand(r7, kLevelOffset));
2233  cmp(r1, r6);
2234  Check(eq, "Unexpected level after return from api call");
2235  }
2236  sub(r6, r6, Operand(1));
2237  str(r6, MemOperand(r7, kLevelOffset));
2238  ldr(ip, MemOperand(r7, kLimitOffset));
2239  cmp(r5, ip);
2240  b(ne, &delete_allocated_handles);
2241 
2242  // Check if the function scheduled an exception.
2243  bind(&leave_exit_frame);
2244  LoadRoot(r4, Heap::kTheHoleValueRootIndex);
2245  mov(ip, Operand(ExternalReference::scheduled_exception_address(isolate())));
2246  ldr(r5, MemOperand(ip));
2247  cmp(r4, r5);
2248  b(ne, &promote_scheduled_exception);
2249 
2250  // LeaveExitFrame expects unwind space to be in a register.
2251  mov(r4, Operand(stack_space));
2252  LeaveExitFrame(false, r4);
2253  mov(pc, lr);
2254 
2255  bind(&promote_scheduled_exception);
2256  TailCallExternalReference(
2257  ExternalReference(Runtime::kPromoteScheduledException, isolate()),
2258  0,
2259  1);
2260 
2261  // HandleScope limit has changed. Delete allocated extensions.
2262  bind(&delete_allocated_handles);
2263  str(r5, MemOperand(r7, kLimitOffset));
2264  mov(r4, r0);
2265  PrepareCallCFunction(1, r5);
2266  mov(r0, Operand(ExternalReference::isolate_address()));
2267  CallCFunction(
2268  ExternalReference::delete_handle_scope_extensions(isolate()), 1);
2269  mov(r0, r4);
2270  jmp(&leave_exit_frame);
2271 }
2272 
2273 
2274 bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
2275  if (!has_frame_ && stub->SometimesSetsUpAFrame()) return false;
2276  return allow_stub_calls_ || stub->CompilingCallsToThisStubIsGCSafe();
2277 }
2278 
2279 
2280 void MacroAssembler::IllegalOperation(int num_arguments) {
2281  if (num_arguments > 0) {
2282  add(sp, sp, Operand(num_arguments * kPointerSize));
2283  }
2284  LoadRoot(r0, Heap::kUndefinedValueRootIndex);
2285 }
2286 
2287 
2288 void MacroAssembler::IndexFromHash(Register hash, Register index) {
2289  // If the hash field contains an array index pick it out. The assert checks
2290  // that the constants for the maximum number of digits for an array index
2291  // cached in the hash field and the number of bits reserved for it does not
2292  // conflict.
2293  ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) <
2294  (1 << String::kArrayIndexValueBits));
2295  // We want the smi-tagged index in key. kArrayIndexValueMask has zeros in
2296  // the low kHashShift bits.
2297  STATIC_ASSERT(kSmiTag == 0);
2298  Ubfx(hash, hash, String::kHashShift, String::kArrayIndexValueBits);
2299  mov(index, Operand(hash, LSL, kSmiTagSize));
2300 }
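// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the original file): the bit manipulation
// done by IndexFromHash above. hash_shift and array_index_bits stand in for
// String::kHashShift and String::kArrayIndexValueBits; the final shift left by
// one is the smi tag (kSmiTagSize == 1, kSmiTag == 0).
#include <stdint.h>
static uint32_t IndexFromHashSketch(uint32_t hash,
                                    int hash_shift,
                                    int array_index_bits) {
  uint32_t index =
      (hash >> hash_shift) & ((1u << array_index_bits) - 1u);  // Ubfx.
  return index << 1;  // mov(index, Operand(hash, LSL, kSmiTagSize)).
}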
2301 
2302 
2303 void MacroAssembler::IntegerToDoubleConversionWithVFP3(Register inReg,
2304  Register outHighReg,
2305  Register outLowReg) {
2306  // ARMv7 VFP3 instructions to implement integer to double conversion.
2307  mov(r7, Operand(inReg, ASR, kSmiTagSize));
2308  vmov(s15, r7);
2309  vcvt_f64_s32(d7, s15);
2310  vmov(outLowReg, outHighReg, d7);
2311 }
2312 
2313 
2314 void MacroAssembler::ObjectToDoubleVFPRegister(Register object,
2315  DwVfpRegister result,
2316  Register scratch1,
2317  Register scratch2,
2318  Register heap_number_map,
2319  SwVfpRegister scratch3,
2320  Label* not_number,
2321  ObjectToDoubleFlags flags) {
2322  Label done;
2323  if ((flags & OBJECT_NOT_SMI) == 0) {
2324  Label not_smi;
2325  JumpIfNotSmi(object, &not_smi);
2326  // Remove smi tag and convert to double.
2327  mov(scratch1, Operand(object, ASR, kSmiTagSize));
2328  vmov(scratch3, scratch1);
2329  vcvt_f64_s32(result, scratch3);
2330  b(&done);
2331  bind(&not_smi);
2332  }
2333  // Check for heap number and load double value from it.
2334  ldr(scratch1, FieldMemOperand(object, HeapObject::kMapOffset));
2335  sub(scratch2, object, Operand(kHeapObjectTag));
2336  cmp(scratch1, heap_number_map);
2337  b(ne, not_number);
2338  if ((flags & AVOID_NANS_AND_INFINITIES) != 0) {
2339  // If exponent is all ones the number is either a NaN or +/-Infinity.
2340  ldr(scratch1, FieldMemOperand(object, HeapNumber::kExponentOffset));
2341  Sbfx(scratch1,
2342  scratch1,
2343  HeapNumber::kExponentShift,
2344  HeapNumber::kExponentBits);
2345  // An all-ones value sign-extends to -1.
2346  cmp(scratch1, Operand(-1));
2347  b(eq, not_number);
2348  }
2349  vldr(result, scratch2, HeapNumber::kValueOffset);
2350  bind(&done);
2351 }
2352 
2353 
2354 void MacroAssembler::SmiToDoubleVFPRegister(Register smi,
2355  DwVfpRegister value,
2356  Register scratch1,
2357  SwVfpRegister scratch2) {
2358  mov(scratch1, Operand(smi, ASR, kSmiTagSize));
2359  vmov(scratch2, scratch1);
2360  vcvt_f64_s32(value, scratch2);
2361 }
2362 
2363 
2364 // Tries to get a signed int32 out of a double precision floating point heap
2365 // number. Rounds towards 0. Branches to 'not_int32' if the double is out of
2366 // the 32-bit signed integer range.
2367 void MacroAssembler::ConvertToInt32(Register source,
2368  Register dest,
2369  Register scratch,
2370  Register scratch2,
2371  DwVfpRegister double_scratch,
2372  Label *not_int32) {
2373  if (CpuFeatures::IsSupported(VFP2)) {
2374  CpuFeatures::Scope scope(VFP2);
2375  sub(scratch, source, Operand(kHeapObjectTag));
2376  vldr(double_scratch, scratch, HeapNumber::kValueOffset);
2377  vcvt_s32_f64(double_scratch.low(), double_scratch);
2378  vmov(dest, double_scratch.low());
2379  // Signed vcvt instruction will saturate to the minimum (0x80000000) or
2380  // maximum (0x7fffffff) signed 32-bit integer when the double is out of
2381  // range. When subtracting one, the minimum signed integer becomes the
2382  // maximum signed integer.
2383  sub(scratch, dest, Operand(1));
2384  cmp(scratch, Operand(LONG_MAX - 1));
2385  // If equal then dest was LONG_MAX, if greater dest was LONG_MIN.
2386  b(ge, not_int32);
2387  } else {
2388  // This code is faster for doubles that are in the ranges -0x7fffffff to
2389  // -0x40000000 or 0x40000000 to 0x7fffffff. This corresponds almost to
2390  // the range of signed int32 values that are not Smis. Jumps to the label
2391  // 'not_int32' if the double isn't in the range -0x80000000.0 to
2392  // 0x80000000.0 (excluding the endpoints).
2393  Label right_exponent, done;
2394  // Get exponent word.
2395  ldr(scratch, FieldMemOperand(source, HeapNumber::kExponentOffset));
2396  // Get exponent alone in scratch2.
2397  Ubfx(scratch2,
2398  scratch,
2399  HeapNumber::kExponentShift,
2400  HeapNumber::kExponentBits);
2401  // Load dest with zero. We use this either for the final shift or
2402  // for the answer.
2403  mov(dest, Operand(0, RelocInfo::NONE));
2404  // Check whether the exponent matches a 32 bit signed int that is not a Smi.
2405  // A non-Smi integer is 1.xxx * 2^30 so the exponent is 30 (biased). This is
2406  // the exponent that we are fastest at and also the highest exponent we can
2407  // handle here.
2408  const uint32_t non_smi_exponent = HeapNumber::kExponentBias + 30;
2409  // The non_smi_exponent, 0x41d, is too big for ARM's immediate field so we
2410  // split it up to avoid a constant pool entry. You can't do that in general
2411  // for cmp because of the overflow flag, but we know the exponent is in the
2412  // range 0-2047 so there is no overflow.
2413  int fudge_factor = 0x400;
2414  sub(scratch2, scratch2, Operand(fudge_factor));
2415  cmp(scratch2, Operand(non_smi_exponent - fudge_factor));
2416  // If we have a match of the int32-but-not-Smi exponent then skip some
2417  // logic.
2418  b(eq, &right_exponent);
2419  // If the exponent is higher than that then go to slow case. This catches
2420  // numbers that don't fit in a signed int32, infinities and NaNs.
2421  b(gt, not_int32);
2422 
2423  // We know the exponent is smaller than 30 (biased). If it is less than
2424  // 0 (biased) then the number is smaller in magnitude than 1.0 * 2^0, i.e.
2425  // it rounds to zero.
2426  const uint32_t zero_exponent = HeapNumber::kExponentBias + 0;
2427  sub(scratch2, scratch2, Operand(zero_exponent - fudge_factor), SetCC);
2428  // Dest already has a Smi zero.
2429  b(lt, &done);
2430 
2431  // We have an exponent between 0 and 30 in scratch2. Subtract from 30 to
2432  // get how much to shift down.
2433  rsb(dest, scratch2, Operand(30));
2434 
2435  bind(&right_exponent);
2436  // Get the top bits of the mantissa.
2437  and_(scratch2, scratch, Operand(HeapNumber::kMantissaMask));
2438  // Put back the implicit 1.
2439  orr(scratch2, scratch2, Operand(1 << HeapNumber::kExponentShift));
2440  // Shift up the mantissa bits to take up the space the exponent used to
2441  // take. We just orred in the implicit bit so that took care of one and
2442  // we want to leave the sign bit 0 so we subtract 2 bits from the shift
2443  // distance.
2444  const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
2445  mov(scratch2, Operand(scratch2, LSL, shift_distance));
2446  // Put sign in zero flag.
2447  tst(scratch, Operand(HeapNumber::kSignMask));
2448  // Get the second half of the double. For some exponents we don't
2449  // actually need this because the bits get shifted out again, but
2450  // it's probably slower to test than just to do it.
2451  ldr(scratch, FieldMemOperand(source, HeapNumber::kMantissaOffset));
2452  // Shift down 22 bits to get the last 10 bits.
2453  orr(scratch, scratch2, Operand(scratch, LSR, 32 - shift_distance));
2454  // Move down according to the exponent.
2455  mov(dest, Operand(scratch, LSR, dest));
2456  // Fix sign if sign bit was set.
2457  rsb(dest, dest, Operand(0, RelocInfo::NONE), LeaveCC, ne);
2458  bind(&done);
2459  }
2460 }
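// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the original file): the saturation check in
// the VFP path of ConvertToInt32 above. vcvt_s32_f64 clamps out-of-range
// doubles to INT32_MIN or INT32_MAX, and on 32-bit ARM LONG_MAX equals
// INT32_MAX, so subtracting one maps both clamped results to values that
// compare (signed) greater than or equal to INT32_MAX - 1.
#include <stdint.h>
static bool ConvertedValueFitsSketch(int32_t converted) {
  // sub(scratch, dest, 1); cmp(scratch, LONG_MAX - 1); b(ge, not_int32).
  int32_t minus_one = (int32_t)((uint32_t)converted - 1u);  // Wraps for INT32_MIN.
  return minus_one < INT32_MAX - 1;
}
// Note that INT32_MAX itself is conservatively rejected, exactly as the
// generated code rejects a dest of LONG_MAX.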
2461 
2462 
2463 void MacroAssembler::EmitVFPTruncate(VFPRoundingMode rounding_mode,
2464  Register result,
2465  DwVfpRegister double_input,
2466  Register scratch,
2467  DwVfpRegister double_scratch,
2468  CheckForInexactConversion check_inexact) {
2469  ASSERT(!result.is(scratch));
2470  ASSERT(!double_input.is(double_scratch));
2471 
2472  ASSERT(CpuFeatures::IsSupported(VFP2));
2473  CpuFeatures::Scope scope(VFP2);
2474  Register prev_fpscr = result;
2475  Label done;
2476 
2477  // Test for values that can be exactly represented as a signed 32-bit integer.
2478  vcvt_s32_f64(double_scratch.low(), double_input);
2479  vmov(result, double_scratch.low());
2480  vcvt_f64_s32(double_scratch, double_scratch.low());
2481  VFPCompareAndSetFlags(double_input, double_scratch);
2482  b(eq, &done);
2483 
2484  // Convert to integer, respecting rounding mode.
2485  int32_t check_inexact_conversion =
2486  (check_inexact == kCheckForInexactConversion) ? kVFPInexactExceptionBit : 0;
2487 
2488  // Set custom FPSCR:
2489  // - Set rounding mode.
2490  // - Clear vfp cumulative exception flags.
2491  // - Make sure Flush-to-zero mode control bit is unset.
2492  vmrs(prev_fpscr);
2493  bic(scratch,
2494  prev_fpscr,
2495  Operand(kVFPExceptionMask |
2496  check_inexact_conversion |
2497  kVFPRoundingModeMask |
2498  kVFPFlushToZeroMask));
2499  // 'Round To Nearest' is encoded by 0b00 so no bits need to be set.
2500  if (rounding_mode != kRoundToNearest) {
2501  orr(scratch, scratch, Operand(rounding_mode));
2502  }
2503  vmsr(scratch);
2504 
2505  // Convert the argument to an integer.
2506  vcvt_s32_f64(double_scratch.low(),
2507  double_input,
2508  (rounding_mode == kRoundToZero) ? kDefaultRoundToZero
2509  : kFPSCRRounding);
2510 
2511  // Retrieve FPSCR.
2512  vmrs(scratch);
2513  // Restore FPSCR.
2514  vmsr(prev_fpscr);
2515  // Move the converted value into the result register.
2516  vmov(result, double_scratch.low());
2517  // Check for vfp exceptions.
2518  tst(scratch, Operand(kVFPExceptionMask | check_inexact_conversion));
2519 
2520  bind(&done);
2521 }
2522 
2523 
2524 void MacroAssembler::EmitOutOfInt32RangeTruncate(Register result,
2525  Register input_high,
2526  Register input_low,
2527  Register scratch) {
2528  Label done, normal_exponent, restore_sign;
2529 
2530  // Extract the biased exponent in result.
2531  Ubfx(result,
2532  input_high,
2533  HeapNumber::kExponentShift,
2534  HeapNumber::kExponentBits);
2535 
2536  // Check for Infinity and NaNs, which should return 0.
2537  cmp(result, Operand(HeapNumber::kExponentMask));
2538  mov(result, Operand(0), LeaveCC, eq);
2539  b(eq, &done);
2540 
2541  // Express exponent as delta to (number of mantissa bits + 31).
2542  sub(result,
2543  result,
2544  Operand(HeapNumber::kExponentBias + HeapNumber::kMantissaBits + 31),
2545  SetCC);
2546 
2547  // If the delta is strictly positive, all bits would be shifted away,
2548  // which means that we can return 0.
2549  b(le, &normal_exponent);
2550  mov(result, Operand(0));
2551  b(&done);
2552 
2553  bind(&normal_exponent);
2554  const int kShiftBase = HeapNumber::kNonMantissaBitsInTopWord - 1;
2555  // Calculate shift.
2556  add(scratch, result, Operand(kShiftBase + HeapNumber::kMantissaBits), SetCC);
2557 
2558  // Save the sign.
2559  Register sign = result;
2560  result = no_reg;
2561  and_(sign, input_high, Operand(HeapNumber::kSignMask));
2562 
2563  // Set the implicit 1 before the mantissa part in input_high.
2564  orr(input_high,
2565  input_high,
2566  Operand(1 << HeapNumber::kMantissaBitsInTopWord));
2567  // Shift the mantissa bits to the correct position.
2568  // We don't need to clear non-mantissa bits as they will be shifted away.
2569  // If they weren't, it would mean that the answer is in the 32bit range.
2570  mov(input_high, Operand(input_high, LSL, scratch));
2571 
2572  // Replace the shifted bits with bits from the lower mantissa word.
2573  Label pos_shift, shift_done;
2574  rsb(scratch, scratch, Operand(32), SetCC);
2575  b(&pos_shift, ge);
2576 
2577  // Negate scratch.
2578  rsb(scratch, scratch, Operand(0));
2579  mov(input_low, Operand(input_low, LSL, scratch));
2580  b(&shift_done);
2581 
2582  bind(&pos_shift);
2583  mov(input_low, Operand(input_low, LSR, scratch));
2584 
2585  bind(&shift_done);
2586  orr(input_high, input_high, Operand(input_low));
2587  // Restore sign if necessary.
2588  cmp(sign, Operand(0));
2589  result = sign;
2590  sign = no_reg;
2591  rsb(result, input_high, Operand(0), LeaveCC, ne);
2592  mov(result, input_high, LeaveCC, eq);
2593  bind(&done);
2594 }
2595 
2596 
2597 void MacroAssembler::EmitECMATruncate(Register result,
2598  DwVfpRegister double_input,
2599  SwVfpRegister single_scratch,
2600  Register scratch,
2601  Register input_high,
2602  Register input_low) {
2603  CpuFeatures::Scope scope(VFP2);
2604  ASSERT(!input_high.is(result));
2605  ASSERT(!input_low.is(result));
2606  ASSERT(!input_low.is(input_high));
2607  ASSERT(!scratch.is(result) &&
2608  !scratch.is(input_high) &&
2609  !scratch.is(input_low));
2610  ASSERT(!single_scratch.is(double_input.low()) &&
2611  !single_scratch.is(double_input.high()));
2612 
2613  Label done;
2614 
2615  // Clear cumulative exception flags.
2616  ClearFPSCRBits(kVFPExceptionMask, scratch);
2617  // Try a conversion to a signed integer.
2618  vcvt_s32_f64(single_scratch, double_input);
2619  vmov(result, single_scratch);
2620  // Retrieve the FPSCR.
2621  vmrs(scratch);
2622  // Check for overflow and NaNs.
2623  tst(scratch, Operand(kVFPOverflowExceptionBit |
2624  kVFPUnderflowExceptionBit |
2625  kVFPInvalidOpExceptionBit));
2626  // If we had no exceptions we are done.
2627  b(eq, &done);
2628 
2629  // Load the double value and perform a manual truncation.
2630  vmov(input_low, input_high, double_input);
2631  EmitOutOfInt32RangeTruncate(result,
2632  input_high,
2633  input_low,
2634  scratch);
2635  bind(&done);
2636 }
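// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the original file): the value that
// EmitECMATruncate is expected to produce, i.e. ECMA-262 ToInt32 of a double.
// The fast path mirrors the plain vcvt conversion; the slow path mirrors the
// modulo-2^32 reduction done by EmitOutOfInt32RangeTruncate.
#include <stdint.h>
#include <cmath>
static int32_t ToInt32Sketch(double value) {
  if (value >= -2147483648.0 && value <= 2147483647.0) {
    return (int32_t)value;              // Fast path: truncation towards zero.
  }
  if (!std::isfinite(value)) return 0;  // NaN and +/-Infinity become 0.
  double d = std::fmod(std::trunc(value), 4294967296.0);  // Integer part mod 2^32.
  if (d < 0) d += 4294967296.0;
  return (int32_t)(uint32_t)d;  // Two's-complement wrap into [-2^31, 2^31).
}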
2637 
2638 
2639 void MacroAssembler::GetLeastBitsFromSmi(Register dst,
2640  Register src,
2641  int num_least_bits) {
2642  if (CpuFeatures::IsSupported(ARMv7) && !predictable_code_size()) {
2643  ubfx(dst, src, kSmiTagSize, num_least_bits);
2644  } else {
2645  mov(dst, Operand(src, ASR, kSmiTagSize));
2646  and_(dst, dst, Operand((1 << num_least_bits) - 1));
2647  }
2648 }
2649 
2650 
2651 void MacroAssembler::GetLeastBitsFromInt32(Register dst,
2652  Register src,
2653  int num_least_bits) {
2654  and_(dst, src, Operand((1 << num_least_bits) - 1));
2655 }
2656 
2657 
2658 void MacroAssembler::CallRuntime(const Runtime::Function* f,
2659  int num_arguments) {
2660  // All parameters are on the stack. r0 has the return value after call.
2661 
2662  // If the expected number of arguments of the runtime function is
2663  // constant, we check that the actual number of arguments match the
2664  // expectation.
2665  if (f->nargs >= 0 && f->nargs != num_arguments) {
2666  IllegalOperation(num_arguments);
2667  return;
2668  }
2669 
2670  // TODO(1236192): Most runtime routines don't need the number of
2671  // arguments passed in because it is constant. At some point we
2672  // should remove this need and make the runtime routine entry code
2673  // smarter.
2674  mov(r0, Operand(num_arguments));
2675  mov(r1, Operand(ExternalReference(f, isolate())));
2676  CEntryStub stub(1);
2677  CallStub(&stub);
2678 }
2679 
2680 
2681 void MacroAssembler::CallRuntime(Runtime::FunctionId fid, int num_arguments) {
2682  CallRuntime(Runtime::FunctionForId(fid), num_arguments);
2683 }
2684 
2685 
2686 void MacroAssembler::CallRuntimeSaveDoubles(Runtime::FunctionId id) {
2687  const Runtime::Function* function = Runtime::FunctionForId(id);
2688  mov(r0, Operand(function->nargs));
2689  mov(r1, Operand(ExternalReference(function, isolate())));
2690  CEntryStub stub(1, kSaveFPRegs);
2691  CallStub(&stub);
2692 }
2693 
2694 
2695 void MacroAssembler::CallExternalReference(const ExternalReference& ext,
2696  int num_arguments) {
2697  mov(r0, Operand(num_arguments));
2698  mov(r1, Operand(ext));
2699 
2700  CEntryStub stub(1);
2701  CallStub(&stub);
2702 }
2703 
2704 
2705 void MacroAssembler::TailCallExternalReference(const ExternalReference& ext,
2706  int num_arguments,
2707  int result_size) {
2708  // TODO(1236192): Most runtime routines don't need the number of
2709  // arguments passed in because it is constant. At some point we
2710  // should remove this need and make the runtime routine entry code
2711  // smarter.
2712  mov(r0, Operand(num_arguments));
2713  JumpToExternalReference(ext);
2714 }
2715 
2716 
2717 void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
2718  int num_arguments,
2719  int result_size) {
2720  TailCallExternalReference(ExternalReference(fid, isolate()),
2721  num_arguments,
2722  result_size);
2723 }
2724 
2725 
2726 void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin) {
2727 #if defined(__thumb__)
2728  // Thumb mode builtin.
2729  ASSERT((reinterpret_cast<intptr_t>(builtin.address()) & 1) == 1);
2730 #endif
2731  mov(r1, Operand(builtin));
2732  CEntryStub stub(1);
2733  Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
2734 }
2735 
2736 
2737 void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
2738  InvokeFlag flag,
2739  const CallWrapper& call_wrapper) {
2740  // You can't call a builtin without a valid frame.
2741  ASSERT(flag == JUMP_FUNCTION || has_frame());
2742 
2743  GetBuiltinEntry(r2, id);
2744  if (flag == CALL_FUNCTION) {
2745  call_wrapper.BeforeCall(CallSize(r2));
2746  SetCallKind(r5, CALL_AS_METHOD);
2747  Call(r2);
2748  call_wrapper.AfterCall();
2749  } else {
2750  ASSERT(flag == JUMP_FUNCTION);
2751  SetCallKind(r5, CALL_AS_METHOD);
2752  Jump(r2);
2753  }
2754 }
2755 
2756 
2757 void MacroAssembler::GetBuiltinFunction(Register target,
2758  Builtins::JavaScript id) {
2759  // Load the builtins object into target register.
2760  ldr(target,
2761  MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
2762  ldr(target, FieldMemOperand(target, GlobalObject::kBuiltinsOffset));
2763  // Load the JavaScript builtin function from the builtins object.
2764  ldr(target, FieldMemOperand(target,
2765  JSBuiltinsObject::OffsetOfFunctionWithId(id)));
2766 }
2767 
2768 
2769 void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
2770  ASSERT(!target.is(r1));
2771  GetBuiltinFunction(r1, id);
2772  // Load the code entry point from the builtins object.
2773  ldr(target, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
2774 }
2775 
2776 
2777 void MacroAssembler::SetCounter(StatsCounter* counter, int value,
2778  Register scratch1, Register scratch2) {
2779  if (FLAG_native_code_counters && counter->Enabled()) {
2780  mov(scratch1, Operand(value));
2781  mov(scratch2, Operand(ExternalReference(counter)));
2782  str(scratch1, MemOperand(scratch2));
2783  }
2784 }
2785 
2786 
2787 void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
2788  Register scratch1, Register scratch2) {
2789  ASSERT(value > 0);
2790  if (FLAG_native_code_counters && counter->Enabled()) {
2791  mov(scratch2, Operand(ExternalReference(counter)));
2792  ldr(scratch1, MemOperand(scratch2));
2793  add(scratch1, scratch1, Operand(value));
2794  str(scratch1, MemOperand(scratch2));
2795  }
2796 }
2797 
2798 
2799 void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
2800  Register scratch1, Register scratch2) {
2801  ASSERT(value > 0);
2802  if (FLAG_native_code_counters && counter->Enabled()) {
2803  mov(scratch2, Operand(ExternalReference(counter)));
2804  ldr(scratch1, MemOperand(scratch2));
2805  sub(scratch1, scratch1, Operand(value));
2806  str(scratch1, MemOperand(scratch2));
2807  }
2808 }
2809 
2810 
2811 void MacroAssembler::Assert(Condition cond, const char* msg) {
2812  if (emit_debug_code())
2813  Check(cond, msg);
2814 }
2815 
2816 
2817 void MacroAssembler::AssertRegisterIsRoot(Register reg,
2818  Heap::RootListIndex index) {
2819  if (emit_debug_code()) {
2820  LoadRoot(ip, index);
2821  cmp(reg, ip);
2822  Check(eq, "Register did not match expected root");
2823  }
2824 }
2825 
2826 
2827 void MacroAssembler::AssertFastElements(Register elements) {
2828  if (emit_debug_code()) {
2829  ASSERT(!elements.is(ip));
2830  Label ok;
2831  push(elements);
2832  ldr(elements, FieldMemOperand(elements, HeapObject::kMapOffset));
2833  LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
2834  cmp(elements, ip);
2835  b(eq, &ok);
2836  LoadRoot(ip, Heap::kFixedDoubleArrayMapRootIndex);
2837  cmp(elements, ip);
2838  b(eq, &ok);
2839  LoadRoot(ip, Heap::kFixedCOWArrayMapRootIndex);
2840  cmp(elements, ip);
2841  b(eq, &ok);
2842  Abort("JSObject with fast elements map has slow elements");
2843  bind(&ok);
2844  pop(elements);
2845  }
2846 }
2847 
2848 
2849 void MacroAssembler::Check(Condition cond, const char* msg) {
2850  Label L;
2851  b(cond, &L);
2852  Abort(msg);
2853  // will not return here
2854  bind(&L);
2855 }
2856 
2857 
2858 void MacroAssembler::Abort(const char* msg) {
2859  Label abort_start;
2860  bind(&abort_start);
2861  // We want to pass the msg string like a smi to avoid GC
2862  // problems, however msg is not guaranteed to be aligned
2863  // properly. Instead, we pass an aligned pointer that is
2864  // a proper v8 smi, but also pass the alignment difference
2865  // from the real pointer as a smi.
2866  intptr_t p1 = reinterpret_cast<intptr_t>(msg);
2867  intptr_t p0 = (p1 & ~kSmiTagMask) + kSmiTag;
2868  ASSERT(reinterpret_cast<Object*>(p0)->IsSmi());
2869 #ifdef DEBUG
2870  if (msg != NULL) {
2871  RecordComment("Abort message: ");
2872  RecordComment(msg);
2873  }
2874 #endif
2875 
2876  mov(r0, Operand(p0));
2877  push(r0);
2878  mov(r0, Operand(Smi::FromInt(p1 - p0)));
2879  push(r0);
2880  // Disable stub call restrictions to always allow calls to abort.
2881  if (!has_frame_) {
2882  // We don't actually want to generate a pile of code for this, so just
2883  // claim there is a stack frame, without generating one.
2884  FrameScope scope(this, StackFrame::NONE);
2885  CallRuntime(Runtime::kAbort, 2);
2886  } else {
2887  CallRuntime(Runtime::kAbort, 2);
2888  }
2889  // will not return here
2890  if (is_const_pool_blocked()) {
2891  // If the calling code cares about the exact number of
2892  // instructions generated, we insert padding here to keep the size
2893  // of the Abort macro constant.
2894  static const int kExpectedAbortInstructions = 10;
2895  int abort_instructions = InstructionsGeneratedSince(&abort_start);
2896  ASSERT(abort_instructions <= kExpectedAbortInstructions);
2897  while (abort_instructions++ < kExpectedAbortInstructions) {
2898  nop();
2899  }
2900  }
2901 }
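// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the original file): how Abort above passes
// a possibly unaligned message pointer without confusing the GC. With
// kSmiTag == 0, kSmiTagMask == 1 and kSmiTagSize == 1, the pointer is rounded
// down to an even, smi-looking value and the remainder travels as a real smi.
#include <stdint.h>
struct AbortArgsSketch {
  intptr_t aligned_ptr;   // p0: low bit clear, so it looks like a smi.
  intptr_t delta_as_smi;  // Smi::FromInt(p1 - p0): the lost bit, smi-tagged.
};
static AbortArgsSketch EncodeAbortMessageSketch(const char* msg) {
  intptr_t p1 = (intptr_t)msg;
  intptr_t p0 = p1 & ~(intptr_t)1;     // (p1 & ~kSmiTagMask) + kSmiTag.
  AbortArgsSketch args;
  args.aligned_ptr = p0;
  args.delta_as_smi = (p1 - p0) << 1;  // Delta is 0 or 1; tag it as a smi.
  return args;
}
// The receiving runtime function can recover the original pointer as
// p0 + (delta_as_smi >> 1).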
2902 
2903 
2904 void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
2905  if (context_chain_length > 0) {
2906  // Move up the chain of contexts to the context containing the slot.
2907  ldr(dst, MemOperand(cp, Context::SlotOffset(Context::PREVIOUS_INDEX)));
2908  for (int i = 1; i < context_chain_length; i++) {
2909  ldr(dst, MemOperand(dst, Context::SlotOffset(Context::PREVIOUS_INDEX)));
2910  }
2911  } else {
2912  // Slot is in the current function context. Move it into the
2913  // destination register in case we store into it (the write barrier
2914  // cannot be allowed to destroy the context in cp).
2915  mov(dst, cp);
2916  }
2917 }
2918 
2919 
2920 void MacroAssembler::LoadTransitionedArrayMapConditional(
2921  ElementsKind expected_kind,
2922  ElementsKind transitioned_kind,
2923  Register map_in_out,
2924  Register scratch,
2925  Label* no_map_match) {
2926  // Load the global or builtins object from the current context.
2927  ldr(scratch,
2928  MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
2929  ldr(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset));
2930 
2931  // Check that the function's map is the same as the expected cached map.
2932  ldr(scratch,
2933  MemOperand(scratch,
2934  Context::SlotOffset(Context::JS_ARRAY_MAPS_INDEX)));
2935  size_t offset = expected_kind * kPointerSize +
2936  FixedArrayBase::kHeaderSize;
2937  ldr(ip, FieldMemOperand(scratch, offset));
2938  cmp(map_in_out, ip);
2939  b(ne, no_map_match);
2940 
2941  // Use the transitioned cached map.
2942  offset = transitioned_kind * kPointerSize +
2943  FixedArrayBase::kHeaderSize;
2944  ldr(map_in_out, FieldMemOperand(scratch, offset));
2945 }
2946 
2947 
2948 void MacroAssembler::LoadInitialArrayMap(
2949  Register function_in, Register scratch,
2950  Register map_out, bool can_have_holes) {
2951  ASSERT(!function_in.is(map_out));
2952  Label done;
2953  ldr(map_out, FieldMemOperand(function_in,
2954  JSFunction::kPrototypeOrInitialMapOffset));
2955  if (!FLAG_smi_only_arrays) {
2956  ElementsKind kind = can_have_holes ? FAST_HOLEY_ELEMENTS : FAST_ELEMENTS;
2957  LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
2958  kind,
2959  map_out,
2960  scratch,
2961  &done);
2962  } else if (can_have_holes) {
2963  LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
2964  FAST_HOLEY_SMI_ELEMENTS,
2965  map_out,
2966  scratch,
2967  &done);
2968  }
2969  bind(&done);
2970 }
2971 
2972 
2973 void MacroAssembler::LoadGlobalFunction(int index, Register function) {
2974  // Load the global or builtins object from the current context.
2975  ldr(function,
2976  MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
2977  // Load the native context from the global or builtins object.
2978  ldr(function, FieldMemOperand(function,
2979  GlobalObject::kNativeContextOffset));
2980  // Load the function from the native context.
2981  ldr(function, MemOperand(function, Context::SlotOffset(index)));
2982 }
2983 
2984 
2985 void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
2986  Register map,
2987  Register scratch) {
2988  // Load the initial map. The global functions all have initial maps.
2989  ldr(map, FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
2990  if (emit_debug_code()) {
2991  Label ok, fail;
2992  CheckMap(map, scratch, Heap::kMetaMapRootIndex, &fail, DO_SMI_CHECK);
2993  b(&ok);
2994  bind(&fail);
2995  Abort("Global functions must have initial map");
2996  bind(&ok);
2997  }
2998 }
2999 
3000 
3001 void MacroAssembler::JumpIfNotPowerOfTwoOrZero(
3002  Register reg,
3003  Register scratch,
3004  Label* not_power_of_two_or_zero) {
3005  sub(scratch, reg, Operand(1), SetCC);
3006  b(mi, not_power_of_two_or_zero);
3007  tst(scratch, reg);
3008  b(ne, not_power_of_two_or_zero);
3009 }
3010 
3011 
3012 void MacroAssembler::JumpIfNotPowerOfTwoOrZeroAndNeg(
3013  Register reg,
3014  Register scratch,
3015  Label* zero_and_neg,
3016  Label* not_power_of_two) {
3017  sub(scratch, reg, Operand(1), SetCC);
3018  b(mi, zero_and_neg);
3019  tst(scratch, reg);
3020  b(ne, not_power_of_two);
3021 }
3022 
3023 
3024 void MacroAssembler::JumpIfNotBothSmi(Register reg1,
3025  Register reg2,
3026  Label* on_not_both_smi) {
3027  STATIC_ASSERT(kSmiTag == 0);
3028  tst(reg1, Operand(kSmiTagMask));
3029  tst(reg2, Operand(kSmiTagMask), eq);
3030  b(ne, on_not_both_smi);
3031 }
3032 
3033 
3034 void MacroAssembler::UntagAndJumpIfSmi(
3035  Register dst, Register src, Label* smi_case) {
3036  STATIC_ASSERT(kSmiTag == 0);
3037  mov(dst, Operand(src, ASR, kSmiTagSize), SetCC);
3038  b(cc, smi_case); // Shifter carry is not set for a smi.
3039 }
3040 
3041 
3042 void MacroAssembler::UntagAndJumpIfNotSmi(
3043  Register dst, Register src, Label* non_smi_case) {
3044  STATIC_ASSERT(kSmiTag == 0);
3045  mov(dst, Operand(src, ASR, kSmiTagSize), SetCC);
3046  b(cs, non_smi_case); // Shifter carry is set for a non-smi.
3047 }
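// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the original file): the smi encoding relied
// on by UntagAndJumpIfSmi/UntagAndJumpIfNotSmi above. On 32-bit ARM a smi is
// the integer shifted left by one with a zero tag bit, so ASR #1 with SetCC
// untags the value and moves the tag bit into the carry flag.
#include <stdint.h>
static bool UntagIfSmiSketch(int32_t word, int32_t* untagged) {
  *untagged = word >> 1;   // mov(dst, Operand(src, ASR, kSmiTagSize), SetCC).
  return (word & 1) == 0;  // Carry clear <=> the low (tag) bit was zero.
}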
3048 
3049 
3050 void MacroAssembler::JumpIfEitherSmi(Register reg1,
3051  Register reg2,
3052  Label* on_either_smi) {
3053  STATIC_ASSERT(kSmiTag == 0);
3054  tst(reg1, Operand(kSmiTagMask));
3055  tst(reg2, Operand(kSmiTagMask), ne);
3056  b(eq, on_either_smi);
3057 }
3058 
3059 
3060 void MacroAssembler::AssertNotSmi(Register object) {
3061  if (emit_debug_code()) {
3062  STATIC_ASSERT(kSmiTag == 0);
3063  tst(object, Operand(kSmiTagMask));
3064  Check(ne, "Operand is a smi");
3065  }
3066 }
3067 
3068 
3069 void MacroAssembler::AssertSmi(Register object) {
3070  if (emit_debug_code()) {
3071  STATIC_ASSERT(kSmiTag == 0);
3072  tst(object, Operand(kSmiTagMask));
3073  Check(eq, "Operand is not smi");
3074  }
3075 }
3076 
3077 
3078 void MacroAssembler::AssertString(Register object) {
3079  if (emit_debug_code()) {
3080  STATIC_ASSERT(kSmiTag == 0);
3081  tst(object, Operand(kSmiTagMask));
3082  Check(ne, "Operand is a smi and not a string");
3083  push(object);
3084  ldr(object, FieldMemOperand(object, HeapObject::kMapOffset));
3085  CompareInstanceType(object, object, FIRST_NONSTRING_TYPE);
3086  pop(object);
3087  Check(lo, "Operand is not a string");
3088  }
3089 }
3090 
3091 
3092 
3093 void MacroAssembler::AssertRootValue(Register src,
3094  Heap::RootListIndex root_value_index,
3095  const char* message) {
3096  if (emit_debug_code()) {
3097  CompareRoot(src, root_value_index);
3098  Check(eq, message);
3099  }
3100 }
3101 
3102 
3103 void MacroAssembler::JumpIfNotHeapNumber(Register object,
3104  Register heap_number_map,
3105  Register scratch,
3106  Label* on_not_heap_number) {
3107  ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
3108  AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
3109  cmp(scratch, heap_number_map);
3110  b(ne, on_not_heap_number);
3111 }
3112 
3113 
3114 void MacroAssembler::JumpIfNonSmisNotBothSequentialAsciiStrings(
3115  Register first,
3116  Register second,
3117  Register scratch1,
3118  Register scratch2,
3119  Label* failure) {
3120  // Test that both first and second are sequential ASCII strings.
3121  // Assume that they are non-smis.
3122  ldr(scratch1, FieldMemOperand(first, HeapObject::kMapOffset));
3123  ldr(scratch2, FieldMemOperand(second, HeapObject::kMapOffset));
3124  ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
3125  ldrb(scratch2, FieldMemOperand(scratch2, Map::kInstanceTypeOffset));
3126 
3127  JumpIfBothInstanceTypesAreNotSequentialAscii(scratch1,
3128  scratch2,
3129  scratch1,
3130  scratch2,
3131  failure);
3132 }
3133 
3134 void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(Register first,
3135  Register second,
3136  Register scratch1,
3137  Register scratch2,
3138  Label* failure) {
3139  // Check that neither is a smi.
3140  STATIC_ASSERT(kSmiTag == 0);
3141  and_(scratch1, first, Operand(second));
3142  JumpIfSmi(scratch1, failure);
3143  JumpIfNonSmisNotBothSequentialAsciiStrings(first,
3144  second,
3145  scratch1,
3146  scratch2,
3147  failure);
3148 }
3149 
3150 
3151 // Allocates a heap number or jumps to the gc_required label if the young space
3152 // is full and a scavenge is needed.
3153 void MacroAssembler::AllocateHeapNumber(Register result,
3154  Register scratch1,
3155  Register scratch2,
3156  Register heap_number_map,
3157  Label* gc_required,
3158  TaggingMode tagging_mode) {
3159  // Allocate an object in the heap for the heap number and tag it as a heap
3160  // object.
3161  AllocateInNewSpace(HeapNumber::kSize,
3162  result,
3163  scratch1,
3164  scratch2,
3165  gc_required,
3166  tagging_mode == TAG_RESULT ? TAG_OBJECT :
3167  NO_ALLOCATION_FLAGS);
3168 
3169  // Store heap number map in the allocated object.
3170  AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
3171  if (tagging_mode == TAG_RESULT) {
3172  str(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset));
3173  } else {
3174  str(heap_number_map, MemOperand(result, HeapObject::kMapOffset));
3175  }
3176 }
3177 
3178 
3179 void MacroAssembler::AllocateHeapNumberWithValue(Register result,
3180  DwVfpRegister value,
3181  Register scratch1,
3182  Register scratch2,
3183  Register heap_number_map,
3184  Label* gc_required) {
3185  AllocateHeapNumber(result, scratch1, scratch2, heap_number_map, gc_required);
3186  sub(scratch1, result, Operand(kHeapObjectTag));
3187  vstr(value, scratch1, HeapNumber::kValueOffset);
3188 }
3189 
3190 
3191 // Copies a fixed number of fields of heap objects from src to dst.
3192 void MacroAssembler::CopyFields(Register dst,
3193  Register src,
3194  RegList temps,
3195  int field_count) {
3196  // At least one bit set in the first 15 registers.
3197  ASSERT((temps & ((1 << 15) - 1)) != 0);
3198  ASSERT((temps & dst.bit()) == 0);
3199  ASSERT((temps & src.bit()) == 0);
3200  // Primitive implementation using only one temporary register.
3201 
3202  Register tmp = no_reg;
3203  // Find a temp register in temps list.
3204  for (int i = 0; i < 15; i++) {
3205  if ((temps & (1 << i)) != 0) {
3206  tmp.set_code(i);
3207  break;
3208  }
3209  }
3210  ASSERT(!tmp.is(no_reg));
3211 
3212  for (int i = 0; i < field_count; i++) {
3213  ldr(tmp, FieldMemOperand(src, i * kPointerSize));
3214  str(tmp, FieldMemOperand(dst, i * kPointerSize));
3215  }
3216 }
3217 
3218 
3219 void MacroAssembler::CopyBytes(Register src,
3220  Register dst,
3221  Register length,
3222  Register scratch) {
3223  Label align_loop, align_loop_1, word_loop, byte_loop, byte_loop_1, done;
3224 
3225  // Align src before copying in word size chunks.
3226  bind(&align_loop);
3227  cmp(length, Operand(0));
3228  b(eq, &done);
3229  bind(&align_loop_1);
3230  tst(src, Operand(kPointerSize - 1));
3231  b(eq, &word_loop);
3232  ldrb(scratch, MemOperand(src, 1, PostIndex));
3233  strb(scratch, MemOperand(dst, 1, PostIndex));
3234  sub(length, length, Operand(1), SetCC);
3235  b(ne, &byte_loop_1);
3236 
3237  // Copy bytes in word size chunks.
3238  bind(&word_loop);
3239  if (emit_debug_code()) {
3240  tst(src, Operand(kPointerSize - 1));
3241  Assert(eq, "Expecting alignment for CopyBytes");
3242  }
3243  cmp(length, Operand(kPointerSize));
3244  b(lt, &byte_loop);
3245  ldr(scratch, MemOperand(src, kPointerSize, PostIndex));
3246  if (CpuFeatures::IsSupported(UNALIGNED_ACCESSES)) {
3247  str(scratch, MemOperand(dst, kPointerSize, PostIndex));
3248  } else {
3249  strb(scratch, MemOperand(dst, 1, PostIndex));
3250  mov(scratch, Operand(scratch, LSR, 8));
3251  strb(scratch, MemOperand(dst, 1, PostIndex));
3252  mov(scratch, Operand(scratch, LSR, 8));
3253  strb(scratch, MemOperand(dst, 1, PostIndex));
3254  mov(scratch, Operand(scratch, LSR, 8));
3255  strb(scratch, MemOperand(dst, 1, PostIndex));
3256  }
3257  sub(length, length, Operand(kPointerSize));
3258  b(&word_loop);
3259 
3260  // Copy the last bytes if any left.
3261  bind(&byte_loop);
3262  cmp(length, Operand(0));
3263  b(eq, &done);
3264  bind(&byte_loop_1);
3265  ldrb(scratch, MemOperand(src, 1, PostIndex));
3266  strb(scratch, MemOperand(dst, 1, PostIndex));
3267  sub(length, length, Operand(1), SetCC);
3268  b(ne, &byte_loop_1);
3269  bind(&done);
3270 }
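// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the original file): the copy strategy that
// CopyBytes above is built around, written as plain C++. Only |src| is aligned
// before the word loop; when the CPU lacks unaligned accesses, the generated
// code splits each word store into four byte stores. (Note that the align loop
// as listed above falls back to byte_loop_1 after the first unaligned byte, so
// an initially unaligned source ends up being copied byte by byte.)
#include <stdint.h>
#include <stddef.h>
#include <string.h>
static void CopyBytesSketch(const uint8_t* src, uint8_t* dst, size_t length) {
  while (length > 0 && ((uintptr_t)src & 3u) != 0) {  // Byte-copy until aligned.
    *dst++ = *src++;
    --length;
  }
  while (length >= 4) {     // Word-sized chunks (kPointerSize == 4).
    uint32_t word;
    memcpy(&word, src, 4);  // ldr from the now-aligned source.
    memcpy(dst, &word, 4);  // str, or four strb's on older cores.
    src += 4;
    dst += 4;
    length -= 4;
  }
  while (length > 0) {      // Copy the remaining tail bytes.
    *dst++ = *src++;
    --length;
  }
}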
3271 
3272 
3273 void MacroAssembler::InitializeFieldsWithFiller(Register start_offset,
3274  Register end_offset,
3275  Register filler) {
3276  Label loop, entry;
3277  b(&entry);
3278  bind(&loop);
3279  str(filler, MemOperand(start_offset, kPointerSize, PostIndex));
3280  bind(&entry);
3281  cmp(start_offset, end_offset);
3282  b(lt, &loop);
3283 }
3284 
3285 
3286 void MacroAssembler::CountLeadingZeros(Register zeros, // Answer.
3287  Register source, // Input.
3288  Register scratch) {
3289  ASSERT(!zeros.is(source) || !source.is(scratch));
3290  ASSERT(!zeros.is(scratch));
3291  ASSERT(!scratch.is(ip));
3292  ASSERT(!source.is(ip));
3293  ASSERT(!zeros.is(ip));
3294 #ifdef CAN_USE_ARMV5_INSTRUCTIONS
3295  clz(zeros, source); // This instruction is only supported on ARMv5 and later.
3296 #else
3297  // Order of the next two lines is important: zeros register
3298  // can be the same as source register.
3299  Move(scratch, source);
3300  mov(zeros, Operand(0, RelocInfo::NONE));
3301  // Top 16.
3302  tst(scratch, Operand(0xffff0000));
3303  add(zeros, zeros, Operand(16), LeaveCC, eq);
3304  mov(scratch, Operand(scratch, LSL, 16), LeaveCC, eq);
3305  // Top 8.
3306  tst(scratch, Operand(0xff000000));
3307  add(zeros, zeros, Operand(8), LeaveCC, eq);
3308  mov(scratch, Operand(scratch, LSL, 8), LeaveCC, eq);
3309  // Top 4.
3310  tst(scratch, Operand(0xf0000000));
3311  add(zeros, zeros, Operand(4), LeaveCC, eq);
3312  mov(scratch, Operand(scratch, LSL, 4), LeaveCC, eq);
3313  // Top 2.
3314  tst(scratch, Operand(0xc0000000));
3315  add(zeros, zeros, Operand(2), LeaveCC, eq);
3316  mov(scratch, Operand(scratch, LSL, 2), LeaveCC, eq);
3317  // Top bit.
3318  tst(scratch, Operand(0x80000000u));
3319  add(zeros, zeros, Operand(1), LeaveCC, eq);
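  // Example: source == 0x00012345 accumulates zeros == 15, matching what clz returns.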
3320 #endif
3321 }
3322 
3323 
3324 void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialAscii(
3325  Register first,
3326  Register second,
3327  Register scratch1,
3328  Register scratch2,
3329  Label* failure) {
3330  int kFlatAsciiStringMask =
3331  kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
3332  int kFlatAsciiStringTag = ASCII_STRING_TYPE;
3333  and_(scratch1, first, Operand(kFlatAsciiStringMask));
3334  and_(scratch2, second, Operand(kFlatAsciiStringMask));
3335  cmp(scratch1, Operand(kFlatAsciiStringTag));
3336  // Ignore second test if first test failed.
3337  cmp(scratch2, Operand(kFlatAsciiStringTag), eq);
3338  b(ne, failure);
3339 }
3340 
3341 
3342 void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii(Register type,
3343  Register scratch,
3344  Label* failure) {
3345  int kFlatAsciiStringMask =
3346  kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
3347  int kFlatAsciiStringTag = ASCII_STRING_TYPE;
3348  and_(scratch, type, Operand(kFlatAsciiStringMask));
3349  cmp(scratch, Operand(kFlatAsciiStringTag));
3350  b(ne, failure);
3351 }
3352 
3353 static const int kRegisterPassedArguments = 4;
3354 
3355 
3356 int MacroAssembler::CalculateStackPassedWords(int num_reg_arguments,
3357  int num_double_arguments) {
3358  int stack_passed_words = 0;
3359  if (use_eabi_hardfloat()) {
3360  // In the hard floating point calling convention, we can use
3361  // all double registers to pass doubles.
3362  if (num_double_arguments > DoubleRegister::kNumRegisters) {
3363  stack_passed_words +=
3364  2 * (num_double_arguments - DoubleRegister::kNumRegisters);
3365  }
3366  } else {
3367  // In the soft floating point calling convention, every double
3368  // argument is passed using two registers.
3369  num_reg_arguments += 2 * num_double_arguments;
3370  }
3371  // Up to four simple arguments are passed in registers r0..r3.
3372  if (num_reg_arguments > kRegisterPassedArguments) {
3373  stack_passed_words += num_reg_arguments - kRegisterPassedArguments;
3374  }
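  // Example (soft-float): 3 integer arguments plus 1 double count as 5
  // register-sized arguments, so one word spills onto the stack.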
3375  return stack_passed_words;
3376 }
3377 
3378 
3379 void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
3380  int num_double_arguments,
3381  Register scratch) {
3382  int frame_alignment = ActivationFrameAlignment();
3383  int stack_passed_arguments = CalculateStackPassedWords(
3384  num_reg_arguments, num_double_arguments);
3385  if (frame_alignment > kPointerSize) {
3386  // Make stack end at alignment and make room for num_arguments - 4 words
3387  // and the original value of sp.
3388  mov(scratch, sp);
3389  sub(sp, sp, Operand((stack_passed_arguments + 1) * kPointerSize));
3390  ASSERT(IsPowerOf2(frame_alignment));
3391  and_(sp, sp, Operand(-frame_alignment));
3392  str(scratch, MemOperand(sp, stack_passed_arguments * kPointerSize));
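  // The stack now holds, from sp upwards, the argument slots followed by the
  // saved original sp, with sp itself aligned to frame_alignment.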
3393  } else {
3394  sub(sp, sp, Operand(stack_passed_arguments * kPointerSize));
3395  }
3396 }
3397 
3398 
3399 void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
3400  Register scratch) {
3401  PrepareCallCFunction(num_reg_arguments, 0, scratch);
3402 }
3403 
3404 
3405 void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg) {
3406  ASSERT(CpuFeatures::IsSupported(VFP2));
3407  if (use_eabi_hardfloat()) {
3408  Move(d0, dreg);
3409  } else {
3410  vmov(r0, r1, dreg);
3411  }
3412 }
3413 
3414 
3415 void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg1,
3416  DoubleRegister dreg2) {
3417  ASSERT(CpuFeatures::IsSupported(VFP2));
3418  if (use_eabi_hardfloat()) {
3419  if (dreg2.is(d0)) {
3420  ASSERT(!dreg1.is(d1));
3421  Move(d1, dreg2);
3422  Move(d0, dreg1);
3423  } else {
3424  Move(d0, dreg1);
3425  Move(d1, dreg2);
3426  }
3427  } else {
3428  vmov(r0, r1, dreg1);
3429  vmov(r2, r3, dreg2);
3430  }
3431 }
3432 
3433 
3434 void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg,
3435  Register reg) {
3436  ASSERT(CpuFeatures::IsSupported(VFP2));
3437  if (use_eabi_hardfloat()) {
3438  Move(d0, dreg);
3439  Move(r0, reg);
3440  } else {
3441  Move(r2, reg);
3442  vmov(r0, r1, dreg);
3443  }
3444 }
3445 
3446 
3447 void MacroAssembler::CallCFunction(ExternalReference function,
3448  int num_reg_arguments,
3449  int num_double_arguments) {
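  // Callers set up the frame with PrepareCallCFunction and, for doubles,
  // SetCallCDoubleArguments first. Sketch (the external reference shown is
  // illustrative only):
  //   PrepareCallCFunction(0, 2, scratch);
  //   SetCallCDoubleArguments(d1, d2);
  //   CallCFunction(ExternalReference::power_double_double_function(isolate()),
  //                 0, 2);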
3450  mov(ip, Operand(function));
3451  CallCFunctionHelper(ip, num_reg_arguments, num_double_arguments);
3452 }
3453 
3454 
3455 void MacroAssembler::CallCFunction(Register function,
3456  int num_reg_arguments,
3457  int num_double_arguments) {
3458  CallCFunctionHelper(function, num_reg_arguments, num_double_arguments);
3459 }
3460 
3461 
3462 void MacroAssembler::CallCFunction(ExternalReference function,
3463  int num_arguments) {
3464  CallCFunction(function, num_arguments, 0);
3465 }
3466 
3467 
3468 void MacroAssembler::CallCFunction(Register function,
3469  int num_arguments) {
3470  CallCFunction(function, num_arguments, 0);
3471 }
3472 
3473 
3474 void MacroAssembler::CallCFunctionHelper(Register function,
3475  int num_reg_arguments,
3476  int num_double_arguments) {
3477  ASSERT(has_frame());
3478  // Make sure that the stack is aligned before calling a C function unless
3479  // running in the simulator. The simulator has its own alignment check which
3480  // provides more information.
3481 #if defined(V8_HOST_ARCH_ARM)
3482  if (emit_debug_code()) {
3483  int frame_alignment = OS::ActivationFrameAlignment();
3484  int frame_alignment_mask = frame_alignment - 1;
3485  if (frame_alignment > kPointerSize) {
3486  ASSERT(IsPowerOf2(frame_alignment));
3487  Label alignment_as_expected;
3488  tst(sp, Operand(frame_alignment_mask));
3489  b(eq, &alignment_as_expected);
3490  // Don't use Check here, as it will call Runtime_Abort possibly
3491  // re-entering here.
3492  stop("Unexpected alignment");
3493  bind(&alignment_as_expected);
3494  }
3495  }
3496 #endif
3497 
3498  // Just call directly. The function called cannot cause a GC, or
3499  // allow preemption, so the return address in the link register
3500  // stays correct.
3501  Call(function);
3502  int stack_passed_arguments = CalculateStackPassedWords(
3503  num_reg_arguments, num_double_arguments);
3504  if (ActivationFrameAlignment() > kPointerSize) {
3505  ldr(sp, MemOperand(sp, stack_passed_arguments * kPointerSize));
3506  } else {
3507  add(sp, sp, Operand(stack_passed_arguments * sizeof(kPointerSize)));
3508  }
3509 }
3510 
3511 
3512 void MacroAssembler::GetRelocatedValueLocation(Register ldr_location,
3513  Register result) {
3514  const uint32_t kLdrOffsetMask = (1 << 12) - 1;
3515  const int32_t kPCRegOffset = 2 * kPointerSize;
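  // Reading pc on ARM yields the address of the current instruction plus 8
  // (two instructions); kPCRegOffset adds that bias back when computing the
  // constant's address below.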
3516  ldr(result, MemOperand(ldr_location));
3517  if (emit_debug_code()) {
3518  // Check that the instruction is a ldr reg, [pc + offset].
3519  and_(result, result, Operand(kLdrPCPattern));
3520  cmp(result, Operand(kLdrPCPattern));
3521  Check(eq, "The instruction to patch should be a load from pc.");
3522  // Result was clobbered. Restore it.
3523  ldr(result, MemOperand(ldr_location));
3524  }
3525  // Get the address of the constant.
3526  and_(result, result, Operand(kLdrOffsetMask));
3527  add(result, ldr_location, Operand(result));
3528  add(result, result, Operand(kPCRegOffset));
3529 }
3530 
3531 
3532 void MacroAssembler::CheckPageFlag(
3533  Register object,
3534  Register scratch,
3535  int mask,
3536  Condition cc,
3537  Label* condition_met) {
3538  Bfc(scratch, object, 0, kPageSizeBits);
3539  ldr(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
3540  tst(scratch, Operand(mask));
3541  b(cc, condition_met);
3542 }
3543 
3544 
3545 void MacroAssembler::JumpIfBlack(Register object,
3546  Register scratch0,
3547  Register scratch1,
3548  Label* on_black) {
3549  HasColor(object, scratch0, scratch1, on_black, 1, 0); // kBlackBitPattern.
3550  ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
3551 }
3552 
3553 
3554 void MacroAssembler::HasColor(Register object,
3555  Register bitmap_scratch,
3556  Register mask_scratch,
3557  Label* has_color,
3558  int first_bit,
3559  int second_bit) {
3560  ASSERT(!AreAliased(object, bitmap_scratch, mask_scratch, no_reg));
3561 
3562  GetMarkBits(object, bitmap_scratch, mask_scratch);
3563 
3564  Label other_color, word_boundary;
3565  ldr(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
3566  tst(ip, Operand(mask_scratch));
3567  b(first_bit == 1 ? eq : ne, &other_color);
3568  // Shift left 1 by adding.
3569  add(mask_scratch, mask_scratch, Operand(mask_scratch), SetCC);
3570  b(eq, &word_boundary);
3571  tst(ip, Operand(mask_scratch));
3572  b(second_bit == 1 ? ne : eq, has_color);
3573  jmp(&other_color);
3574 
3575  bind(&word_boundary);
3576  ldr(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize + kPointerSize));
3577  tst(ip, Operand(1));
3578  b(second_bit == 1 ? ne : eq, has_color);
3579  bind(&other_color);
3580 }
3581 
3582 
3583 // Detect some, but not all, common pointer-free objects. This is used by the
3584 // incremental write barrier which doesn't care about oddballs (they are always
3585 // marked black immediately so this code is not hit).
3586 void MacroAssembler::JumpIfDataObject(Register value,
3587  Register scratch,
3588  Label* not_data_object) {
3589  Label is_data_object;
3590  ldr(scratch, FieldMemOperand(value, HeapObject::kMapOffset));
3591  CompareRoot(scratch, Heap::kHeapNumberMapRootIndex);
3592  b(eq, &is_data_object);
3593  ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
3594  ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
3595  // If it's a string and it's not a cons string then it's an object containing
3596  // no GC pointers.
3597  ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
3598  tst(scratch, Operand(kIsIndirectStringMask | kIsNotStringMask));
3599  b(ne, not_data_object);
3600  bind(&is_data_object);
3601 }
3602 
3603 
3604 void MacroAssembler::GetMarkBits(Register addr_reg,
3605  Register bitmap_reg,
3606  Register mask_reg) {
3607  ASSERT(!AreAliased(addr_reg, bitmap_reg, mask_reg, no_reg));
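  // bitmap_reg <- start of the page containing addr_reg.
  // mask_reg   <- bit position of the mark bit within its bitmap cell.
  // ip         <- index of the bitmap cell, converted to a word offset below.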
3608  and_(bitmap_reg, addr_reg, Operand(~Page::kPageAlignmentMask));
3609  Ubfx(mask_reg, addr_reg, kPointerSizeLog2, Bitmap::kBitsPerCellLog2);
3610  const int kLowBits = kPointerSizeLog2 + Bitmap::kBitsPerCellLog2;
3611  Ubfx(ip, addr_reg, kLowBits, kPageSizeBits - kLowBits);
3612  add(bitmap_reg, bitmap_reg, Operand(ip, LSL, kPointerSizeLog2));
3613  mov(ip, Operand(1));
3614  mov(mask_reg, Operand(ip, LSL, mask_reg));
3615 }
3616 
3617 
3618 void MacroAssembler::EnsureNotWhite(
3619  Register value,
3620  Register bitmap_scratch,
3621  Register mask_scratch,
3622  Register load_scratch,
3623  Label* value_is_white_and_not_data) {
3624  ASSERT(!AreAliased(value, bitmap_scratch, mask_scratch, ip));
3625  GetMarkBits(value, bitmap_scratch, mask_scratch);
3626 
3627  // If the value is black or grey we don't need to do anything.
3628  ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0);
3629  ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
3630  ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0);
3631  ASSERT(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
3632 
3633  Label done;
3634 
3635  // Since both black and grey have a 1 in the first position and white does
3636  // not have a 1 there we only need to check one bit.
3637  ldr(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
3638  tst(mask_scratch, load_scratch);
3639  b(ne, &done);
3640 
3641  if (emit_debug_code()) {
3642  // Check for impossible bit pattern.
3643  Label ok;
3644  // LSL may overflow, making the check conservative.
3645  tst(load_scratch, Operand(mask_scratch, LSL, 1));
3646  b(eq, &ok);
3647  stop("Impossible marking bit pattern");
3648  bind(&ok);
3649  }
3650 
3651  // Value is white. We check whether it is data that doesn't need scanning.
3652  // Currently only checks for HeapNumber and non-cons strings.
3653  Register map = load_scratch; // Holds map while checking type.
3654  Register length = load_scratch; // Holds length of object after testing type.
3655  Label is_data_object;
3656 
3657  // Check for heap-number
3658  ldr(map, FieldMemOperand(value, HeapObject::kMapOffset));
3659  CompareRoot(map, Heap::kHeapNumberMapRootIndex);
3660  mov(length, Operand(HeapNumber::kSize), LeaveCC, eq);
3661  b(eq, &is_data_object);
3662 
3663  // Check for strings.
3664  ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
3665  ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
3666  // If it's a string and it's not a cons string then it's an object containing
3667  // no GC pointers.
3668  Register instance_type = load_scratch;
3669  ldrb(instance_type, FieldMemOperand(map, Map::kInstanceTypeOffset));
3670  tst(instance_type, Operand(kIsIndirectStringMask | kIsNotStringMask));
3671  b(ne, value_is_white_and_not_data);
3672  // It's a non-indirect (non-cons and non-slice) string.
3673  // If it's external, the length is just ExternalString::kSize.
3674  // Otherwise it's String::kHeaderSize + string->length() * (1 or 2).
3675  // External strings are the only ones with the kExternalStringTag bit
3676  // set.
3677  ASSERT_EQ(0, kSeqStringTag & kExternalStringTag);
3678  ASSERT_EQ(0, kConsStringTag & kExternalStringTag);
3679  tst(instance_type, Operand(kExternalStringTag));
3680  mov(length, Operand(ExternalString::kSize), LeaveCC, ne);
3681  b(ne, &is_data_object);
3682 
3683  // Sequential string, either ASCII or UC16.
3684  // For ASCII (char-size of 1) we shift the smi tag away to get the length.
3685  // For UC16 (char-size of 2) we just leave the smi tag in place, thereby
3686  // getting the length multiplied by 2.
3687  ASSERT(kAsciiStringTag == 4 && kStringEncodingMask == 4);
3688  ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
3689  ldr(ip, FieldMemOperand(value, String::kLengthOffset));
3690  tst(instance_type, Operand(kStringEncodingMask));
3691  mov(ip, Operand(ip, LSR, 1), LeaveCC, ne);
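  // Round SeqString::kHeaderSize plus the payload up to the object alignment.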
3692  add(length, ip, Operand(SeqString::kHeaderSize + kObjectAlignmentMask));
3693  and_(length, length, Operand(~kObjectAlignmentMask));
3694 
3695  bind(&is_data_object);
3696  // Value is a data object, and it is white. Mark it black. Since we know
3697  // that the object is white we can make it black by flipping one bit.
3698  ldr(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
3699  orr(ip, ip, Operand(mask_scratch));
3700  str(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
3701 
3702  and_(bitmap_scratch, bitmap_scratch, Operand(~Page::kPageAlignmentMask));
3703  ldr(ip, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
3704  add(ip, ip, Operand(length));
3705  str(ip, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
3706 
3707  bind(&done);
3708 }
3709 
3710 
3711 void MacroAssembler::ClampUint8(Register output_reg, Register input_reg) {
3712  Usat(output_reg, 8, Operand(input_reg));
3713 }
3714 
3715 
3716 void MacroAssembler::ClampDoubleToUint8(Register result_reg,
3717  DoubleRegister input_reg,
3718  DoubleRegister temp_double_reg) {
3719  Label above_zero;
3720  Label done;
3721  Label in_bounds;
3722 
3723  Vmov(temp_double_reg, 0.0);
3724  VFPCompareAndSetFlags(input_reg, temp_double_reg);
3725  b(gt, &above_zero);
3726 
3727  // Double value is less than or equal to zero, or NaN: return 0.
3728  mov(result_reg, Operand(0));
3729  b(al, &done);
3730 
3731  // Double value is >= 255, return 255.
3732  bind(&above_zero);
3733  Vmov(temp_double_reg, 255.0, result_reg);
3734  VFPCompareAndSetFlags(input_reg, temp_double_reg);
3735  b(le, &in_bounds);
3736  mov(result_reg, Operand(255));
3737  b(al, &done);
3738 
3739  // In 0-255 range, round and truncate.
3740  bind(&in_bounds);
3741  // Save FPSCR.
3742  vmrs(ip);
3743  // Set rounding mode to round to the nearest integer by clearing bits[23:22].
3744  bic(result_reg, ip, Operand(kVFPRoundingModeMask));
3745  vmsr(result_reg);
3746  vcvt_s32_f64(input_reg.low(), input_reg, kFPSCRRounding);
3747  vmov(result_reg, input_reg.low());
3748  // Restore FPSCR.
3749  vmsr(ip);
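  // result_reg now holds the input rounded to the nearest integer in [0, 255],
  // e.g. 127.3 becomes 127.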
3750  bind(&done);
3751 }
3752 
3753 
3754 void MacroAssembler::LoadInstanceDescriptors(Register map,
3755  Register descriptors) {
3756  ldr(descriptors, FieldMemOperand(map, Map::kDescriptorsOffset));
3757 }
3758 
3759 
3760 void MacroAssembler::NumberOfOwnDescriptors(Register dst, Register map) {
3761  ldr(dst, FieldMemOperand(map, Map::kBitField3Offset));
3762  DecodeField<Map::NumberOfOwnDescriptorsBits>(dst);
3763 }
3764 
3765 
3766 void MacroAssembler::EnumLength(Register dst, Register map) {
3767  STATIC_ASSERT(Map::EnumLengthBits::kShift == 0);
3768  ldr(dst, FieldMemOperand(map, Map::kBitField3Offset));
3769  and_(dst, dst, Operand(Smi::FromInt(Map::EnumLengthBits::kMask)));
3770 }
3771 
3772 
3773 void MacroAssembler::CheckEnumCache(Register null_value, Label* call_runtime) {
3774  Register empty_fixed_array_value = r6;
3775  LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
3776  Label next, start;
3777  mov(r2, r0);
3778 
3779  // Check if the enum length field is properly initialized, indicating that
3780  // there is an enum cache.
3781  ldr(r1, FieldMemOperand(r2, HeapObject::kMapOffset));
3782 
3783  EnumLength(r3, r1);
3784  cmp(r3, Operand(Smi::FromInt(Map::kInvalidEnumCache)));
3785  b(eq, call_runtime);
3786 
3787  jmp(&start);
3788 
3789  bind(&next);
3790  ldr(r1, FieldMemOperand(r2, HeapObject::kMapOffset));
3791 
3792  // For all objects but the receiver, check that the cache is empty.
3793  EnumLength(r3, r1);
3794  cmp(r3, Operand(Smi::FromInt(0)));
3795  b(ne, call_runtime);
3796 
3797  bind(&start);
3798 
3799  // Check that there are no elements. Register r2 contains the current JS
3800  // object we've reached through the prototype chain.
3801  ldr(r2, FieldMemOperand(r2, JSObject::kElementsOffset));
3802  cmp(r2, empty_fixed_array_value);
3803  b(ne, call_runtime);
3804 
3805  ldr(r2, FieldMemOperand(r1, Map::kPrototypeOffset));
3806  cmp(r2, null_value);
3807  b(ne, &next);
3808 }
3809 
3810 
3811 #ifdef DEBUG
3812 bool AreAliased(Register reg1,
3813  Register reg2,
3814  Register reg3,
3815  Register reg4,
3816  Register reg5,
3817  Register reg6) {
3818  int n_of_valid_regs = reg1.is_valid() + reg2.is_valid() +
3819  reg3.is_valid() + reg4.is_valid() + reg5.is_valid() + reg6.is_valid();
3820 
3821  RegList regs = 0;
3822  if (reg1.is_valid()) regs |= reg1.bit();
3823  if (reg2.is_valid()) regs |= reg2.bit();
3824  if (reg3.is_valid()) regs |= reg3.bit();
3825  if (reg4.is_valid()) regs |= reg4.bit();
3826  if (reg5.is_valid()) regs |= reg5.bit();
3827  if (reg6.is_valid()) regs |= reg6.bit();
3828  int n_of_non_aliasing_regs = NumRegs(regs);
3829 
3830  return n_of_valid_regs != n_of_non_aliasing_regs;
3831 }
3832 #endif
3833 
3834 
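// Usage sketch (the patch site and instruction count below are illustrative):
//   CodePatcher patcher(branch_address, 1);
//   patcher.EmitCondition(ne);  // rewrite the condition field of the branch
// The destructor flushes the instruction cache for the patched range.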
3835 CodePatcher::CodePatcher(byte* address, int instructions)
3836  : address_(address),
3837  instructions_(instructions),
3838  size_(instructions * Assembler::kInstrSize),
3839  masm_(NULL, address, size_ + Assembler::kGap) {
3840  // Create a new macro assembler pointing to the address of the code to patch.
3841  // The size is adjusted with kGap in order for the assembler to generate size
3842  // bytes of instructions without failing with buffer size constraints.
3843  ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
3844 }
3845 
3846 
3847 CodePatcher::~CodePatcher() {
3848  // Indicate that code has changed.
3849  CPU::FlushICache(address_, size_);
3850 
3851  // Check that the code was patched as expected.
3852  ASSERT(masm_.pc_ == address_ + size_);
3853  ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
3854 }
3855 
3856 
3857 void CodePatcher::Emit(Instr instr) {
3858  masm()->emit(instr);
3859 }
3860 
3861 
3862 void CodePatcher::Emit(Address addr) {
3863  masm()->emit(reinterpret_cast<Instr>(addr));
3864 }
3865 
3866 
3867 void CodePatcher::EmitCondition(Condition cond) {
3868  Instr instr = Assembler::instr_at(masm_.pc_);
3869  instr = (instr & ~kCondMask) | cond;
3870  masm_.emit(instr);
3871 }
3872 
3873 
3874 } } // namespace v8::internal
3875 
3876 #endif // V8_TARGET_ARCH_ARM