v8 3.11.10 (node 0.8.26)
V8 is Google's open source JavaScript engine.
macro-assembler-arm.cc
1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
4 // met:
5 //
6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided
11 // with the distribution.
12 // * Neither the name of Google Inc. nor the names of its
13 // contributors may be used to endorse or promote products derived
14 // from this software without specific prior written permission.
15 //
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 
28 #include <limits.h> // For LONG_MIN, LONG_MAX.
29 
30 #include "v8.h"
31 
32 #if defined(V8_TARGET_ARCH_ARM)
33 
34 #include "bootstrapper.h"
35 #include "codegen.h"
36 #include "debug.h"
37 #include "runtime.h"
38 
39 namespace v8 {
40 namespace internal {
41 
42 MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
43  : Assembler(arg_isolate, buffer, size),
44  generating_stub_(false),
45  allow_stub_calls_(true),
46  has_frame_(false) {
47  if (isolate() != NULL) {
48  code_object_ = Handle<Object>(isolate()->heap()->undefined_value(),
49  isolate());
50  }
51 }
52 
53 
54 // We always generate arm code, never thumb code, even if V8 is compiled to
55 // thumb, so we require inter-working support
56 #if defined(__thumb__) && !defined(USE_THUMB_INTERWORK)
57 #error "flag -mthumb-interwork missing"
58 #endif
59 
60 
61 // We do not support thumb inter-working with an arm architecture not supporting
62 // the blx instruction (below v5t). If you know what CPU you are compiling for
63 // you can use -march=armv7 or similar.
64 #if defined(USE_THUMB_INTERWORK) && !defined(CAN_USE_THUMB_INSTRUCTIONS)
65 # error "For thumb inter-working we require an architecture which supports blx"
66 #endif
67 
68 
69 // Using bx does not yield better code, so use it only when required
70 #if defined(USE_THUMB_INTERWORK)
71 #define USE_BX 1
72 #endif
73 
74 
75 void MacroAssembler::Jump(Register target, Condition cond) {
76 #if USE_BX
77  bx(target, cond);
78 #else
79  mov(pc, Operand(target), LeaveCC, cond);
80 #endif
81 }
82 
83 
84 void MacroAssembler::Jump(intptr_t target, RelocInfo::Mode rmode,
85  Condition cond) {
86 #if USE_BX
87  mov(ip, Operand(target, rmode));
88  bx(ip, cond);
89 #else
90  mov(pc, Operand(target, rmode), LeaveCC, cond);
91 #endif
92 }
93 
94 
95 void MacroAssembler::Jump(Address target, RelocInfo::Mode rmode,
96  Condition cond) {
97  ASSERT(!RelocInfo::IsCodeTarget(rmode));
98  Jump(reinterpret_cast<intptr_t>(target), rmode, cond);
99 }
100 
101 
102 void MacroAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
103  Condition cond) {
104  ASSERT(RelocInfo::IsCodeTarget(rmode));
105  // 'code' is always generated ARM code, never THUMB code
106  Jump(reinterpret_cast<intptr_t>(code.location()), rmode, cond);
107 }
108 
109 
110 int MacroAssembler::CallSize(Register target, Condition cond) {
111 #if USE_BLX
112  return kInstrSize;
113 #else
114  return 2 * kInstrSize;
115 #endif
116 }
117 
118 
119 void MacroAssembler::Call(Register target, Condition cond) {
120  // Block constant pool for the call instruction sequence.
121  BlockConstPoolScope block_const_pool(this);
122  Label start;
123  bind(&start);
124 #if USE_BLX
125  blx(target, cond);
126 #else
127  // set lr for return at current pc + 8
128  mov(lr, Operand(pc), LeaveCC, cond);
129  mov(pc, Operand(target), LeaveCC, cond);
130 #endif
131  ASSERT_EQ(CallSize(target, cond), SizeOfCodeGeneratedSince(&start));
132 }
133 
134 
135 int MacroAssembler::CallSize(
136  Address target, RelocInfo::Mode rmode, Condition cond) {
137  int size = 2 * kInstrSize;
138  Instr mov_instr = cond | MOV | LeaveCC;
139  intptr_t immediate = reinterpret_cast<intptr_t>(target);
140  if (!Operand(immediate, rmode).is_single_instruction(mov_instr)) {
141  size += kInstrSize;
142  }
143  return size;
144 }
145 
146 
147 void MacroAssembler::Call(Address target,
148  RelocInfo::Mode rmode,
149  Condition cond) {
150  // Block constant pool for the call instruction sequence.
151  BlockConstPoolScope block_const_pool(this);
152  Label start;
153  bind(&start);
154 #if USE_BLX
155  // On ARMv5 and after the recommended call sequence is:
156  // ldr ip, [pc, #...]
157  // blx ip
158 
159  // Statement positions are expected to be recorded when the target
160  // address is loaded. The mov method will automatically record
161 // positions when pc is the target; since this is not the case here
162  // we have to do it explicitly.
163  positions_recorder()->WriteRecordedPositions();
164 
165  mov(ip, Operand(reinterpret_cast<int32_t>(target), rmode));
166  blx(ip, cond);
167 
168  ASSERT(kCallTargetAddressOffset == 2 * kInstrSize);
169 #else
170  // Set lr for return at current pc + 8.
171  mov(lr, Operand(pc), LeaveCC, cond);
172  // Emit a ldr<cond> pc, [pc + offset of target in constant pool].
173  mov(pc, Operand(reinterpret_cast<int32_t>(target), rmode), LeaveCC, cond);
174  ASSERT(kCallTargetAddressOffset == kInstrSize);
175 #endif
176  ASSERT_EQ(CallSize(target, rmode, cond), SizeOfCodeGeneratedSince(&start));
177 }
178 
179 
180 int MacroAssembler::CallSize(Handle<Code> code,
181  RelocInfo::Mode rmode,
182  unsigned ast_id,
183  Condition cond) {
184  return CallSize(reinterpret_cast<Address>(code.location()), rmode, cond);
185 }
186 
187 
188 void MacroAssembler::Call(Handle<Code> code,
189  RelocInfo::Mode rmode,
190  unsigned ast_id,
191  Condition cond) {
192  Label start;
193  bind(&start);
194  ASSERT(RelocInfo::IsCodeTarget(rmode));
195  if (rmode == RelocInfo::CODE_TARGET && ast_id != kNoASTId) {
196  SetRecordedAstId(ast_id);
197  rmode = RelocInfo::CODE_TARGET_WITH_ID;
198  }
199  // 'code' is always generated ARM code, never THUMB code
200  Call(reinterpret_cast<Address>(code.location()), rmode, cond);
201  ASSERT_EQ(CallSize(code, rmode, ast_id, cond),
202  SizeOfCodeGeneratedSince(&start));
203 }
204 
205 
206 void MacroAssembler::Ret(Condition cond) {
207 #if USE_BX
208  bx(lr, cond);
209 #else
210  mov(pc, Operand(lr), LeaveCC, cond);
211 #endif
212 }
213 
214 
215 void MacroAssembler::Drop(int count, Condition cond) {
216  if (count > 0) {
217  add(sp, sp, Operand(count * kPointerSize), LeaveCC, cond);
218  }
219 }
220 
221 
222 void MacroAssembler::Ret(int drop, Condition cond) {
223  Drop(drop, cond);
224  Ret(cond);
225 }
226 
227 
228 void MacroAssembler::Swap(Register reg1,
229  Register reg2,
230  Register scratch,
231  Condition cond) {
232  if (scratch.is(no_reg)) {
233  eor(reg1, reg1, Operand(reg2), LeaveCC, cond);
234  eor(reg2, reg2, Operand(reg1), LeaveCC, cond);
235  eor(reg1, reg1, Operand(reg2), LeaveCC, cond);
236  } else {
237  mov(scratch, reg1, LeaveCC, cond);
238  mov(reg1, reg2, LeaveCC, cond);
239  mov(reg2, scratch, LeaveCC, cond);
240  }
241 }
242 
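// Editorial sketch, not part of the original v8 source: the scratch-free path
// of Swap() above is the classic three-EOR register exchange. The same trick
// on host integers; it requires the two locations to be distinct, since
// x ^ x == 0 would otherwise zero both.
static inline void XorSwapSketch(unsigned* a, unsigned* b) {
  *a ^= *b;  // a now holds a ^ b.
  *b ^= *a;  // b ^ (a ^ b) == original a.
  *a ^= *b;  // (a ^ b) ^ a == original b.
}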
243 
244 void MacroAssembler::Call(Label* target) {
245  bl(target);
246 }
247 
248 
249 void MacroAssembler::Push(Handle<Object> handle) {
250  mov(ip, Operand(handle));
251  push(ip);
252 }
253 
254 
255 void MacroAssembler::Move(Register dst, Handle<Object> value) {
256  mov(dst, Operand(value));
257 }
258 
259 
260 void MacroAssembler::Move(Register dst, Register src, Condition cond) {
261  if (!dst.is(src)) {
262  mov(dst, src, LeaveCC, cond);
263  }
264 }
265 
266 
267 void MacroAssembler::Move(DoubleRegister dst, DoubleRegister src) {
268  ASSERT(CpuFeatures::IsSupported(VFP3));
269  CpuFeatures::Scope scope(VFP3);
270  if (!dst.is(src)) {
271  vmov(dst, src);
272  }
273 }
274 
275 
276 void MacroAssembler::And(Register dst, Register src1, const Operand& src2,
277  Condition cond) {
278  if (!src2.is_reg() &&
279  !src2.must_use_constant_pool() &&
280  src2.immediate() == 0) {
281  mov(dst, Operand(0, RelocInfo::NONE), LeaveCC, cond);
282 
283  } else if (!src2.is_single_instruction() &&
284  !src2.must_use_constant_pool() &&
285  CpuFeatures::IsSupported(ARMv7) &&
286  IsPowerOf2(src2.immediate() + 1)) {
287  ubfx(dst, src1, 0,
288  WhichPowerOf2(static_cast<uint32_t>(src2.immediate()) + 1), cond);
289 
290  } else {
291  and_(dst, src1, src2, LeaveCC, cond);
292  }
293 }
294 
295 
296 void MacroAssembler::Ubfx(Register dst, Register src1, int lsb, int width,
297  Condition cond) {
298  ASSERT(lsb < 32);
299  if (!CpuFeatures::IsSupported(ARMv7)) {
300  int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
301  and_(dst, src1, Operand(mask), LeaveCC, cond);
302  if (lsb != 0) {
303  mov(dst, Operand(dst, LSR, lsb), LeaveCC, cond);
304  }
305  } else {
306  ubfx(dst, src1, lsb, width, cond);
307  }
308 }
309 
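// Editorial sketch, not part of the original v8 source: the pre-ARMv7 path of
// Ubfx() above emulates the ubfx instruction with an AND mask and a logical
// shift right. Equivalent host computation, assuming 0 < width and
// lsb + width < 32 so the shift amounts stay in range:
static inline unsigned UbfxSketch(unsigned src, int lsb, int width) {
  unsigned mask = ((1u << (width + lsb)) - 1) - ((1u << lsb) - 1);
  return (src & mask) >> lsb;  // Zero-extended bits [lsb, lsb + width).
}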
310 
311 void MacroAssembler::Sbfx(Register dst, Register src1, int lsb, int width,
312  Condition cond) {
313  ASSERT(lsb < 32);
314  if (!CpuFeatures::IsSupported(ARMv7)) {
315  int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
316  and_(dst, src1, Operand(mask), LeaveCC, cond);
317  int shift_up = 32 - lsb - width;
318  int shift_down = lsb + shift_up;
319  if (shift_up != 0) {
320  mov(dst, Operand(dst, LSL, shift_up), LeaveCC, cond);
321  }
322  if (shift_down != 0) {
323  mov(dst, Operand(dst, ASR, shift_down), LeaveCC, cond);
324  }
325  } else {
326  sbfx(dst, src1, lsb, width, cond);
327  }
328 }
329 
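// Editorial sketch, not part of the original v8 source: the pre-ARMv7 path of
// Sbfx() above sign-extends the extracted field by shifting it up to bit 31
// (LSL) and arithmetic-shifting it back down (ASR). Host equivalent, assuming
// 0 < width, lsb + width <= 32, and the usual two's-complement arithmetic
// right shift for signed integers:
static inline int SbfxSketch(unsigned src, int lsb, int width) {
  unsigned shifted = src << (32 - lsb - width);      // Field's top bit at bit 31.
  return static_cast<int>(shifted) >> (32 - width);  // ASR sign-extends the field.
}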
330 
331 void MacroAssembler::Bfi(Register dst,
332  Register src,
333  Register scratch,
334  int lsb,
335  int width,
336  Condition cond) {
337  ASSERT(0 <= lsb && lsb < 32);
338  ASSERT(0 <= width && width < 32);
339  ASSERT(lsb + width < 32);
340  ASSERT(!scratch.is(dst));
341  if (width == 0) return;
342  if (!CpuFeatures::IsSupported(ARMv7)) {
343  int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
344  bic(dst, dst, Operand(mask));
345  and_(scratch, src, Operand((1 << width) - 1));
346  mov(scratch, Operand(scratch, LSL, lsb));
347  orr(dst, dst, scratch);
348  } else {
349  bfi(dst, src, lsb, width, cond);
350  }
351 }
352 
353 
354 void MacroAssembler::Bfc(Register dst, int lsb, int width, Condition cond) {
355  ASSERT(lsb < 32);
356  if (!CpuFeatures::IsSupported(ARMv7)) {
357  int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
358  bic(dst, dst, Operand(mask));
359  } else {
360  bfc(dst, lsb, width, cond);
361  }
362 }
363 
364 
365 void MacroAssembler::Usat(Register dst, int satpos, const Operand& src,
366  Condition cond) {
367  if (!CpuFeatures::IsSupported(ARMv7)) {
368  ASSERT(!dst.is(pc) && !src.rm().is(pc));
369  ASSERT((satpos >= 0) && (satpos <= 31));
370 
371  // These asserts are required to ensure compatibility with the ARMv7
372  // implementation.
373  ASSERT((src.shift_op() == ASR) || (src.shift_op() == LSL));
374  ASSERT(src.rs().is(no_reg));
375 
376  Label done;
377  int satval = (1 << satpos) - 1;
378 
379  if (cond != al) {
380  b(NegateCondition(cond), &done); // Skip saturate if !condition.
381  }
382  if (!(src.is_reg() && dst.is(src.rm()))) {
383  mov(dst, src);
384  }
385  tst(dst, Operand(~satval));
386  b(eq, &done);
387  mov(dst, Operand(0, RelocInfo::NONE), LeaveCC, mi); // 0 if negative.
388  mov(dst, Operand(satval), LeaveCC, pl); // satval if positive.
389  bind(&done);
390  } else {
391  usat(dst, satpos, src, cond);
392  }
393 }
394 
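// Editorial sketch, not part of the original v8 source: both paths of Usat()
// above clamp a signed value into the range [0, 2^satpos - 1], which is what
// the usat instruction does. The same saturation on a host integer:
static inline int UsatSketch(int value, int satpos) {
  int satval = static_cast<int>((1u << satpos) - 1);  // Largest value kept.
  if (value < 0) return 0;                            // Negative saturates to zero.
  if (value > satval) return satval;                  // Too large saturates to max.
  return value;
}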
395 
396 void MacroAssembler::LoadRoot(Register destination,
397  Heap::RootListIndex index,
398  Condition cond) {
399  ldr(destination, MemOperand(kRootRegister, index << kPointerSizeLog2), cond);
400 }
401 
402 
403 void MacroAssembler::StoreRoot(Register source,
404  Heap::RootListIndex index,
405  Condition cond) {
406  str(source, MemOperand(kRootRegister, index << kPointerSizeLog2), cond);
407 }
408 
409 
410 void MacroAssembler::LoadHeapObject(Register result,
411  Handle<HeapObject> object) {
412  if (isolate()->heap()->InNewSpace(*object)) {
413  Handle<JSGlobalPropertyCell> cell =
414  isolate()->factory()->NewJSGlobalPropertyCell(object);
415  mov(result, Operand(cell));
416  ldr(result, FieldMemOperand(result, JSGlobalPropertyCell::kValueOffset));
417  } else {
418  mov(result, Operand(object));
419  }
420 }
421 
422 
423 void MacroAssembler::InNewSpace(Register object,
424  Register scratch,
425  Condition cond,
426  Label* branch) {
427  ASSERT(cond == eq || cond == ne);
428  and_(scratch, object, Operand(ExternalReference::new_space_mask(isolate())));
429  cmp(scratch, Operand(ExternalReference::new_space_start(isolate())));
430  b(cond, branch);
431 }
432 
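// Editorial sketch, not part of the original v8 source: InNewSpace() above
// tests space membership with pure address arithmetic: because the new space
// is one contiguous, aligned reservation, masking an address and comparing the
// result with the space's start address decides membership. In host terms,
// with the mask and start treated as opaque values supplied by the heap:
static inline bool InNewSpaceSketch(unsigned long address, unsigned long mask,
                                    unsigned long new_space_start) {
  return (address & mask) == new_space_start;
}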
433 
434 void MacroAssembler::RecordWriteField(
435  Register object,
436  int offset,
437  Register value,
438  Register dst,
439  LinkRegisterStatus lr_status,
440  SaveFPRegsMode save_fp,
441  RememberedSetAction remembered_set_action,
442  SmiCheck smi_check) {
443  // First, check if a write barrier is even needed. The tests below
444  // catch stores of Smis.
445  Label done;
446 
447  // Skip barrier if writing a smi.
448  if (smi_check == INLINE_SMI_CHECK) {
449  JumpIfSmi(value, &done);
450  }
451 
452  // Although the object register is tagged, the offset is relative to the start
453  // of the object, so the offset must be a multiple of kPointerSize.
454  ASSERT(IsAligned(offset, kPointerSize));
455 
456  add(dst, object, Operand(offset - kHeapObjectTag));
457  if (emit_debug_code()) {
458  Label ok;
459  tst(dst, Operand((1 << kPointerSizeLog2) - 1));
460  b(eq, &ok);
461  stop("Unaligned cell in write barrier");
462  bind(&ok);
463  }
464 
465  RecordWrite(object,
466  dst,
467  value,
468  lr_status,
469  save_fp,
470  remembered_set_action,
471  OMIT_SMI_CHECK);
472 
473  bind(&done);
474 
475  // Clobber clobbered input registers when running with the debug-code flag
476  // turned on to provoke errors.
477  if (emit_debug_code()) {
478  mov(value, Operand(BitCast<int32_t>(kZapValue + 4)));
479  mov(dst, Operand(BitCast<int32_t>(kZapValue + 8)));
480  }
481 }
482 
483 
484 // Will clobber 4 registers: object, address, scratch, ip. The
485 // register 'object' contains a heap object pointer. The heap object
486 // tag is shifted away.
487 void MacroAssembler::RecordWrite(Register object,
488  Register address,
489  Register value,
490  LinkRegisterStatus lr_status,
491  SaveFPRegsMode fp_mode,
492  RememberedSetAction remembered_set_action,
493  SmiCheck smi_check) {
494  // The compiled code assumes that record write doesn't change the
495  // context register, so we check that none of the clobbered
496  // registers are cp.
497  ASSERT(!address.is(cp) && !value.is(cp));
498 
499  if (emit_debug_code()) {
500  ldr(ip, MemOperand(address));
501  cmp(ip, value);
502  Check(eq, "Wrong address or value passed to RecordWrite");
503  }
504 
505  Label done;
506 
507  if (smi_check == INLINE_SMI_CHECK) {
508  ASSERT_EQ(0, kSmiTag);
509  tst(value, Operand(kSmiTagMask));
510  b(eq, &done);
511  }
512 
513  CheckPageFlag(value,
514  value, // Used as scratch.
515  MemoryChunk::kPointersToHereAreInterestingMask,
516  eq,
517  &done);
518  CheckPageFlag(object,
519  value, // Used as scratch.
520  MemoryChunk::kPointersFromHereAreInterestingMask,
521  eq,
522  &done);
523 
524  // Record the actual write.
525  if (lr_status == kLRHasNotBeenSaved) {
526  push(lr);
527  }
528  RecordWriteStub stub(object, value, address, remembered_set_action, fp_mode);
529  CallStub(&stub);
530  if (lr_status == kLRHasNotBeenSaved) {
531  pop(lr);
532  }
533 
534  bind(&done);
535 
536  // Clobber clobbered registers when running with the debug-code flag
537  // turned on to provoke errors.
538  if (emit_debug_code()) {
539  mov(address, Operand(BitCast<int32_t>(kZapValue + 12)));
540  mov(value, Operand(BitCast<int32_t>(kZapValue + 16)));
541  }
542 }
543 
544 
545 void MacroAssembler::RememberedSetHelper(Register object, // For debug tests.
546  Register address,
547  Register scratch,
548  SaveFPRegsMode fp_mode,
549  RememberedSetFinalAction and_then) {
550  Label done;
551  if (emit_debug_code()) {
552  Label ok;
553  JumpIfNotInNewSpace(object, scratch, &ok);
554  stop("Remembered set pointer is in new space");
555  bind(&ok);
556  }
557  // Load store buffer top.
558  ExternalReference store_buffer =
559  ExternalReference::store_buffer_top(isolate());
560  mov(ip, Operand(store_buffer));
561  ldr(scratch, MemOperand(ip));
562  // Store pointer to buffer and increment buffer top.
563  str(address, MemOperand(scratch, kPointerSize, PostIndex));
564  // Write back new top of buffer.
565  str(scratch, MemOperand(ip));
566  // Call stub on end of buffer.
567  // Check for end of buffer.
568  tst(scratch, Operand(StoreBuffer::kStoreBufferOverflowBit));
569  if (and_then == kFallThroughAtEnd) {
570  b(eq, &done);
571  } else {
572  ASSERT(and_then == kReturnAtEnd);
573  Ret(eq);
574  }
575  push(lr);
576  StoreBufferOverflowStub store_buffer_overflow =
577  StoreBufferOverflowStub(fp_mode);
578  CallStub(&store_buffer_overflow);
579  pop(lr);
580  bind(&done);
581  if (and_then == kReturnAtEnd) {
582  Ret();
583  }
584 }
585 
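// Editorial sketch, not part of the original v8 source: the fast path of
// RememberedSetHelper() above appends the slot address with a post-indexed
// store, publishes the bumped top, and then tests an overflow bit that is
// encoded in the top pointer's value. Roughly, with overflow_bit standing in
// for StoreBuffer::kStoreBufferOverflowBit:
static inline bool StoreBufferAppendSketch(void*** top_cell, void* slot_address,
                                           unsigned long overflow_bit) {
  void** top = *top_cell;
  *top++ = slot_address;  // str address, [scratch], #kPointerSize (post-index).
  *top_cell = top;        // Write back the new top of the buffer.
  // A set overflow bit means the buffer is full and the overflow stub must run.
  return (reinterpret_cast<unsigned long>(top) & overflow_bit) != 0;
}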
586 
587 // Push and pop all registers that can hold pointers.
588 void MacroAssembler::PushSafepointRegisters() {
589  // Safepoints expect a block of contiguous register values starting with r0:
590  ASSERT(((1 << kNumSafepointSavedRegisters) - 1) == kSafepointSavedRegisters);
591  // Safepoints expect a block of kNumSafepointRegisters values on the
592  // stack, so adjust the stack for unsaved registers.
593  const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
594  ASSERT(num_unsaved >= 0);
595  sub(sp, sp, Operand(num_unsaved * kPointerSize));
596  stm(db_w, sp, kSafepointSavedRegisters);
597 }
598 
599 
600 void MacroAssembler::PopSafepointRegisters() {
601  const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
602  ldm(ia_w, sp, kSafepointSavedRegisters);
603  add(sp, sp, Operand(num_unsaved * kPointerSize));
604 }
605 
606 
607 void MacroAssembler::PushSafepointRegistersAndDoubles() {
608  PushSafepointRegisters();
609  sub(sp, sp, Operand(DwVfpRegister::kNumAllocatableRegisters *
610  kDoubleSize));
611  for (int i = 0; i < DwVfpRegister::kNumAllocatableRegisters; i++) {
612  vstr(DwVfpRegister::FromAllocationIndex(i), sp, i * kDoubleSize);
613  }
614 }
615 
616 
617 void MacroAssembler::PopSafepointRegistersAndDoubles() {
618  for (int i = 0; i < DwVfpRegister::kNumAllocatableRegisters; i++) {
619  vldr(DwVfpRegister::FromAllocationIndex(i), sp, i * kDoubleSize);
620  }
621  add(sp, sp, Operand(DwVfpRegister::kNumAllocatableRegisters *
622  kDoubleSize));
623  PopSafepointRegisters();
624 }
625 
626 void MacroAssembler::StoreToSafepointRegistersAndDoublesSlot(Register src,
627  Register dst) {
628  str(src, SafepointRegistersAndDoublesSlot(dst));
629 }
630 
631 
632 void MacroAssembler::StoreToSafepointRegisterSlot(Register src, Register dst) {
633  str(src, SafepointRegisterSlot(dst));
634 }
635 
636 
637 void MacroAssembler::LoadFromSafepointRegisterSlot(Register dst, Register src) {
638  ldr(dst, SafepointRegisterSlot(src));
639 }
640 
641 
642 int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
643  // The registers are pushed starting with the highest encoding,
644  // which means that lowest encodings are closest to the stack pointer.
645  ASSERT(reg_code >= 0 && reg_code < kNumSafepointRegisters);
646  return reg_code;
647 }
648 
649 
650 MemOperand MacroAssembler::SafepointRegisterSlot(Register reg) {
651  return MemOperand(sp, SafepointRegisterStackIndex(reg.code()) * kPointerSize);
652 }
653 
654 
655 MemOperand MacroAssembler::SafepointRegistersAndDoublesSlot(Register reg) {
656  // General purpose registers are pushed last on the stack.
657  int doubles_size = DwVfpRegister::kNumAllocatableRegisters * kDoubleSize;
658  int register_offset = SafepointRegisterStackIndex(reg.code()) * kPointerSize;
659  return MemOperand(sp, doubles_size + register_offset);
660 }
661 
662 
663 void MacroAssembler::Ldrd(Register dst1, Register dst2,
664  const MemOperand& src, Condition cond) {
665  ASSERT(src.rm().is(no_reg));
666  ASSERT(!dst1.is(lr)); // r14.
667  ASSERT_EQ(0, dst1.code() % 2);
668  ASSERT_EQ(dst1.code() + 1, dst2.code());
669 
670  // V8 does not use this addressing mode, so the fallback code
671  // below doesn't support it yet.
672  ASSERT((src.am() != PreIndex) && (src.am() != NegPreIndex));
673 
674  // Generate two ldr instructions if ldrd is not available.
675  if (CpuFeatures::IsSupported(ARMv7)) {
676  CpuFeatures::Scope scope(ARMv7);
677  ldrd(dst1, dst2, src, cond);
678  } else {
679  if ((src.am() == Offset) || (src.am() == NegOffset)) {
680  MemOperand src2(src);
681  src2.set_offset(src2.offset() + 4);
682  if (dst1.is(src.rn())) {
683  ldr(dst2, src2, cond);
684  ldr(dst1, src, cond);
685  } else {
686  ldr(dst1, src, cond);
687  ldr(dst2, src2, cond);
688  }
689  } else { // PostIndex or NegPostIndex.
690  ASSERT((src.am() == PostIndex) || (src.am() == NegPostIndex));
691  if (dst1.is(src.rn())) {
692  ldr(dst2, MemOperand(src.rn(), 4, Offset), cond);
693  ldr(dst1, src, cond);
694  } else {
695  MemOperand src2(src);
696  src2.set_offset(src2.offset() - 4);
697  ldr(dst1, MemOperand(src.rn(), 4, PostIndex), cond);
698  ldr(dst2, src2, cond);
699  }
700  }
701  }
702 }
703 
704 
705 void MacroAssembler::Strd(Register src1, Register src2,
706  const MemOperand& dst, Condition cond) {
707  ASSERT(dst.rm().is(no_reg));
708  ASSERT(!src1.is(lr)); // r14.
709  ASSERT_EQ(0, src1.code() % 2);
710  ASSERT_EQ(src1.code() + 1, src2.code());
711 
712  // V8 does not use this addressing mode, so the fallback code
713  // below doesn't support it yet.
714  ASSERT((dst.am() != PreIndex) && (dst.am() != NegPreIndex));
715 
716  // Generate two str instructions if strd is not available.
717  if (CpuFeatures::IsSupported(ARMv7)) {
718  CpuFeatures::Scope scope(ARMv7);
719  strd(src1, src2, dst, cond);
720  } else {
721  MemOperand dst2(dst);
722  if ((dst.am() == Offset) || (dst.am() == NegOffset)) {
723  dst2.set_offset(dst2.offset() + 4);
724  str(src1, dst, cond);
725  str(src2, dst2, cond);
726  } else { // PostIndex or NegPostIndex.
727  ASSERT((dst.am() == PostIndex) || (dst.am() == NegPostIndex));
728  dst2.set_offset(dst2.offset() - 4);
729  str(src1, MemOperand(dst.rn(), 4, PostIndex), cond);
730  str(src2, dst2, cond);
731  }
732  }
733 }
734 
735 
736 void MacroAssembler::ClearFPSCRBits(const uint32_t bits_to_clear,
737  const Register scratch,
738  const Condition cond) {
739  vmrs(scratch, cond);
740  bic(scratch, scratch, Operand(bits_to_clear), LeaveCC, cond);
741  vmsr(scratch, cond);
742 }
743 
744 
745 void MacroAssembler::VFPCompareAndSetFlags(const DwVfpRegister src1,
746  const DwVfpRegister src2,
747  const Condition cond) {
748  // Compare and move FPSCR flags to the normal condition flags.
749  VFPCompareAndLoadFlags(src1, src2, pc, cond);
750 }
751 
752 void MacroAssembler::VFPCompareAndSetFlags(const DwVfpRegister src1,
753  const double src2,
754  const Condition cond) {
755  // Compare and move FPSCR flags to the normal condition flags.
756  VFPCompareAndLoadFlags(src1, src2, pc, cond);
757 }
758 
759 
760 void MacroAssembler::VFPCompareAndLoadFlags(const DwVfpRegister src1,
761  const DwVfpRegister src2,
762  const Register fpscr_flags,
763  const Condition cond) {
764  // Compare and load FPSCR.
765  vcmp(src1, src2, cond);
766  vmrs(fpscr_flags, cond);
767 }
768 
769 void MacroAssembler::VFPCompareAndLoadFlags(const DwVfpRegister src1,
770  const double src2,
771  const Register fpscr_flags,
772  const Condition cond) {
773  // Compare and load FPSCR.
774  vcmp(src1, src2, cond);
775  vmrs(fpscr_flags, cond);
776 }
777 
778 void MacroAssembler::Vmov(const DwVfpRegister dst,
779  const double imm,
780  const Condition cond) {
781  ASSERT(CpuFeatures::IsEnabled(VFP3));
782  static const DoubleRepresentation minus_zero(-0.0);
783  static const DoubleRepresentation zero(0.0);
784  DoubleRepresentation value(imm);
785  // Handle special values first.
786  if (value.bits == zero.bits) {
787  vmov(dst, kDoubleRegZero, cond);
788  } else if (value.bits == minus_zero.bits) {
789  vneg(dst, kDoubleRegZero, cond);
790  } else {
791  vmov(dst, imm, cond);
792  }
793 }
794 
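// Editorial sketch, not part of the original v8 source: Vmov() above tells
// +0.0 and -0.0 apart by comparing bit patterns, since the two compare equal
// under IEEE-754. A host-side version of that test, using union type punning
// in the spirit of the BitCast helper used elsewhere in this file:
static inline bool IsMinusZeroSketch(double value) {
  union { double d; unsigned long long bits; } u;
  u.d = value;
  return u.bits == 0x8000000000000000ULL;  // Sign bit set, all other bits zero.
}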
795 
796 void MacroAssembler::EnterFrame(StackFrame::Type type) {
797  // r0-r3: preserved
798  stm(db_w, sp, cp.bit() | fp.bit() | lr.bit());
799  mov(ip, Operand(Smi::FromInt(type)));
800  push(ip);
801  mov(ip, Operand(CodeObject()));
802  push(ip);
803  add(fp, sp, Operand(3 * kPointerSize)); // Adjust FP to point to saved FP.
804 }
805 
806 
807 void MacroAssembler::LeaveFrame(StackFrame::Type type) {
808  // r0: preserved
809  // r1: preserved
810  // r2: preserved
811 
812  // Drop the execution stack down to the frame pointer and restore
813  // the caller frame pointer and return address.
814  mov(sp, fp);
815  ldm(ia_w, sp, fp.bit() | lr.bit());
816 }
817 
818 
819 void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space) {
820  // Set up the frame structure on the stack.
821  ASSERT_EQ(2 * kPointerSize, ExitFrameConstants::kCallerSPDisplacement);
822  ASSERT_EQ(1 * kPointerSize, ExitFrameConstants::kCallerPCOffset);
823  ASSERT_EQ(0 * kPointerSize, ExitFrameConstants::kCallerFPOffset);
824  Push(lr, fp);
825  mov(fp, Operand(sp)); // Set up new frame pointer.
826  // Reserve room for saved entry sp and code object.
827  sub(sp, sp, Operand(2 * kPointerSize));
828  if (emit_debug_code()) {
829  mov(ip, Operand(0));
830  str(ip, MemOperand(fp, ExitFrameConstants::kSPOffset));
831  }
832  mov(ip, Operand(CodeObject()));
833  str(ip, MemOperand(fp, ExitFrameConstants::kCodeOffset));
834 
835  // Save the frame pointer and the context in top.
836  mov(ip, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
837  str(fp, MemOperand(ip));
838  mov(ip, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
839  str(cp, MemOperand(ip));
840 
841  // Optionally save all double registers.
842  if (save_doubles) {
843  DwVfpRegister first = d0;
844  DwVfpRegister last =
845  DwVfpRegister::from_code(DwVfpRegister::kNumRegisters - 1);
846  vstm(db_w, sp, first, last);
847  // Note that d0 will be accessible at
848  // fp - 2 * kPointerSize - DwVfpRegister::kNumRegisters * kDoubleSize,
849  // since the sp slot and code slot were pushed after the fp.
850  }
851 
852  // Reserve place for the return address and stack space and align the frame
853  // preparing for calling the runtime function.
854  const int frame_alignment = MacroAssembler::ActivationFrameAlignment();
855  sub(sp, sp, Operand((stack_space + 1) * kPointerSize));
856  if (frame_alignment > 0) {
857  ASSERT(IsPowerOf2(frame_alignment));
858  and_(sp, sp, Operand(-frame_alignment));
859  }
860 
861  // Set the exit frame sp value to point just before the return address
862  // location.
863  add(ip, sp, Operand(kPointerSize));
864  str(ip, MemOperand(fp, ExitFrameConstants::kSPOffset));
865 }
866 
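// Editorial sketch, not part of the original v8 source: the
// and_(sp, sp, Operand(-frame_alignment)) in EnterExitFrame() above rounds the
// stack pointer down to the required power-of-two alignment, because negating
// a power of two in two's complement yields a mask with the low bits clear.
// Host equivalent:
static inline unsigned long AlignDownSketch(unsigned long sp,
                                            unsigned long alignment) {
  // alignment must be a power of two (asserted via IsPowerOf2 above);
  // ~(alignment - 1) is the same mask as -alignment.
  return sp & ~(alignment - 1);
}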
867 
868 void MacroAssembler::InitializeNewString(Register string,
869  Register length,
870  Heap::RootListIndex map_index,
871  Register scratch1,
872  Register scratch2) {
873  mov(scratch1, Operand(length, LSL, kSmiTagSize));
874  LoadRoot(scratch2, map_index);
875  str(scratch1, FieldMemOperand(string, String::kLengthOffset));
876  mov(scratch1, Operand(String::kEmptyHashField));
877  str(scratch2, FieldMemOperand(string, HeapObject::kMapOffset));
878  str(scratch1, FieldMemOperand(string, String::kHashFieldOffset));
879 }
880 
881 
882 int MacroAssembler::ActivationFrameAlignment() {
883 #if defined(V8_HOST_ARCH_ARM)
884  // Running on the real platform. Use the alignment as mandated by the local
885  // environment.
886  // Note: This will break if we ever start generating snapshots on one ARM
887  // platform for another ARM platform with a different alignment.
888  return OS::ActivationFrameAlignment();
889 #else // defined(V8_HOST_ARCH_ARM)
890  // If we are using the simulator then we should always align to the expected
891  // alignment. As the simulator is used to generate snapshots we do not know
892  // if the target platform will need alignment, so this is controlled from a
893  // flag.
894  return FLAG_sim_stack_alignment;
895 #endif // defined(V8_HOST_ARCH_ARM)
896 }
897 
898 
899 void MacroAssembler::LeaveExitFrame(bool save_doubles,
900  Register argument_count) {
901  // Optionally restore all double registers.
902  if (save_doubles) {
903  // Calculate the stack location of the saved doubles and restore them.
904  const int offset = 2 * kPointerSize;
905  sub(r3, fp, Operand(offset + DwVfpRegister::kNumRegisters * kDoubleSize));
906  DwVfpRegister first = d0;
907  DwVfpRegister last =
908  DwVfpRegister::from_code(DwVfpRegister::kNumRegisters - 1);
909  vldm(ia, r3, first, last);
910  }
911 
912  // Clear top frame.
913  mov(r3, Operand(0, RelocInfo::NONE));
914  mov(ip, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
915  str(r3, MemOperand(ip));
916 
917  // Restore current context from top and clear it in debug mode.
918  mov(ip, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
919  ldr(cp, MemOperand(ip));
920 #ifdef DEBUG
921  str(r3, MemOperand(ip));
922 #endif
923 
924  // Tear down the exit frame, pop the arguments, and return.
925  mov(sp, Operand(fp));
926  ldm(ia_w, sp, fp.bit() | lr.bit());
927  if (argument_count.is_valid()) {
928  add(sp, sp, Operand(argument_count, LSL, kPointerSizeLog2));
929  }
930 }
931 
932 void MacroAssembler::GetCFunctionDoubleResult(const DoubleRegister dst) {
933  if (use_eabi_hardfloat()) {
934  Move(dst, d0);
935  } else {
936  vmov(dst, r0, r1);
937  }
938 }
939 
940 
941 void MacroAssembler::SetCallKind(Register dst, CallKind call_kind) {
942  // This macro takes the dst register to make the code more readable
943  // at the call sites. However, the dst register has to be r5 to
944  // follow the calling convention which requires the call type to be
945  // in r5.
946  ASSERT(dst.is(r5));
947  if (call_kind == CALL_AS_FUNCTION) {
948  mov(dst, Operand(Smi::FromInt(1)));
949  } else {
950  mov(dst, Operand(Smi::FromInt(0)));
951  }
952 }
953 
954 
955 void MacroAssembler::InvokePrologue(const ParameterCount& expected,
956  const ParameterCount& actual,
957  Handle<Code> code_constant,
958  Register code_reg,
959  Label* done,
960  bool* definitely_mismatches,
961  InvokeFlag flag,
962  const CallWrapper& call_wrapper,
963  CallKind call_kind) {
964  bool definitely_matches = false;
965  *definitely_mismatches = false;
966  Label regular_invoke;
967 
968  // Check whether the expected and actual arguments count match. If not,
969  // setup registers according to contract with ArgumentsAdaptorTrampoline:
970  // r0: actual arguments count
971  // r1: function (passed through to callee)
972  // r2: expected arguments count
973  // r3: callee code entry
974 
975  // The code below is made a lot easier because the calling code already sets
976  // up actual and expected registers according to the contract if values are
977  // passed in registers.
978  ASSERT(actual.is_immediate() || actual.reg().is(r0));
979  ASSERT(expected.is_immediate() || expected.reg().is(r2));
980  ASSERT((!code_constant.is_null() && code_reg.is(no_reg)) || code_reg.is(r3));
981 
982  if (expected.is_immediate()) {
983  ASSERT(actual.is_immediate());
984  if (expected.immediate() == actual.immediate()) {
985  definitely_matches = true;
986  } else {
987  mov(r0, Operand(actual.immediate()));
988  const int sentinel = SharedFunctionInfo::kDontAdaptArgumentsSentinel;
989  if (expected.immediate() == sentinel) {
990  // Don't worry about adapting arguments for builtins that
991  // don't want that done. Skip adaptation code by making it look
992  // like we have a match between expected and actual number of
993  // arguments.
994  definitely_matches = true;
995  } else {
996  *definitely_mismatches = true;
997  mov(r2, Operand(expected.immediate()));
998  }
999  }
1000  } else {
1001  if (actual.is_immediate()) {
1002  cmp(expected.reg(), Operand(actual.immediate()));
1003  b(eq, &regular_invoke);
1004  mov(r0, Operand(actual.immediate()));
1005  } else {
1006  cmp(expected.reg(), Operand(actual.reg()));
1007  b(eq, &regular_invoke);
1008  }
1009  }
1010 
1011  if (!definitely_matches) {
1012  if (!code_constant.is_null()) {
1013  mov(r3, Operand(code_constant));
1014  add(r3, r3, Operand(Code::kHeaderSize - kHeapObjectTag));
1015  }
1016 
1017  Handle<Code> adaptor =
1018  isolate()->builtins()->ArgumentsAdaptorTrampoline();
1019  if (flag == CALL_FUNCTION) {
1020  call_wrapper.BeforeCall(CallSize(adaptor));
1021  SetCallKind(r5, call_kind);
1022  Call(adaptor);
1023  call_wrapper.AfterCall();
1024  if (!*definitely_mismatches) {
1025  b(done);
1026  }
1027  } else {
1028  SetCallKind(r5, call_kind);
1029  Jump(adaptor, RelocInfo::CODE_TARGET);
1030  }
1031  bind(&regular_invoke);
1032  }
1033 }
1034 
1035 
1036 void MacroAssembler::InvokeCode(Register code,
1037  const ParameterCount& expected,
1038  const ParameterCount& actual,
1039  InvokeFlag flag,
1040  const CallWrapper& call_wrapper,
1041  CallKind call_kind) {
1042  // You can't call a function without a valid frame.
1043  ASSERT(flag == JUMP_FUNCTION || has_frame());
1044 
1045  Label done;
1046  bool definitely_mismatches = false;
1047  InvokePrologue(expected, actual, Handle<Code>::null(), code,
1048  &done, &definitely_mismatches, flag,
1049  call_wrapper, call_kind);
1050  if (!definitely_mismatches) {
1051  if (flag == CALL_FUNCTION) {
1052  call_wrapper.BeforeCall(CallSize(code));
1053  SetCallKind(r5, call_kind);
1054  Call(code);
1055  call_wrapper.AfterCall();
1056  } else {
1057  ASSERT(flag == JUMP_FUNCTION);
1058  SetCallKind(r5, call_kind);
1059  Jump(code);
1060  }
1061 
1062  // Continue here if InvokePrologue does handle the invocation due to
1063  // mismatched parameter counts.
1064  bind(&done);
1065  }
1066 }
1067 
1068 
1069 void MacroAssembler::InvokeCode(Handle<Code> code,
1070  const ParameterCount& expected,
1071  const ParameterCount& actual,
1072  RelocInfo::Mode rmode,
1073  InvokeFlag flag,
1074  CallKind call_kind) {
1075  // You can't call a function without a valid frame.
1076  ASSERT(flag == JUMP_FUNCTION || has_frame());
1077 
1078  Label done;
1079  bool definitely_mismatches = false;
1080  InvokePrologue(expected, actual, code, no_reg,
1081  &done, &definitely_mismatches, flag,
1082  NullCallWrapper(), call_kind);
1083  if (!definitely_mismatches) {
1084  if (flag == CALL_FUNCTION) {
1085  SetCallKind(r5, call_kind);
1086  Call(code, rmode);
1087  } else {
1088  SetCallKind(r5, call_kind);
1089  Jump(code, rmode);
1090  }
1091 
1092  // Continue here if InvokePrologue does handle the invocation due to
1093  // mismatched parameter counts.
1094  bind(&done);
1095  }
1096 }
1097 
1098 
1099 void MacroAssembler::InvokeFunction(Register fun,
1100  const ParameterCount& actual,
1101  InvokeFlag flag,
1102  const CallWrapper& call_wrapper,
1103  CallKind call_kind) {
1104  // You can't call a function without a valid frame.
1105  ASSERT(flag == JUMP_FUNCTION || has_frame());
1106 
1107  // Contract with called JS functions requires that function is passed in r1.
1108  ASSERT(fun.is(r1));
1109 
1110  Register expected_reg = r2;
1111  Register code_reg = r3;
1112 
1113  ldr(code_reg, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
1114  ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
1115  ldr(expected_reg,
1116  FieldMemOperand(code_reg,
1117  SharedFunctionInfo::kFormalParameterCountOffset));
1118  mov(expected_reg, Operand(expected_reg, ASR, kSmiTagSize));
1119  ldr(code_reg,
1120  FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
1121 
1122  ParameterCount expected(expected_reg);
1123  InvokeCode(code_reg, expected, actual, flag, call_wrapper, call_kind);
1124 }
1125 
1126 
1127 void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
1128  const ParameterCount& actual,
1129  InvokeFlag flag,
1130  const CallWrapper& call_wrapper,
1131  CallKind call_kind) {
1132  // You can't call a function without a valid frame.
1133  ASSERT(flag == JUMP_FUNCTION || has_frame());
1134 
1135  // Get the function and setup the context.
1136  LoadHeapObject(r1, function);
1137  ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
1138 
1139  ParameterCount expected(function->shared()->formal_parameter_count());
1140  // We call indirectly through the code field in the function to
1141  // allow recompilation to take effect without changing any of the
1142  // call sites.
1143  ldr(r3, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
1144  InvokeCode(r3, expected, actual, flag, call_wrapper, call_kind);
1145 }
1146 
1147 
1148 void MacroAssembler::IsObjectJSObjectType(Register heap_object,
1149  Register map,
1150  Register scratch,
1151  Label* fail) {
1152  ldr(map, FieldMemOperand(heap_object, HeapObject::kMapOffset));
1153  IsInstanceJSObjectType(map, scratch, fail);
1154 }
1155 
1156 
1157 void MacroAssembler::IsInstanceJSObjectType(Register map,
1158  Register scratch,
1159  Label* fail) {
1160  ldrb(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
1161  cmp(scratch, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
1162  b(lt, fail);
1163  cmp(scratch, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
1164  b(gt, fail);
1165 }
1166 
1167 
1168 void MacroAssembler::IsObjectJSStringType(Register object,
1169  Register scratch,
1170  Label* fail) {
1171  ASSERT(kNotStringTag != 0);
1172 
1173  ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
1174  ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
1175  tst(scratch, Operand(kIsNotStringMask));
1176  b(ne, fail);
1177 }
1178 
1179 
1180 #ifdef ENABLE_DEBUGGER_SUPPORT
1181 void MacroAssembler::DebugBreak() {
1182  mov(r0, Operand(0, RelocInfo::NONE));
1183  mov(r1, Operand(ExternalReference(Runtime::kDebugBreak, isolate())));
1184  CEntryStub ces(1);
1185  ASSERT(AllowThisStubCall(&ces));
1186  Call(ces.GetCode(), RelocInfo::DEBUG_BREAK);
1187 }
1188 #endif
1189 
1190 
1191 void MacroAssembler::PushTryHandler(StackHandler::Kind kind,
1192  int handler_index) {
1193  // Adjust this code if not the case.
1194  STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
1195  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
1196  STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
1197  STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
1198  STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
1199  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
1200 
1201  // For the JSEntry handler, we must preserve r0-r4; r5-r7 are available.
1202  // We will build up the handler from the bottom by pushing on the stack.
1203  // Set up the code object (r5) and the state (r6) for pushing.
1204  unsigned state =
1205  StackHandler::IndexField::encode(handler_index) |
1206  StackHandler::KindField::encode(kind);
1207  mov(r5, Operand(CodeObject()));
1208  mov(r6, Operand(state));
1209 
1210  // Push the frame pointer, context, state, and code object.
1211  if (kind == StackHandler::JS_ENTRY) {
1212  mov(r7, Operand(Smi::FromInt(0))); // Indicates no context.
1213  mov(ip, Operand(0, RelocInfo::NONE)); // NULL frame pointer.
1214  stm(db_w, sp, r5.bit() | r6.bit() | r7.bit() | ip.bit());
1215  } else {
1216  stm(db_w, sp, r5.bit() | r6.bit() | cp.bit() | fp.bit());
1217  }
1218 
1219  // Link the current handler as the next handler.
1220  mov(r6, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
1221  ldr(r5, MemOperand(r6));
1222  push(r5);
1223  // Set this new handler as the current one.
1224  str(sp, MemOperand(r6));
1225 }
1226 
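// Editorial sketch, not part of the original v8 source: the state word built
// in PushTryHandler() above packs the handler-table index and the handler kind
// into one value via StackHandler's bit fields. Assuming, consistently with
// JumpToHandlerEntry() below (which recovers the index with LSR by
// StackHandler::kKindWidth), that the kind occupies the low kKindWidth bits:
static inline unsigned EncodeHandlerStateSketch(unsigned index, unsigned kind,
                                                int kind_width) {
  return (index << kind_width) | kind;  // IndexField::encode | KindField::encode.
}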
1227 
1228 void MacroAssembler::PopTryHandler() {
1229  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
1230  pop(r1);
1231  mov(ip, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
1232  add(sp, sp, Operand(StackHandlerConstants::kSize - kPointerSize));
1233  str(r1, MemOperand(ip));
1234 }
1235 
1236 
1237 void MacroAssembler::JumpToHandlerEntry() {
1238  // Compute the handler entry address and jump to it. The handler table is
1239  // a fixed array of (smi-tagged) code offsets.
1240  // r0 = exception, r1 = code object, r2 = state.
1241  ldr(r3, FieldMemOperand(r1, Code::kHandlerTableOffset)); // Handler table.
1242  add(r3, r3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
1243  mov(r2, Operand(r2, LSR, StackHandler::kKindWidth)); // Handler index.
1244  ldr(r2, MemOperand(r3, r2, LSL, kPointerSizeLog2)); // Smi-tagged offset.
1245  add(r1, r1, Operand(Code::kHeaderSize - kHeapObjectTag)); // Code start.
1246  add(pc, r1, Operand(r2, ASR, kSmiTagSize)); // Jump.
1247 }
1248 
1249 
1250 void MacroAssembler::Throw(Register value) {
1251  // Adjust this code if not the case.
1252  STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
1253  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
1254  STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
1255  STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
1256  STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
1257  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
1258 
1259  // The exception is expected in r0.
1260  if (!value.is(r0)) {
1261  mov(r0, value);
1262  }
1263  // Drop the stack pointer to the top of the top handler.
1264  mov(r3, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
1265  ldr(sp, MemOperand(r3));
1266  // Restore the next handler.
1267  pop(r2);
1268  str(r2, MemOperand(r3));
1269 
1270  // Get the code object (r1) and state (r2). Restore the context and frame
1271  // pointer.
1272  ldm(ia_w, sp, r1.bit() | r2.bit() | cp.bit() | fp.bit());
1273 
1274  // If the handler is a JS frame, restore the context to the frame.
1275  // (kind == ENTRY) == (fp == 0) == (cp == 0), so we could test either fp
1276  // or cp.
1277  tst(cp, cp);
1278  str(cp, MemOperand(fp, StandardFrameConstants::kContextOffset), ne);
1279 
1280  JumpToHandlerEntry();
1281 }
1282 
1283 
1284 void MacroAssembler::ThrowUncatchable(Register value) {
1285  // Adjust this code if not the case.
1286  STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
1287  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
1288  STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
1289  STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
1290  STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
1291  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
1292 
1293  // The exception is expected in r0.
1294  if (!value.is(r0)) {
1295  mov(r0, value);
1296  }
1297  // Drop the stack pointer to the top of the top stack handler.
1298  mov(r3, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
1299  ldr(sp, MemOperand(r3));
1300 
1301  // Unwind the handlers until the ENTRY handler is found.
1302  Label fetch_next, check_kind;
1303  jmp(&check_kind);
1304  bind(&fetch_next);
1305  ldr(sp, MemOperand(sp, StackHandlerConstants::kNextOffset));
1306 
1307  bind(&check_kind);
1308  STATIC_ASSERT(StackHandler::JS_ENTRY == 0);
1309  ldr(r2, MemOperand(sp, StackHandlerConstants::kStateOffset));
1310  tst(r2, Operand(StackHandler::KindField::kMask));
1311  b(ne, &fetch_next);
1312 
1313  // Set the top handler address to next handler past the top ENTRY handler.
1314  pop(r2);
1315  str(r2, MemOperand(r3));
1316  // Get the code object (r1) and state (r2). Clear the context and frame
1317  // pointer (0 was saved in the handler).
1318  ldm(ia_w, sp, r1.bit() | r2.bit() | cp.bit() | fp.bit());
1319 
1320  JumpToHandlerEntry();
1321 }
1322 
1323 
1324 void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
1325  Register scratch,
1326  Label* miss) {
1327  Label same_contexts;
1328 
1329  ASSERT(!holder_reg.is(scratch));
1330  ASSERT(!holder_reg.is(ip));
1331  ASSERT(!scratch.is(ip));
1332 
1333  // Load current lexical context from the stack frame.
1334  ldr(scratch, MemOperand(fp, StandardFrameConstants::kContextOffset));
1335  // In debug mode, make sure the lexical context is set.
1336 #ifdef DEBUG
1337  cmp(scratch, Operand(0, RelocInfo::NONE));
1338  Check(ne, "we should not have an empty lexical context");
1339 #endif
1340 
1341  // Load the global context of the current context.
1342  int offset = Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
1343  ldr(scratch, FieldMemOperand(scratch, offset));
1344  ldr(scratch, FieldMemOperand(scratch, GlobalObject::kGlobalContextOffset));
1345 
1346  // Check the context is a global context.
1347  if (emit_debug_code()) {
1348  // TODO(119): avoid push(holder_reg)/pop(holder_reg)
1349  // Cannot use ip as a temporary in this verification code, because ip is
1350  // clobbered as part of cmp with an object Operand.
1351  push(holder_reg); // Temporarily save holder on the stack.
1352  // Read the first word and compare to the global_context_map.
1353  ldr(holder_reg, FieldMemOperand(scratch, HeapObject::kMapOffset));
1354  LoadRoot(ip, Heap::kGlobalContextMapRootIndex);
1355  cmp(holder_reg, ip);
1356  Check(eq, "JSGlobalObject::global_context should be a global context.");
1357  pop(holder_reg); // Restore holder.
1358  }
1359 
1360  // Check if both contexts are the same.
1361  ldr(ip, FieldMemOperand(holder_reg, JSGlobalProxy::kContextOffset));
1362  cmp(scratch, Operand(ip));
1363  b(eq, &same_contexts);
1364 
1365  // Check the context is a global context.
1366  if (emit_debug_code()) {
1367  // TODO(119): avoid push(holder_reg)/pop(holder_reg)
1368  // Cannot use ip as a temporary in this verification code, because ip is
1369  // clobbered as part of cmp with an object Operand.
1370  push(holder_reg); // Temporarily save holder on the stack.
1371  mov(holder_reg, ip); // Move ip to its holding place.
1372  LoadRoot(ip, Heap::kNullValueRootIndex);
1373  cmp(holder_reg, ip);
1374  Check(ne, "JSGlobalProxy::context() should not be null.");
1375 
1376  ldr(holder_reg, FieldMemOperand(holder_reg, HeapObject::kMapOffset));
1377  LoadRoot(ip, Heap::kGlobalContextMapRootIndex);
1378  cmp(holder_reg, ip);
1379  Check(eq, "JSGlobalObject::global_context should be a global context.");
1380  // Restore ip is not needed. ip is reloaded below.
1381  pop(holder_reg); // Restore holder.
1382  // Restore ip to holder's context.
1383  ldr(ip, FieldMemOperand(holder_reg, JSGlobalProxy::kContextOffset));
1384  }
1385 
1386  // Check that the security token in the calling global object is
1387  // compatible with the security token in the receiving global
1388  // object.
1389  int token_offset = Context::kHeaderSize +
1390  Context::SECURITY_TOKEN_INDEX * kPointerSize;
1391 
1392  ldr(scratch, FieldMemOperand(scratch, token_offset));
1393  ldr(ip, FieldMemOperand(ip, token_offset));
1394  cmp(scratch, Operand(ip));
1395  b(ne, miss);
1396 
1397  bind(&same_contexts);
1398 }
1399 
1400 
1401 void MacroAssembler::GetNumberHash(Register t0, Register scratch) {
1402  // First of all we assign the hash seed to scratch.
1403  LoadRoot(scratch, Heap::kHashSeedRootIndex);
1404  SmiUntag(scratch);
1405 
1406  // Xor original key with a seed.
1407  eor(t0, t0, Operand(scratch));
1408 
1409  // Compute the hash code from the untagged key. This must be kept in sync
1410  // with ComputeIntegerHash in utils.h.
1411  //
1412  // hash = ~hash + (hash << 15);
1413  mvn(scratch, Operand(t0));
1414  add(t0, scratch, Operand(t0, LSL, 15));
1415  // hash = hash ^ (hash >> 12);
1416  eor(t0, t0, Operand(t0, LSR, 12));
1417  // hash = hash + (hash << 2);
1418  add(t0, t0, Operand(t0, LSL, 2));
1419  // hash = hash ^ (hash >> 4);
1420  eor(t0, t0, Operand(t0, LSR, 4));
1421  // hash = hash * 2057;
1422  mov(scratch, Operand(t0, LSL, 11));
1423  add(t0, t0, Operand(t0, LSL, 3));
1424  add(t0, t0, scratch);
1425  // hash = hash ^ (hash >> 16);
1426  eor(t0, t0, Operand(t0, LSR, 16));
1427 }
1428 
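// Editorial sketch, not part of the original v8 source: the instruction
// sequence in GetNumberHash() above implements the integer hash described by
// its comments (and kept in sync with ComputeIntegerHash in utils.h). The same
// steps in plain C++:
static inline unsigned IntegerHashSketch(unsigned key, unsigned seed) {
  unsigned hash = key ^ seed;   // Xor original key with a seed.
  hash = ~hash + (hash << 15);  // hash = (hash << 15) - hash - 1.
  hash = hash ^ (hash >> 12);
  hash = hash + (hash << 2);
  hash = hash ^ (hash >> 4);
  hash = hash * 2057;           // Same as hash + (hash << 3) + (hash << 11).
  hash = hash ^ (hash >> 16);
  return hash;
}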
1429 
1430 void MacroAssembler::LoadFromNumberDictionary(Label* miss,
1431  Register elements,
1432  Register key,
1433  Register result,
1434  Register t0,
1435  Register t1,
1436  Register t2) {
1437  // Register use:
1438  //
1439  // elements - holds the slow-case elements of the receiver on entry.
1440  // Unchanged unless 'result' is the same register.
1441  //
1442  // key - holds the smi key on entry.
1443  // Unchanged unless 'result' is the same register.
1444  //
1445  // result - holds the result on exit if the load succeeded.
1446  // Allowed to be the same as 'key' or 'result'.
1447  // Unchanged on bailout so 'key' or 'result' can be used
1448  // in further computation.
1449  //
1450  // Scratch registers:
1451  //
1452  // t0 - holds the untagged key on entry and holds the hash once computed.
1453  //
1454  // t1 - used to hold the capacity mask of the dictionary
1455  //
1456  // t2 - used for the index into the dictionary.
1457  Label done;
1458 
1459  GetNumberHash(t0, t1);
1460 
1461  // Compute the capacity mask.
1462  ldr(t1, FieldMemOperand(elements, SeededNumberDictionary::kCapacityOffset));
1463  mov(t1, Operand(t1, ASR, kSmiTagSize)); // convert smi to int
1464  sub(t1, t1, Operand(1));
1465 
1466  // Generate an unrolled loop that performs a few probes before giving up.
1467  static const int kProbes = 4;
1468  for (int i = 0; i < kProbes; i++) {
1469  // Use t2 for index calculations and keep the hash intact in t0.
1470  mov(t2, t0);
1471  // Compute the masked index: (hash + i + i * i) & mask.
1472  if (i > 0) {
1473  add(t2, t2, Operand(SeededNumberDictionary::GetProbeOffset(i)));
1474  }
1475  and_(t2, t2, Operand(t1));
1476 
1477  // Scale the index by multiplying by the element size.
1478  ASSERT(SeededNumberDictionary::kEntrySize == 3);
1479  add(t2, t2, Operand(t2, LSL, 1)); // t2 = t2 * 3
1480 
1481  // Check if the key is identical to the name.
1482  add(t2, elements, Operand(t2, LSL, kPointerSizeLog2));
1483  ldr(ip, FieldMemOperand(t2, SeededNumberDictionary::kElementsStartOffset));
1484  cmp(key, Operand(ip));
1485  if (i != kProbes - 1) {
1486  b(eq, &done);
1487  } else {
1488  b(ne, miss);
1489  }
1490  }
1491 
1492  bind(&done);
1493  // Check that the value is a normal property.
1494  // t2: elements + (index * kPointerSize)
1495  const int kDetailsOffset =
1496  SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize;
1497  ldr(t1, FieldMemOperand(t2, kDetailsOffset));
1498  tst(t1, Operand(Smi::FromInt(PropertyDetails::TypeField::kMask)));
1499  b(ne, miss);
1500 
1501  // Get the value at the masked, scaled index and return.
1502  const int kValueOffset =
1503  SeededNumberDictionary::kElementsStartOffset + kPointerSize;
1504  ldr(result, FieldMemOperand(t2, kValueOffset));
1505 }
1506 
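// Editorial sketch, not part of the original v8 source: each probe in
// LoadFromNumberDictionary() above masks the (possibly offset) hash with the
// capacity mask and multiplies by the three-word entry size to reach the
// entry. In host terms, with probe_offset standing in for
// SeededNumberDictionary::GetProbeOffset(i):
static inline int DictionaryEntryIndexSketch(unsigned hash, unsigned probe_offset,
                                             unsigned capacity_mask) {
  unsigned index = (hash + probe_offset) & capacity_mask;  // Masked index.
  return static_cast<int>(index * 3);                      // kEntrySize == 3 words.
}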
1507 
1508 void MacroAssembler::AllocateInNewSpace(int object_size,
1509  Register result,
1510  Register scratch1,
1511  Register scratch2,
1512  Label* gc_required,
1513  AllocationFlags flags) {
1514  if (!FLAG_inline_new) {
1515  if (emit_debug_code()) {
1516  // Trash the registers to simulate an allocation failure.
1517  mov(result, Operand(0x7091));
1518  mov(scratch1, Operand(0x7191));
1519  mov(scratch2, Operand(0x7291));
1520  }
1521  jmp(gc_required);
1522  return;
1523  }
1524 
1525  ASSERT(!result.is(scratch1));
1526  ASSERT(!result.is(scratch2));
1527  ASSERT(!scratch1.is(scratch2));
1528  ASSERT(!scratch1.is(ip));
1529  ASSERT(!scratch2.is(ip));
1530 
1531  // Make object size into bytes.
1532  if ((flags & SIZE_IN_WORDS) != 0) {
1533  object_size *= kPointerSize;
1534  }
1535  ASSERT_EQ(0, object_size & kObjectAlignmentMask);
1536 
1537  // Check relative positions of allocation top and limit addresses.
1538  // The values must be adjacent in memory to allow the use of LDM.
1539  // Also, assert that the registers are numbered such that the values
1540  // are loaded in the correct order.
1541  ExternalReference new_space_allocation_top =
1542  ExternalReference::new_space_allocation_top_address(isolate());
1543  ExternalReference new_space_allocation_limit =
1544  ExternalReference::new_space_allocation_limit_address(isolate());
1545  intptr_t top =
1546  reinterpret_cast<intptr_t>(new_space_allocation_top.address());
1547  intptr_t limit =
1548  reinterpret_cast<intptr_t>(new_space_allocation_limit.address());
1549  ASSERT((limit - top) == kPointerSize);
1550  ASSERT(result.code() < ip.code());
1551 
1552  // Set up allocation top address and object size registers.
1553  Register topaddr = scratch1;
1554  Register obj_size_reg = scratch2;
1555  mov(topaddr, Operand(new_space_allocation_top));
1556  mov(obj_size_reg, Operand(object_size));
1557 
1558  // This code stores a temporary value in ip. This is OK, as the code below
1559  // does not need ip for implicit literal generation.
1560  if ((flags & RESULT_CONTAINS_TOP) == 0) {
1561  // Load allocation top into result and allocation limit into ip.
1562  ldm(ia, topaddr, result.bit() | ip.bit());
1563  } else {
1564  if (emit_debug_code()) {
1565  // Assert that result actually contains top on entry. ip is used
1566  // immediately below so this use of ip does not cause difference with
1567  // respect to register content between debug and release mode.
1568  ldr(ip, MemOperand(topaddr));
1569  cmp(result, ip);
1570  Check(eq, "Unexpected allocation top");
1571  }
1572  // Load allocation limit into ip. Result already contains allocation top.
1573  ldr(ip, MemOperand(topaddr, limit - top));
1574  }
1575 
1576  // Calculate new top and bail out if new space is exhausted. Use result
1577  // to calculate the new top.
1578  add(scratch2, result, Operand(obj_size_reg), SetCC);
1579  b(cs, gc_required);
1580  cmp(scratch2, Operand(ip));
1581  b(hi, gc_required);
1582  str(scratch2, MemOperand(topaddr));
1583 
1584  // Tag object if requested.
1585  if ((flags & TAG_OBJECT) != 0) {
1586  add(result, result, Operand(kHeapObjectTag));
1587  }
1588 }
1589 
1590 
1591 void MacroAssembler::AllocateInNewSpace(Register object_size,
1592  Register result,
1593  Register scratch1,
1594  Register scratch2,
1595  Label* gc_required,
1596  AllocationFlags flags) {
1597  if (!FLAG_inline_new) {
1598  if (emit_debug_code()) {
1599  // Trash the registers to simulate an allocation failure.
1600  mov(result, Operand(0x7091));
1601  mov(scratch1, Operand(0x7191));
1602  mov(scratch2, Operand(0x7291));
1603  }
1604  jmp(gc_required);
1605  return;
1606  }
1607 
1608  // Assert that the register arguments are different and that none of
1609  // them are ip. ip is used explicitly in the code generated below.
1610  ASSERT(!result.is(scratch1));
1611  ASSERT(!result.is(scratch2));
1612  ASSERT(!scratch1.is(scratch2));
1613  ASSERT(!object_size.is(ip));
1614  ASSERT(!result.is(ip));
1615  ASSERT(!scratch1.is(ip));
1616  ASSERT(!scratch2.is(ip));
1617 
1618  // Check relative positions of allocation top and limit addresses.
1619  // The values must be adjacent in memory to allow the use of LDM.
1620  // Also, assert that the registers are numbered such that the values
1621  // are loaded in the correct order.
1622  ExternalReference new_space_allocation_top =
1623  ExternalReference::new_space_allocation_top_address(isolate());
1624  ExternalReference new_space_allocation_limit =
1625  ExternalReference::new_space_allocation_limit_address(isolate());
1626  intptr_t top =
1627  reinterpret_cast<intptr_t>(new_space_allocation_top.address());
1628  intptr_t limit =
1629  reinterpret_cast<intptr_t>(new_space_allocation_limit.address());
1630  ASSERT((limit - top) == kPointerSize);
1631  ASSERT(result.code() < ip.code());
1632 
1633  // Set up allocation top address.
1634  Register topaddr = scratch1;
1635  mov(topaddr, Operand(new_space_allocation_top));
1636 
1637  // This code stores a temporary value in ip. This is OK, as the code below
1638  // does not need ip for implicit literal generation.
1639  if ((flags & RESULT_CONTAINS_TOP) == 0) {
1640  // Load allocation top into result and allocation limit into ip.
1641  ldm(ia, topaddr, result.bit() | ip.bit());
1642  } else {
1643  if (emit_debug_code()) {
1644  // Assert that result actually contains top on entry. ip is used
1645  // immediately below, so this use of ip does not cause a difference in
1646  // register content between debug and release mode.
1647  ldr(ip, MemOperand(topaddr));
1648  cmp(result, ip);
1649  Check(eq, "Unexpected allocation top");
1650  }
1651  // Load allocation limit into ip. Result already contains allocation top.
1652  ldr(ip, MemOperand(topaddr, limit - top));
1653  }
1654 
1655  // Calculate new top and bail out if new space is exhausted. Use result
1656  // to calculate the new top. Object size may be in words so a shift is
1657  // required to get the number of bytes.
1658  if ((flags & SIZE_IN_WORDS) != 0) {
1659  add(scratch2, result, Operand(object_size, LSL, kPointerSizeLog2), SetCC);
1660  } else {
1661  add(scratch2, result, Operand(object_size), SetCC);
1662  }
1663  b(cs, gc_required);
1664  cmp(scratch2, Operand(ip));
1665  b(hi, gc_required);
1666 
1667  // Update allocation top. result temporarily holds the new top.
1668  if (emit_debug_code()) {
1669  tst(scratch2, Operand(kObjectAlignmentMask));
1670  Check(eq, "Unaligned allocation in new space");
1671  }
1672  str(scratch2, MemOperand(topaddr));
1673 
1674  // Tag object if requested.
1675  if ((flags & TAG_OBJECT) != 0) {
1676  add(result, result, Operand(kHeapObjectTag));
1677  }
1678 }
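// Illustrative sketch (not part of macro-assembler-arm.cc): the two
// AllocateInNewSpace variants above implement a bump-pointer fast path:
// load top and limit, add the object size in bytes, bail out to gc_required
// on overflow or when the limit is exceeded, then publish the new top and
// optionally tag the result. A minimal host-side C++ equivalent, with an
// assumed one-bit heap-object tag, could look like this:
#include <cstddef>
#include <cstdint>

static const uintptr_t kSketchHeapObjectTag = 1;  // assumed tag value

// Returns 0 on failure, the moral equivalent of branching to gc_required.
inline uintptr_t TryBumpAllocate(uintptr_t* top, uintptr_t limit,
                                 size_t object_size, bool tag_object) {
  uintptr_t result = *top;
  uintptr_t new_top = result + object_size;
  if (new_top < result || new_top > limit) return 0;  // b(cs/hi, gc_required)
  *top = new_top;                                     // str(scratch2, [topaddr])
  return tag_object ? result + kSketchHeapObjectTag : result;
}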
1679 
1680 
1681 void MacroAssembler::UndoAllocationInNewSpace(Register object,
1682  Register scratch) {
1683  ExternalReference new_space_allocation_top =
1684  ExternalReference::new_space_allocation_top_address(isolate());
1685 
1686  // Make sure the object has no tag before resetting top.
1687  and_(object, object, Operand(~kHeapObjectTagMask));
1688 #ifdef DEBUG
1689  // Check that the object un-allocated is below the current top.
1690  mov(scratch, Operand(new_space_allocation_top));
1691  ldr(scratch, MemOperand(scratch));
1692  cmp(object, scratch);
1693  Check(lt, "Undo allocation of non allocated memory");
1694 #endif
1695  // Write the address of the object to un-allocate as the current top.
1696  mov(scratch, Operand(new_space_allocation_top));
1697  str(object, MemOperand(scratch));
1698 }
1699 
1700 
1701 void MacroAssembler::AllocateTwoByteString(Register result,
1702  Register length,
1703  Register scratch1,
1704  Register scratch2,
1705  Register scratch3,
1706  Label* gc_required) {
1707  // Calculate the number of bytes needed for the characters in the string while
1708  // observing object alignment.
1709  ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
1710  mov(scratch1, Operand(length, LSL, 1)); // Length in bytes, not chars.
1711  add(scratch1, scratch1,
1712  Operand(kObjectAlignmentMask + SeqTwoByteString::kHeaderSize));
1713  and_(scratch1, scratch1, Operand(~kObjectAlignmentMask));
1714 
1715  // Allocate two-byte string in new space.
1716  AllocateInNewSpace(scratch1,
1717  result,
1718  scratch2,
1719  scratch3,
1720  gc_required,
1721  TAG_OBJECT);
1722 
1723  // Set the map, length and hash field.
1724  InitializeNewString(result,
1725  length,
1726  Heap::kStringMapRootIndex,
1727  scratch1,
1728  scratch2);
1729 }
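// Illustrative sketch (not V8 code) of the size rounding done above: the byte
// length is added to the header size plus the alignment mask and then masked
// down, giving the header-plus-payload size rounded up to the object
// alignment. The constants below are assumptions chosen for the example.
#include <cstddef>

inline size_t AlignedTwoByteStringSize(size_t length) {
  const size_t kAssumedAlignmentMask = 7;   // assumed 8-byte object alignment
  const size_t kAssumedHeaderSize = 16;     // assumed SeqTwoByteString header
  size_t payload = length * 2;              // two bytes per character
  return (payload + kAssumedAlignmentMask + kAssumedHeaderSize) &
         ~kAssumedAlignmentMask;
}
// Example: AlignedTwoByteStringSize(3) == 24 with these assumed constants.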
1730 
1731 
1732 void MacroAssembler::AllocateAsciiString(Register result,
1733  Register length,
1734  Register scratch1,
1735  Register scratch2,
1736  Register scratch3,
1737  Label* gc_required) {
1738  // Calculate the number of bytes needed for the characters in the string while
1739  // observing object alignment.
1740  ASSERT((SeqAsciiString::kHeaderSize & kObjectAlignmentMask) == 0);
1741  ASSERT(kCharSize == 1);
1742  add(scratch1, length,
1743  Operand(kObjectAlignmentMask + SeqAsciiString::kHeaderSize));
1744  and_(scratch1, scratch1, Operand(~kObjectAlignmentMask));
1745 
1746  // Allocate ASCII string in new space.
1747  AllocateInNewSpace(scratch1,
1748  result,
1749  scratch2,
1750  scratch3,
1751  gc_required,
1752  TAG_OBJECT);
1753 
1754  // Set the map, length and hash field.
1755  InitializeNewString(result,
1756  length,
1757  Heap::kAsciiStringMapRootIndex,
1758  scratch1,
1759  scratch2);
1760 }
1761 
1762 
1763 void MacroAssembler::AllocateTwoByteConsString(Register result,
1764  Register length,
1765  Register scratch1,
1766  Register scratch2,
1767  Label* gc_required) {
1768  AllocateInNewSpace(ConsString::kSize,
1769  result,
1770  scratch1,
1771  scratch2,
1772  gc_required,
1773  TAG_OBJECT);
1774 
1775  InitializeNewString(result,
1776  length,
1777  Heap::kConsStringMapRootIndex,
1778  scratch1,
1779  scratch2);
1780 }
1781 
1782 
1783 void MacroAssembler::AllocateAsciiConsString(Register result,
1784  Register length,
1785  Register scratch1,
1786  Register scratch2,
1787  Label* gc_required) {
1788  AllocateInNewSpace(ConsString::kSize,
1789  result,
1790  scratch1,
1791  scratch2,
1792  gc_required,
1793  TAG_OBJECT);
1794 
1795  InitializeNewString(result,
1796  length,
1797  Heap::kConsAsciiStringMapRootIndex,
1798  scratch1,
1799  scratch2);
1800 }
1801 
1802 
1803 void MacroAssembler::AllocateTwoByteSlicedString(Register result,
1804  Register length,
1805  Register scratch1,
1806  Register scratch2,
1807  Label* gc_required) {
1808  AllocateInNewSpace(SlicedString::kSize,
1809  result,
1810  scratch1,
1811  scratch2,
1812  gc_required,
1813  TAG_OBJECT);
1814 
1815  InitializeNewString(result,
1816  length,
1817  Heap::kSlicedStringMapRootIndex,
1818  scratch1,
1819  scratch2);
1820 }
1821 
1822 
1823 void MacroAssembler::AllocateAsciiSlicedString(Register result,
1824  Register length,
1825  Register scratch1,
1826  Register scratch2,
1827  Label* gc_required) {
1828  AllocateInNewSpace(SlicedString::kSize,
1829  result,
1830  scratch1,
1831  scratch2,
1832  gc_required,
1833  TAG_OBJECT);
1834 
1835  InitializeNewString(result,
1836  length,
1837  Heap::kSlicedAsciiStringMapRootIndex,
1838  scratch1,
1839  scratch2);
1840 }
1841 
1842 
1843 void MacroAssembler::CompareObjectType(Register object,
1844  Register map,
1845  Register type_reg,
1846  InstanceType type) {
1847  ldr(map, FieldMemOperand(object, HeapObject::kMapOffset));
1848  CompareInstanceType(map, type_reg, type);
1849 }
1850 
1851 
1852 void MacroAssembler::CompareInstanceType(Register map,
1853  Register type_reg,
1854  InstanceType type) {
1855  ldrb(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
1856  cmp(type_reg, Operand(type));
1857 }
1858 
1859 
1860 void MacroAssembler::CompareRoot(Register obj,
1861  Heap::RootListIndex index) {
1862  ASSERT(!obj.is(ip));
1863  LoadRoot(ip, index);
1864  cmp(obj, ip);
1865 }
1866 
1867 
1868 void MacroAssembler::CheckFastElements(Register map,
1869  Register scratch,
1870  Label* fail) {
1871  STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
1872  STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
1873  STATIC_ASSERT(FAST_ELEMENTS == 2);
1874  STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
1875  ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset));
1876  cmp(scratch, Operand(Map::kMaximumBitField2FastHoleyElementValue));
1877  b(hi, fail);
1878 }
1879 
1880 
1881 void MacroAssembler::CheckFastObjectElements(Register map,
1882  Register scratch,
1883  Label* fail) {
1884  STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
1885  STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
1886  STATIC_ASSERT(FAST_ELEMENTS == 2);
1887  STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
1888  ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset));
1889  cmp(scratch, Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
1890  b(ls, fail);
1891  cmp(scratch, Operand(Map::kMaximumBitField2FastHoleyElementValue));
1892  b(hi, fail);
1893 }
1894 
1895 
1896 void MacroAssembler::CheckFastSmiElements(Register map,
1897  Register scratch,
1898  Label* fail) {
1899  STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
1900  STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
1901  ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset));
1902  cmp(scratch, Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
1903  b(hi, fail);
1904 }
1905 
1906 
1907 void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
1908  Register key_reg,
1909  Register receiver_reg,
1910  Register elements_reg,
1911  Register scratch1,
1912  Register scratch2,
1913  Register scratch3,
1914  Register scratch4,
1915  Label* fail) {
1916  Label smi_value, maybe_nan, have_double_value, is_nan, done;
1917  Register mantissa_reg = scratch2;
1918  Register exponent_reg = scratch3;
1919 
1920  // Handle smi values specially.
1921  JumpIfSmi(value_reg, &smi_value);
1922 
1923  // Ensure that the object is a heap number.
1924  CheckMap(value_reg,
1925  scratch1,
1926  isolate()->factory()->heap_number_map(),
1927  fail,
1928  DONT_DO_SMI_CHECK);
1929 
1930  // Check for NaN: all NaN values have a value greater (signed) than 0x7ff00000
1931  // in the exponent.
1932  mov(scratch1, Operand(kNaNOrInfinityLowerBoundUpper32));
1933  ldr(exponent_reg, FieldMemOperand(value_reg, HeapNumber::kExponentOffset));
1934  cmp(exponent_reg, scratch1);
1935  b(ge, &maybe_nan);
1936 
1937  ldr(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset));
1938 
1939  bind(&have_double_value);
1940  add(scratch1, elements_reg,
1941  Operand(key_reg, LSL, kDoubleSizeLog2 - kSmiTagSize));
1942  str(mantissa_reg, FieldMemOperand(scratch1, FixedDoubleArray::kHeaderSize));
1943  uint32_t offset = FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32);
1944  str(exponent_reg, FieldMemOperand(scratch1, offset));
1945  jmp(&done);
1946 
1947  bind(&maybe_nan);
1948  // Could be NaN or Infinity. If fraction is not zero, it's NaN, otherwise
1949  // it's an Infinity, and the non-NaN code path applies.
1950  b(gt, &is_nan);
1951  ldr(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset));
1952  cmp(mantissa_reg, Operand(0));
1953  b(eq, &have_double_value);
1954  bind(&is_nan);
1955  // Load canonical NaN for storing into the double array.
1956  uint64_t nan_int64 = BitCast<uint64_t>(
1957  FixedDoubleArray::canonical_not_the_hole_nan_as_double());
1958  mov(mantissa_reg, Operand(static_cast<uint32_t>(nan_int64)));
1959  mov(exponent_reg, Operand(static_cast<uint32_t>(nan_int64 >> 32)));
1960  jmp(&have_double_value);
1961 
1962  bind(&smi_value);
1963  add(scratch1, elements_reg,
1964  Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
1965  add(scratch1, scratch1,
1966  Operand(key_reg, LSL, kDoubleSizeLog2 - kSmiTagSize));
1967  // scratch1 is now the effective address of the double element.
1968 
1969  FloatingPointHelper::Destination destination;
1970  if (CpuFeatures::IsSupported(VFP3)) {
1971  destination = FloatingPointHelper::kVFPRegisters;
1972  } else {
1973  destination = FloatingPointHelper::kCoreRegisters;
1974  }
1975 
1976  Register untagged_value = receiver_reg;
1977  SmiUntag(untagged_value, value_reg);
1978  FloatingPointHelper::ConvertIntToDouble(this,
1979  untagged_value,
1980  destination,
1981  d0,
1982  mantissa_reg,
1983  exponent_reg,
1984  scratch4,
1985  s2);
1986  if (destination == FloatingPointHelper::kVFPRegisters) {
1987  CpuFeatures::Scope scope(VFP3);
1988  vstr(d0, scratch1, 0);
1989  } else {
1990  str(mantissa_reg, MemOperand(scratch1, 0));
1991  str(exponent_reg, MemOperand(scratch1, Register::kSizeInBytes));
1992  }
1993  bind(&done);
1994 }
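// Sketch (not V8 code) of the classification the code above performs on the
// heap number's upper word: an all-ones exponent means NaN or Infinity, and
// a non-zero fraction distinguishes NaN from Infinity. Illustration only.
#include <cstdint>
#include <cstring>

inline bool IsNaNOrInfinity(double d, bool* is_nan) {
  uint64_t bits;
  std::memcpy(&bits, &d, sizeof bits);
  uint32_t upper = static_cast<uint32_t>(bits >> 32);
  uint32_t lower = static_cast<uint32_t>(bits);
  const uint32_t kExponentMask = 0x7ff00000u;      // same bound as above
  if ((upper & kExponentMask) != kExponentMask) return false;
  *is_nan = ((upper & 0x000fffffu) | lower) != 0;  // any fraction bit set
  return true;
}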
1995 
1996 
1997 void MacroAssembler::CompareMap(Register obj,
1998  Register scratch,
1999  Handle<Map> map,
2000  Label* early_success,
2001  CompareMapMode mode) {
2002  ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
2003  CompareMap(scratch, map, early_success, mode);
2004 }
2005 
2006 
2007 void MacroAssembler::CompareMap(Register obj_map,
2008  Handle<Map> map,
2009  Label* early_success,
2010  CompareMapMode mode) {
2011  cmp(obj_map, Operand(map));
2012  if (mode == ALLOW_ELEMENT_TRANSITION_MAPS) {
2013  ElementsKind kind = map->elements_kind();
2014  if (IsFastElementsKind(kind)) {
2015  bool packed = IsFastPackedElementsKind(kind);
2016  Map* current_map = *map;
2017  while (CanTransitionToMoreGeneralFastElementsKind(kind, packed)) {
2018  kind = GetNextMoreGeneralFastElementsKind(kind, packed);
2019  current_map = current_map->LookupElementsTransitionMap(kind);
2020  if (!current_map) break;
2021  b(eq, early_success);
2022  cmp(obj_map, Operand(Handle<Map>(current_map)));
2023  }
2024  }
2025  }
2026 }
2027 
2028 
2029 void MacroAssembler::CheckMap(Register obj,
2030  Register scratch,
2031  Handle<Map> map,
2032  Label* fail,
2033  SmiCheckType smi_check_type,
2034  CompareMapMode mode) {
2035  if (smi_check_type == DO_SMI_CHECK) {
2036  JumpIfSmi(obj, fail);
2037  }
2038 
2039  Label success;
2040  CompareMap(obj, scratch, map, &success, mode);
2041  b(ne, fail);
2042  bind(&success);
2043 }
2044 
2045 
2046 void MacroAssembler::CheckMap(Register obj,
2047  Register scratch,
2048  Heap::RootListIndex index,
2049  Label* fail,
2050  SmiCheckType smi_check_type) {
2051  if (smi_check_type == DO_SMI_CHECK) {
2052  JumpIfSmi(obj, fail);
2053  }
2054  ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
2055  LoadRoot(ip, index);
2056  cmp(scratch, ip);
2057  b(ne, fail);
2058 }
2059 
2060 
2061 void MacroAssembler::DispatchMap(Register obj,
2062  Register scratch,
2063  Handle<Map> map,
2064  Handle<Code> success,
2065  SmiCheckType smi_check_type) {
2066  Label fail;
2067  if (smi_check_type == DO_SMI_CHECK) {
2068  JumpIfSmi(obj, &fail);
2069  }
2070  ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
2071  mov(ip, Operand(map));
2072  cmp(scratch, ip);
2073  Jump(success, RelocInfo::CODE_TARGET, eq);
2074  bind(&fail);
2075 }
2076 
2077 
2078 void MacroAssembler::TryGetFunctionPrototype(Register function,
2079  Register result,
2080  Register scratch,
2081  Label* miss,
2082  bool miss_on_bound_function) {
2083  // Check that the receiver isn't a smi.
2084  JumpIfSmi(function, miss);
2085 
2086  // Check that the function really is a function. Load map into result reg.
2087  CompareObjectType(function, result, scratch, JS_FUNCTION_TYPE);
2088  b(ne, miss);
2089 
2090  if (miss_on_bound_function) {
2091  ldr(scratch,
2092  FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
2093  ldr(scratch,
2094  FieldMemOperand(scratch, SharedFunctionInfo::kCompilerHintsOffset));
2095  tst(scratch,
2096  Operand(Smi::FromInt(1 << SharedFunctionInfo::kBoundFunction)));
2097  b(ne, miss);
2098  }
2099 
2100  // Make sure that the function has an instance prototype.
2101  Label non_instance;
2102  ldrb(scratch, FieldMemOperand(result, Map::kBitFieldOffset));
2103  tst(scratch, Operand(1 << Map::kHasNonInstancePrototype));
2104  b(ne, &non_instance);
2105 
2106  // Get the prototype or initial map from the function.
2107  ldr(result,
2108  FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
2109 
2110  // If the prototype or initial map is the hole, don't return it and
2111  // simply miss the cache instead. This will allow us to allocate a
2112  // prototype object on-demand in the runtime system.
2113  LoadRoot(ip, Heap::kTheHoleValueRootIndex);
2114  cmp(result, ip);
2115  b(eq, miss);
2116 
2117  // If the function does not have an initial map, we're done.
2118  Label done;
2119  CompareObjectType(result, scratch, scratch, MAP_TYPE);
2120  b(ne, &done);
2121 
2122  // Get the prototype from the initial map.
2123  ldr(result, FieldMemOperand(result, Map::kPrototypeOffset));
2124  jmp(&done);
2125 
2126  // Non-instance prototype: Fetch prototype from constructor field
2127  // in initial map.
2128  bind(&non_instance);
2129  ldr(result, FieldMemOperand(result, Map::kConstructorOffset));
2130 
2131  // All done.
2132  bind(&done);
2133 }
2134 
2135 
2136 void MacroAssembler::CallStub(CodeStub* stub, Condition cond) {
2137  ASSERT(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs.
2138  Call(stub->GetCode(), RelocInfo::CODE_TARGET, kNoASTId, cond);
2139 }
2140 
2141 
2142 void MacroAssembler::TailCallStub(CodeStub* stub, Condition cond) {
2143  ASSERT(allow_stub_calls_ || stub->CompilingCallsToThisStubIsGCSafe());
2144  Jump(stub->GetCode(), RelocInfo::CODE_TARGET, cond);
2145 }
2146 
2147 
2148 static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
2149  return ref0.address() - ref1.address();
2150 }
2151 
2152 
2153 void MacroAssembler::CallApiFunctionAndReturn(ExternalReference function,
2154  int stack_space) {
2155  ExternalReference next_address =
2156  ExternalReference::handle_scope_next_address();
2157  const int kNextOffset = 0;
2158  const int kLimitOffset = AddressOffset(
2159  ExternalReference::handle_scope_limit_address(),
2160  next_address);
2161  const int kLevelOffset = AddressOffset(
2162  ExternalReference::handle_scope_level_address(),
2163  next_address);
2164 
2165  // Allocate HandleScope in callee-save registers.
2166  mov(r7, Operand(next_address));
2167  ldr(r4, MemOperand(r7, kNextOffset));
2168  ldr(r5, MemOperand(r7, kLimitOffset));
2169  ldr(r6, MemOperand(r7, kLevelOffset));
2170  add(r6, r6, Operand(1));
2171  str(r6, MemOperand(r7, kLevelOffset));
2172 
2173  // Native call returns to the DirectCEntry stub which redirects to the
2174  // return address pushed on stack (could have moved after GC).
2175  // DirectCEntry stub itself is generated early and never moves.
2176  DirectCEntryStub stub;
2177  stub.GenerateCall(this, function);
2178 
2179  Label promote_scheduled_exception;
2180  Label delete_allocated_handles;
2181  Label leave_exit_frame;
2182 
2183  // If result is non-zero, dereference it to get the result value;
2184  // otherwise set it to undefined.
2185  cmp(r0, Operand(0));
2186  LoadRoot(r0, Heap::kUndefinedValueRootIndex, eq);
2187  ldr(r0, MemOperand(r0), ne);
2188 
2189  // No more valid handles (the result handle was the last one). Restore
2190  // previous handle scope.
2191  str(r4, MemOperand(r7, kNextOffset));
2192  if (emit_debug_code()) {
2193  ldr(r1, MemOperand(r7, kLevelOffset));
2194  cmp(r1, r6);
2195  Check(eq, "Unexpected level after return from api call");
2196  }
2197  sub(r6, r6, Operand(1));
2198  str(r6, MemOperand(r7, kLevelOffset));
2199  ldr(ip, MemOperand(r7, kLimitOffset));
2200  cmp(r5, ip);
2201  b(ne, &delete_allocated_handles);
2202 
2203  // Check if the function scheduled an exception.
2204  bind(&leave_exit_frame);
2205  LoadRoot(r4, Heap::kTheHoleValueRootIndex);
2206  mov(ip, Operand(ExternalReference::scheduled_exception_address(isolate())));
2207  ldr(r5, MemOperand(ip));
2208  cmp(r4, r5);
2209  b(ne, &promote_scheduled_exception);
2210 
2211  // LeaveExitFrame expects unwind space to be in a register.
2212  mov(r4, Operand(stack_space));
2213  LeaveExitFrame(false, r4);
2214  mov(pc, lr);
2215 
2216  bind(&promote_scheduled_exception);
2217  TailCallExternalReference(
2218  ExternalReference(Runtime::kPromoteScheduledException, isolate()),
2219  0,
2220  1);
2221 
2222  // HandleScope limit has changed. Delete allocated extensions.
2223  bind(&delete_allocated_handles);
2224  str(r5, MemOperand(r7, kLimitOffset));
2225  mov(r4, r0);
2226  PrepareCallCFunction(1, r5);
2227  mov(r0, Operand(ExternalReference::isolate_address()));
2228  CallCFunction(
2229  ExternalReference::delete_handle_scope_extensions(isolate()), 1);
2230  mov(r0, r4);
2231  jmp(&leave_exit_frame);
2232 }
2233 
2234 
2235 bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
2236  if (!has_frame_ && stub->SometimesSetsUpAFrame()) return false;
2237  return allow_stub_calls_ || stub->CompilingCallsToThisStubIsGCSafe();
2238 }
2239 
2240 
2241 void MacroAssembler::IllegalOperation(int num_arguments) {
2242  if (num_arguments > 0) {
2243  add(sp, sp, Operand(num_arguments * kPointerSize));
2244  }
2245  LoadRoot(r0, Heap::kUndefinedValueRootIndex);
2246 }
2247 
2248 
2249 void MacroAssembler::IndexFromHash(Register hash, Register index) {
2250  // If the hash field contains an array index pick it out. The assert checks
2251  // that the constants for the maximum number of digits for an array index
2252  // cached in the hash field and the number of bits reserved for it do not
2253  // conflict.
2254  ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) <
2255  (1 << String::kArrayIndexValueBits));
2256  // We want the smi-tagged index in key. kArrayIndexValueMask has zeros in
2257  // the low kHashShift bits.
2258  STATIC_ASSERT(kSmiTag == 0);
2259  Ubfx(hash, hash, String::kHashShift, String::kArrayIndexValueBits);
2260  mov(index, Operand(hash, LSL, kSmiTagSize));
2261 }
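// Sketch of the bitfield extraction and smi tagging performed above. The
// shift and width constants are assumptions standing in for String::kHashShift
// and String::kArrayIndexValueBits; this is illustration only.
#include <cstdint>

inline int32_t SmiTaggedIndexFromHash(uint32_t hash_field) {
  const int kAssumedHashShift = 2;    // assumed position of the cached index
  const int kAssumedIndexBits = 24;   // assumed width of the cached index
  const int kSmiTagSize = 1;          // smi tag is 0, one tag bit
  uint32_t index =
      (hash_field >> kAssumedHashShift) & ((1u << kAssumedIndexBits) - 1);
  return static_cast<int32_t>(index << kSmiTagSize);
}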
2262 
2263 
2264 void MacroAssembler::IntegerToDoubleConversionWithVFP3(Register inReg,
2265  Register outHighReg,
2266  Register outLowReg) {
2267  // ARMv7 VFP3 instructions to implement integer to double conversion.
2268  mov(r7, Operand(inReg, ASR, kSmiTagSize));
2269  vmov(s15, r7);
2270  vcvt_f64_s32(d7, s15);
2271  vmov(outLowReg, outHighReg, d7);
2272 }
2273 
2274 
2275 void MacroAssembler::ObjectToDoubleVFPRegister(Register object,
2276  DwVfpRegister result,
2277  Register scratch1,
2278  Register scratch2,
2279  Register heap_number_map,
2280  SwVfpRegister scratch3,
2281  Label* not_number,
2282  ObjectToDoubleFlags flags) {
2283  Label done;
2284  if ((flags & OBJECT_NOT_SMI) == 0) {
2285  Label not_smi;
2286  JumpIfNotSmi(object, &not_smi);
2287  // Remove smi tag and convert to double.
2288  mov(scratch1, Operand(object, ASR, kSmiTagSize));
2289  vmov(scratch3, scratch1);
2290  vcvt_f64_s32(result, scratch3);
2291  b(&done);
2292  bind(&not_smi);
2293  }
2294  // Check for heap number and load double value from it.
2295  ldr(scratch1, FieldMemOperand(object, HeapObject::kMapOffset));
2296  sub(scratch2, object, Operand(kHeapObjectTag));
2297  cmp(scratch1, heap_number_map);
2298  b(ne, not_number);
2299  if ((flags & AVOID_NANS_AND_INFINITIES) != 0) {
2300  // If exponent is all ones the number is either a NaN or +/-Infinity.
2301  ldr(scratch1, FieldMemOperand(object, HeapNumber::kExponentOffset));
2302  Sbfx(scratch1,
2303  scratch1,
2304  HeapNumber::kExponentShift,
2305  HeapNumber::kExponentBits);
2306  // All-one value sign extend to -1.
2307  cmp(scratch1, Operand(-1));
2308  b(eq, not_number);
2309  }
2310  vldr(result, scratch2, HeapNumber::kValueOffset);
2311  bind(&done);
2312 }
2313 
2314 
2315 void MacroAssembler::SmiToDoubleVFPRegister(Register smi,
2316  DwVfpRegister value,
2317  Register scratch1,
2318  SwVfpRegister scratch2) {
2319  mov(scratch1, Operand(smi, ASR, kSmiTagSize));
2320  vmov(scratch2, scratch1);
2321  vcvt_f64_s32(value, scratch2);
2322 }
2323 
2324 
2325 // Tries to get a signed int32 out of a double precision floating point heap
2326 // number. Rounds towards 0. Branch to 'not_int32' if the double is out of the
2327  // 32-bit signed integer range.
2328 void MacroAssembler::ConvertToInt32(Register source,
2329  Register dest,
2330  Register scratch,
2331  Register scratch2,
2332  DwVfpRegister double_scratch,
2333  Label *not_int32) {
2334  if (CpuFeatures::IsSupported(VFP3)) {
2335  CpuFeatures::Scope scope(VFP3);
2336  sub(scratch, source, Operand(kHeapObjectTag));
2337  vldr(double_scratch, scratch, HeapNumber::kValueOffset);
2338  vcvt_s32_f64(double_scratch.low(), double_scratch);
2339  vmov(dest, double_scratch.low());
2340  // Signed vcvt instruction will saturate to the minimum (0x80000000) or
2341  // maximum (0x7fffffff) signed 32-bit integer when the double is out of
2342  // range. When subtracting one, the minimum signed integer becomes the
2343  // maximum signed integer.
2344  sub(scratch, dest, Operand(1));
2345  cmp(scratch, Operand(LONG_MAX - 1));
2346  // If equal then dest was LONG_MAX, if greater dest was LONG_MIN.
2347  b(ge, not_int32);
2348  } else {
2349  // This code is faster for doubles that are in the ranges -0x7fffffff to
2350  // -0x40000000 or 0x40000000 to 0x7fffffff. This corresponds almost to
2351  // the range of signed int32 values that are not Smis. Jumps to the label
2352  // 'not_int32' if the double isn't in the range -0x80000000.0 to
2353  // 0x80000000.0 (excluding the endpoints).
2354  Label right_exponent, done;
2355  // Get exponent word.
2356  ldr(scratch, FieldMemOperand(source, HeapNumber::kExponentOffset));
2357  // Get exponent alone in scratch2.
2358  Ubfx(scratch2,
2359  scratch,
2360  HeapNumber::kExponentShift,
2361  HeapNumber::kExponentBits);
2362  // Load dest with zero. We use this either for the final shift or
2363  // for the answer.
2364  mov(dest, Operand(0, RelocInfo::NONE));
2365  // Check whether the exponent matches a 32 bit signed int that is not a Smi.
2366  // A non-Smi integer is 1.xxx * 2^30 so the exponent is 30 (biased). This is
2367  // the exponent that we are fastest at and also the highest exponent we can
2368  // handle here.
2369  const uint32_t non_smi_exponent = HeapNumber::kExponentBias + 30;
2370  // The non_smi_exponent, 0x41d, is too big for ARM's immediate field so we
2371  // split it up to avoid a constant pool entry. You can't do that in general
2372  // for cmp because of the overflow flag, but we know the exponent is in the
2373  // range 0-2047 so there is no overflow.
2374  int fudge_factor = 0x400;
2375  sub(scratch2, scratch2, Operand(fudge_factor));
2376  cmp(scratch2, Operand(non_smi_exponent - fudge_factor));
2377  // If we have a match of the int32-but-not-Smi exponent then skip some
2378  // logic.
2379  b(eq, &right_exponent);
2380  // If the exponent is higher than that then go to slow case. This catches
2381  // numbers that don't fit in a signed int32, infinities and NaNs.
2382  b(gt, not_int32);
2383 
2384  // We know the exponent is smaller than 30 (biased). If it is less than
2385  // 0 (biased) then the number is smaller in magnitude than 1.0 * 2^0, i.e.
2386  // it rounds to zero.
2387  const uint32_t zero_exponent = HeapNumber::kExponentBias + 0;
2388  sub(scratch2, scratch2, Operand(zero_exponent - fudge_factor), SetCC);
2389  // Dest already has a Smi zero.
2390  b(lt, &done);
2391 
2392  // We have an exponent between 0 and 30 in scratch2. Subtract from 30 to
2393  // get how much to shift down.
2394  rsb(dest, scratch2, Operand(30));
2395 
2396  bind(&right_exponent);
2397  // Get the top bits of the mantissa.
2398  and_(scratch2, scratch, Operand(HeapNumber::kMantissaMask));
2399  // Put back the implicit 1.
2400  orr(scratch2, scratch2, Operand(1 << HeapNumber::kExponentShift));
2401  // Shift up the mantissa bits to take up the space the exponent used to
2402  // take. We just orred in the implicit bit so that took care of one and
2403  // we want to leave the sign bit 0 so we subtract 2 bits from the shift
2404  // distance.
2405  const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
2406  mov(scratch2, Operand(scratch2, LSL, shift_distance));
2407  // Put sign in zero flag.
2408  tst(scratch, Operand(HeapNumber::kSignMask));
2409  // Get the second half of the double. For some exponents we don't
2410  // actually need this because the bits get shifted out again, but
2411  // it's probably slower to test than just to do it.
2412  ldr(scratch, FieldMemOperand(source, HeapNumber::kMantissaOffset));
2413  // Shift down 22 bits to get the last 10 bits.
2414  orr(scratch, scratch2, Operand(scratch, LSR, 32 - shift_distance));
2415  // Move down according to the exponent.
2416  mov(dest, Operand(scratch, LSR, dest));
2417  // Fix sign if sign bit was set.
2418  rsb(dest, dest, Operand(0, RelocInfo::NONE), LeaveCC, ne);
2419  bind(&done);
2420  }
2421 }
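// Host-side sketch (illustration only) of what the non-VFP path above does:
// decode the exponent and mantissa by hand, shift the mantissa into place and
// apply the sign. Returns false where the assembly would branch to not_int32.
#include <cstdint>
#include <cstring>

inline bool TruncateDoubleToInt32(double d, int32_t* out) {
  uint64_t bits;
  std::memcpy(&bits, &d, sizeof bits);
  int exponent = static_cast<int>((bits >> 52) & 0x7ff) - 1023;
  if (exponent >= 31) return false;              // too large, NaN or Infinity
  if (exponent < 0) { *out = 0; return true; }   // magnitude below 1.0
  uint64_t mantissa = (bits & ((uint64_t{1} << 52) - 1)) | (uint64_t{1} << 52);
  uint32_t magnitude = static_cast<uint32_t>(mantissa >> (52 - exponent));
  *out = (bits >> 63) ? -static_cast<int32_t>(magnitude)
                      : static_cast<int32_t>(magnitude);
  return true;
}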
2422 
2423 
2424 void MacroAssembler::EmitVFPTruncate(VFPRoundingMode rounding_mode,
2425  SwVfpRegister result,
2426  DwVfpRegister double_input,
2427  Register scratch1,
2428  Register scratch2,
2429  CheckForInexactConversion check_inexact) {
2430  ASSERT(CpuFeatures::IsSupported(VFP3));
2431  CpuFeatures::Scope scope(VFP3);
2432  Register prev_fpscr = scratch1;
2433  Register scratch = scratch2;
2434 
2435  int32_t check_inexact_conversion =
2436  (check_inexact == kCheckForInexactConversion) ? kVFPInexactExceptionBit : 0;
2437 
2438  // Set custom FPSCR:
2439  // - Set rounding mode.
2440  // - Clear vfp cumulative exception flags.
2441  // - Make sure Flush-to-zero mode control bit is unset.
2442  vmrs(prev_fpscr);
2443  bic(scratch,
2444  prev_fpscr,
2445  Operand(kVFPExceptionMask |
2446  check_inexact_conversion |
2447  kVFPRoundingModeMask |
2448  kVFPFlushToZeroMask));
2449  // 'Round To Nearest' is encoded by 0b00 so no bits need to be set.
2450  if (rounding_mode != kRoundToNearest) {
2451  orr(scratch, scratch, Operand(rounding_mode));
2452  }
2453  vmsr(scratch);
2454 
2455  // Convert the argument to an integer.
2456  vcvt_s32_f64(result,
2457  double_input,
2458  (rounding_mode == kRoundToZero) ? kDefaultRoundToZero
2459  : kFPSCRRounding);
2460 
2461  // Retrieve FPSCR.
2462  vmrs(scratch);
2463  // Restore FPSCR.
2464  vmsr(prev_fpscr);
2465  // Check for vfp exceptions.
2466  tst(scratch, Operand(kVFPExceptionMask | check_inexact_conversion));
2467 }
2468 
2469 
2470 void MacroAssembler::EmitOutOfInt32RangeTruncate(Register result,
2471  Register input_high,
2472  Register input_low,
2473  Register scratch) {
2474  Label done, normal_exponent, restore_sign;
2475 
2476  // Extract the biased exponent in result.
2477  Ubfx(result,
2478  input_high,
2479  HeapNumber::kExponentShift,
2480  HeapNumber::kExponentBits);
2481 
2482  // Check for Infinity and NaNs, which should return 0.
2483  cmp(result, Operand(HeapNumber::kExponentMask));
2484  mov(result, Operand(0), LeaveCC, eq);
2485  b(eq, &done);
2486 
2487  // Express exponent as delta to (number of mantissa bits + 31).
2488  sub(result,
2489  result,
2490  Operand(HeapNumber::kExponentBias + HeapNumber::kMantissaBits + 31),
2491  SetCC);
2492 
2493  // If the delta is strictly positive, all bits would be shifted away,
2494  // which means that we can return 0.
2495  b(le, &normal_exponent);
2496  mov(result, Operand(0));
2497  b(&done);
2498 
2499  bind(&normal_exponent);
2500  const int kShiftBase = HeapNumber::kNonMantissaBitsInTopWord - 1;
2501  // Calculate shift.
2502  add(scratch, result, Operand(kShiftBase + HeapNumber::kMantissaBits), SetCC);
2503 
2504  // Save the sign.
2505  Register sign = result;
2506  result = no_reg;
2507  and_(sign, input_high, Operand(HeapNumber::kSignMask));
2508 
2509  // Set the implicit 1 before the mantissa part in input_high.
2510  orr(input_high,
2511  input_high,
2512  Operand(1 << HeapNumber::kMantissaBitsInTopWord));
2513  // Shift the mantissa bits to the correct position.
2514  // We don't need to clear non-mantissa bits as they will be shifted away.
2515  // If they weren't, it would mean that the answer is in the 32-bit range.
2516  mov(input_high, Operand(input_high, LSL, scratch));
2517 
2518  // Replace the shifted bits with bits from the lower mantissa word.
2519  Label pos_shift, shift_done;
2520  rsb(scratch, scratch, Operand(32), SetCC);
2521  b(&pos_shift, ge);
2522 
2523  // Negate scratch.
2524  rsb(scratch, scratch, Operand(0));
2525  mov(input_low, Operand(input_low, LSL, scratch));
2526  b(&shift_done);
2527 
2528  bind(&pos_shift);
2529  mov(input_low, Operand(input_low, LSR, scratch));
2530 
2531  bind(&shift_done);
2532  orr(input_high, input_high, Operand(input_low));
2533  // Restore sign if necessary.
2534  cmp(sign, Operand(0));
2535  result = sign;
2536  sign = no_reg;
2537  rsb(result, input_high, Operand(0), LeaveCC, ne);
2538  mov(result, input_high, LeaveCC, eq);
2539  bind(&done);
2540 }
2541 
2542 
2543 void MacroAssembler::EmitECMATruncate(Register result,
2544  DwVfpRegister double_input,
2545  SwVfpRegister single_scratch,
2546  Register scratch,
2547  Register input_high,
2548  Register input_low) {
2549  CpuFeatures::Scope scope(VFP3);
2550  ASSERT(!input_high.is(result));
2551  ASSERT(!input_low.is(result));
2552  ASSERT(!input_low.is(input_high));
2553  ASSERT(!scratch.is(result) &&
2554  !scratch.is(input_high) &&
2555  !scratch.is(input_low));
2556  ASSERT(!single_scratch.is(double_input.low()) &&
2557  !single_scratch.is(double_input.high()));
2558 
2559  Label done;
2560 
2561  // Clear cumulative exception flags.
2562  ClearFPSCRBits(kVFPExceptionMask, scratch);
2563  // Try a conversion to a signed integer.
2564  vcvt_s32_f64(single_scratch, double_input);
2565  vmov(result, single_scratch);
2566  // Retrieve the FPSCR.
2567  vmrs(scratch);
2568  // Check for overflow and NaNs.
2569  tst(scratch, Operand(kVFPOverflowExceptionBit |
2570  kVFPUnderflowExceptionBit |
2571  kVFPInvalidOpExceptionBit));
2572  // If we had no exceptions we are done.
2573  b(eq, &done);
2574 
2575  // Load the double value and perform a manual truncation.
2576  vmov(input_low, input_high, double_input);
2577  EmitOutOfInt32RangeTruncate(result,
2578  input_high,
2579  input_low,
2580  scratch);
2581  bind(&done);
2582 }
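// Sketch of the ECMA-262 ToInt32 behaviour that EmitECMATruncate is named
// for: NaN and the infinities map to 0, everything else is truncated towards
// zero and reduced modulo 2^32 into a signed 32-bit value. Illustration only.
#include <cmath>
#include <cstdint>

inline int32_t EcmaToInt32(double d) {
  if (std::isnan(d) || std::isinf(d)) return 0;
  double truncated = std::trunc(d);                    // round towards zero
  double modulo = std::fmod(truncated, 4294967296.0);  // keep the low 32 bits
  if (modulo < 0) modulo += 4294967296.0;
  return static_cast<int32_t>(static_cast<uint32_t>(modulo));
}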
2583 
2584 
2585 void MacroAssembler::GetLeastBitsFromSmi(Register dst,
2586  Register src,
2587  int num_least_bits) {
2588  if (CpuFeatures::IsSupported(ARMv7)) {
2589  ubfx(dst, src, kSmiTagSize, num_least_bits);
2590  } else {
2591  mov(dst, Operand(src, ASR, kSmiTagSize));
2592  and_(dst, dst, Operand((1 << num_least_bits) - 1));
2593  }
2594 }
2595 
2596 
2597 void MacroAssembler::GetLeastBitsFromInt32(Register dst,
2598  Register src,
2599  int num_least_bits) {
2600  and_(dst, src, Operand((1 << num_least_bits) - 1));
2601 }
2602 
2603 
2604 void MacroAssembler::CallRuntime(const Runtime::Function* f,
2605  int num_arguments) {
2606  // All parameters are on the stack. r0 has the return value after call.
2607 
2608  // If the expected number of arguments of the runtime function is
2609  // constant, we check that the actual number of arguments match the
2610  // expectation.
2611  if (f->nargs >= 0 && f->nargs != num_arguments) {
2612  IllegalOperation(num_arguments);
2613  return;
2614  }
2615 
2616  // TODO(1236192): Most runtime routines don't need the number of
2617  // arguments passed in because it is constant. At some point we
2618  // should remove this need and make the runtime routine entry code
2619  // smarter.
2620  mov(r0, Operand(num_arguments));
2621  mov(r1, Operand(ExternalReference(f, isolate())));
2622  CEntryStub stub(1);
2623  CallStub(&stub);
2624 }
2625 
2626 
2627 void MacroAssembler::CallRuntime(Runtime::FunctionId fid, int num_arguments) {
2628  CallRuntime(Runtime::FunctionForId(fid), num_arguments);
2629 }
2630 
2631 
2632 void MacroAssembler::CallRuntimeSaveDoubles(Runtime::FunctionId id) {
2633  const Runtime::Function* function = Runtime::FunctionForId(id);
2634  mov(r0, Operand(function->nargs));
2635  mov(r1, Operand(ExternalReference(function, isolate())));
2636  CEntryStub stub(1, kSaveFPRegs);
2637  CallStub(&stub);
2638 }
2639 
2640 
2641 void MacroAssembler::CallExternalReference(const ExternalReference& ext,
2642  int num_arguments) {
2643  mov(r0, Operand(num_arguments));
2644  mov(r1, Operand(ext));
2645 
2646  CEntryStub stub(1);
2647  CallStub(&stub);
2648 }
2649 
2650 
2651 void MacroAssembler::TailCallExternalReference(const ExternalReference& ext,
2652  int num_arguments,
2653  int result_size) {
2654  // TODO(1236192): Most runtime routines don't need the number of
2655  // arguments passed in because it is constant. At some point we
2656  // should remove this need and make the runtime routine entry code
2657  // smarter.
2658  mov(r0, Operand(num_arguments));
2659  JumpToExternalReference(ext);
2660 }
2661 
2662 
2663 void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
2664  int num_arguments,
2665  int result_size) {
2666  TailCallExternalReference(ExternalReference(fid, isolate()),
2667  num_arguments,
2668  result_size);
2669 }
2670 
2671 
2672 void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin) {
2673 #if defined(__thumb__)
2674  // Thumb mode builtin.
2675  ASSERT((reinterpret_cast<intptr_t>(builtin.address()) & 1) == 1);
2676 #endif
2677  mov(r1, Operand(builtin));
2678  CEntryStub stub(1);
2679  Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
2680 }
2681 
2682 
2683 void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
2684  InvokeFlag flag,
2685  const CallWrapper& call_wrapper) {
2686  // You can't call a builtin without a valid frame.
2687  ASSERT(flag == JUMP_FUNCTION || has_frame());
2688 
2689  GetBuiltinEntry(r2, id);
2690  if (flag == CALL_FUNCTION) {
2691  call_wrapper.BeforeCall(CallSize(r2));
2692  SetCallKind(r5, CALL_AS_METHOD);
2693  Call(r2);
2694  call_wrapper.AfterCall();
2695  } else {
2696  ASSERT(flag == JUMP_FUNCTION);
2697  SetCallKind(r5, CALL_AS_METHOD);
2698  Jump(r2);
2699  }
2700 }
2701 
2702 
2703 void MacroAssembler::GetBuiltinFunction(Register target,
2704  Builtins::JavaScript id) {
2705  // Load the builtins object into target register.
2706  ldr(target, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
2707  ldr(target, FieldMemOperand(target, GlobalObject::kBuiltinsOffset));
2708  // Load the JavaScript builtin function from the builtins object.
2709  ldr(target, FieldMemOperand(target,
2710  JSBuiltinsObject::OffsetOfFunctionWithId(id)));
2711 }
2712 
2713 
2714 void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
2715  ASSERT(!target.is(r1));
2716  GetBuiltinFunction(r1, id);
2717  // Load the code entry point from the builtins object.
2718  ldr(target, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
2719 }
2720 
2721 
2722 void MacroAssembler::SetCounter(StatsCounter* counter, int value,
2723  Register scratch1, Register scratch2) {
2724  if (FLAG_native_code_counters && counter->Enabled()) {
2725  mov(scratch1, Operand(value));
2726  mov(scratch2, Operand(ExternalReference(counter)));
2727  str(scratch1, MemOperand(scratch2));
2728  }
2729 }
2730 
2731 
2732 void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
2733  Register scratch1, Register scratch2) {
2734  ASSERT(value > 0);
2735  if (FLAG_native_code_counters && counter->Enabled()) {
2736  mov(scratch2, Operand(ExternalReference(counter)));
2737  ldr(scratch1, MemOperand(scratch2));
2738  add(scratch1, scratch1, Operand(value));
2739  str(scratch1, MemOperand(scratch2));
2740  }
2741 }
2742 
2743 
2744 void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
2745  Register scratch1, Register scratch2) {
2746  ASSERT(value > 0);
2747  if (FLAG_native_code_counters && counter->Enabled()) {
2748  mov(scratch2, Operand(ExternalReference(counter)));
2749  ldr(scratch1, MemOperand(scratch2));
2750  sub(scratch1, scratch1, Operand(value));
2751  str(scratch1, MemOperand(scratch2));
2752  }
2753 }
2754 
2755 
2756 void MacroAssembler::Assert(Condition cond, const char* msg) {
2757  if (emit_debug_code())
2758  Check(cond, msg);
2759 }
2760 
2761 
2762 void MacroAssembler::AssertRegisterIsRoot(Register reg,
2763  Heap::RootListIndex index) {
2764  if (emit_debug_code()) {
2765  LoadRoot(ip, index);
2766  cmp(reg, ip);
2767  Check(eq, "Register did not match expected root");
2768  }
2769 }
2770 
2771 
2772 void MacroAssembler::AssertFastElements(Register elements) {
2773  if (emit_debug_code()) {
2774  ASSERT(!elements.is(ip));
2775  Label ok;
2776  push(elements);
2777  ldr(elements, FieldMemOperand(elements, HeapObject::kMapOffset));
2778  LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
2779  cmp(elements, ip);
2780  b(eq, &ok);
2781  LoadRoot(ip, Heap::kFixedDoubleArrayMapRootIndex);
2782  cmp(elements, ip);
2783  b(eq, &ok);
2784  LoadRoot(ip, Heap::kFixedCOWArrayMapRootIndex);
2785  cmp(elements, ip);
2786  b(eq, &ok);
2787  Abort("JSObject with fast elements map has slow elements");
2788  bind(&ok);
2789  pop(elements);
2790  }
2791 }
2792 
2793 
2794 void MacroAssembler::Check(Condition cond, const char* msg) {
2795  Label L;
2796  b(cond, &L);
2797  Abort(msg);
2798  // will not return here
2799  bind(&L);
2800 }
2801 
2802 
2803 void MacroAssembler::Abort(const char* msg) {
2804  Label abort_start;
2805  bind(&abort_start);
2806  // We want to pass the msg string like a smi to avoid GC
2807  // problems; however, msg is not guaranteed to be aligned
2808  // properly. Instead, we pass an aligned pointer that is
2809  // a proper v8 smi, but also pass the alignment difference
2810  // from the real pointer as a smi.
2811  intptr_t p1 = reinterpret_cast<intptr_t>(msg);
2812  intptr_t p0 = (p1 & ~kSmiTagMask) + kSmiTag;
2813  ASSERT(reinterpret_cast<Object*>(p0)->IsSmi());
2814 #ifdef DEBUG
2815  if (msg != NULL) {
2816  RecordComment("Abort message: ");
2817  RecordComment(msg);
2818  }
2819 #endif
2820 
2821  mov(r0, Operand(p0));
2822  push(r0);
2823  mov(r0, Operand(Smi::FromInt(p1 - p0)));
2824  push(r0);
2825  // Disable stub call restrictions to always allow calls to abort.
2826  if (!has_frame_) {
2827  // We don't actually want to generate a pile of code for this, so just
2828  // claim there is a stack frame, without generating one.
2829  FrameScope scope(this, StackFrame::NONE);
2830  CallRuntime(Runtime::kAbort, 2);
2831  } else {
2832  CallRuntime(Runtime::kAbort, 2);
2833  }
2834  // will not return here
2835  if (is_const_pool_blocked()) {
2836  // If the calling code cares about the exact number of
2837  // instructions generated, we insert padding here to keep the size
2838  // of the Abort macro constant.
2839  static const int kExpectedAbortInstructions = 10;
2840  int abort_instructions = InstructionsGeneratedSince(&abort_start);
2841  ASSERT(abort_instructions <= kExpectedAbortInstructions);
2842  while (abort_instructions++ < kExpectedAbortInstructions) {
2843  nop();
2844  }
2845  }
2846 }
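// Sketch of the pointer encoding used by Abort above: the message pointer is
// rounded down to a smi-looking value and the small rounding difference is
// passed separately as a smi, so the GC never inspects a raw, possibly
// unaligned pointer. Illustration only; kSketchSmiTagMask is an assumption.
#include <cstdint>

const intptr_t kSketchSmiTagMask = 1;  // one tag bit, smi tag value 0

inline void SplitAbortMessage(const char* msg, intptr_t* p0, intptr_t* delta) {
  intptr_t p1 = reinterpret_cast<intptr_t>(msg);
  *p0 = p1 & ~kSketchSmiTagMask;  // looks like a smi to the GC
  *delta = p1 - *p0;              // recoverable alignment difference
}

inline const char* JoinAbortMessage(intptr_t p0, intptr_t delta) {
  return reinterpret_cast<const char*>(p0 + delta);
}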
2847 
2848 
2849 void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
2850  if (context_chain_length > 0) {
2851  // Move up the chain of contexts to the context containing the slot.
2852  ldr(dst, MemOperand(cp, Context::SlotOffset(Context::PREVIOUS_INDEX)));
2853  for (int i = 1; i < context_chain_length; i++) {
2854  ldr(dst, MemOperand(dst, Context::SlotOffset(Context::PREVIOUS_INDEX)));
2855  }
2856  } else {
2857  // Slot is in the current function context. Move it into the
2858  // destination register in case we store into it (the write barrier
2859  // cannot be allowed to destroy the context in esi).
2860  mov(dst, cp);
2861  }
2862 }
2863 
2864 
2865 void MacroAssembler::LoadTransitionedArrayMapConditional(
2866  ElementsKind expected_kind,
2867  ElementsKind transitioned_kind,
2868  Register map_in_out,
2869  Register scratch,
2870  Label* no_map_match) {
2871  // Load the global or builtins object from the current context.
2872  ldr(scratch, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
2873  ldr(scratch, FieldMemOperand(scratch, GlobalObject::kGlobalContextOffset));
2874 
2875  // Check that the function's map is the same as the expected cached map.
2876  ldr(scratch,
2877  MemOperand(scratch,
2878  Context::SlotOffset(Context::JS_ARRAY_MAPS_INDEX)));
2879  size_t offset = expected_kind * kPointerSize +
2880  FixedArrayBase::kHeaderSize;
2881  cmp(map_in_out, scratch);
2882  b(ne, no_map_match);
2883 
2884  // Use the transitioned cached map.
2885  offset = transitioned_kind * kPointerSize +
2886  FixedArrayBase::kHeaderSize;
2887  ldr(map_in_out, FieldMemOperand(scratch, offset));
2888 }
2889 
2890 
2891 void MacroAssembler::LoadInitialArrayMap(
2892  Register function_in, Register scratch,
2893  Register map_out, bool can_have_holes) {
2894  ASSERT(!function_in.is(map_out));
2895  Label done;
2896  ldr(map_out, FieldMemOperand(function_in,
2897  JSFunction::kPrototypeOrInitialMapOffset));
2898  if (!FLAG_smi_only_arrays) {
2899  ElementsKind kind = can_have_holes ? FAST_HOLEY_ELEMENTS : FAST_ELEMENTS;
2900  LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
2901  kind,
2902  map_out,
2903  scratch,
2904  &done);
2905  } else if (can_have_holes) {
2906  LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
2907  FAST_HOLEY_SMI_ELEMENTS,
2908  map_out,
2909  scratch,
2910  &done);
2911  }
2912  bind(&done);
2913 }
2914 
2915 
2916 void MacroAssembler::LoadGlobalFunction(int index, Register function) {
2917  // Load the global or builtins object from the current context.
2918  ldr(function, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
2919  // Load the global context from the global or builtins object.
2920  ldr(function, FieldMemOperand(function,
2921  GlobalObject::kGlobalContextOffset));
2922  // Load the function from the global context.
2923  ldr(function, MemOperand(function, Context::SlotOffset(index)));
2924 }
2925 
2926 
2927 void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
2928  Register map,
2929  Register scratch) {
2930  // Load the initial map. The global functions all have initial maps.
2931  ldr(map, FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
2932  if (emit_debug_code()) {
2933  Label ok, fail;
2934  CheckMap(map, scratch, Heap::kMetaMapRootIndex, &fail, DO_SMI_CHECK);
2935  b(&ok);
2936  bind(&fail);
2937  Abort("Global functions must have initial map");
2938  bind(&ok);
2939  }
2940 }
2941 
2942 
2943 void MacroAssembler::JumpIfNotPowerOfTwoOrZero(
2944  Register reg,
2945  Register scratch,
2946  Label* not_power_of_two_or_zero) {
2947  sub(scratch, reg, Operand(1), SetCC);
2948  b(mi, not_power_of_two_or_zero);
2949  tst(scratch, reg);
2950  b(ne, not_power_of_two_or_zero);
2951 }
2952 
2953 
2954 void MacroAssembler::JumpIfNotPowerOfTwoOrZeroAndNeg(
2955  Register reg,
2956  Register scratch,
2957  Label* zero_and_neg,
2958  Label* not_power_of_two) {
2959  sub(scratch, reg, Operand(1), SetCC);
2960  b(mi, zero_and_neg);
2961  tst(scratch, reg);
2962  b(ne, not_power_of_two);
2963 }
2964 
2965 
2966 void MacroAssembler::JumpIfNotBothSmi(Register reg1,
2967  Register reg2,
2968  Label* on_not_both_smi) {
2969  STATIC_ASSERT(kSmiTag == 0);
2970  tst(reg1, Operand(kSmiTagMask));
2971  tst(reg2, Operand(kSmiTagMask), eq);
2972  b(ne, on_not_both_smi);
2973 }
2974 
2975 
2976 void MacroAssembler::UntagAndJumpIfSmi(
2977  Register dst, Register src, Label* smi_case) {
2978  STATIC_ASSERT(kSmiTag == 0);
2979  mov(dst, Operand(src, ASR, kSmiTagSize), SetCC);
2980  b(cc, smi_case); // Shifter carry is not set for a smi.
2981 }
2982 
2983 
2984 void MacroAssembler::UntagAndJumpIfNotSmi(
2985  Register dst, Register src, Label* non_smi_case) {
2986  STATIC_ASSERT(kSmiTag == 0);
2987  mov(dst, Operand(src, ASR, kSmiTagSize), SetCC);
2988  b(cs, non_smi_case); // Shifter carry is set for a non-smi.
2989 }
2990 
2991 
2992 void MacroAssembler::JumpIfEitherSmi(Register reg1,
2993  Register reg2,
2994  Label* on_either_smi) {
2995  STATIC_ASSERT(kSmiTag == 0);
2996  tst(reg1, Operand(kSmiTagMask));
2997  tst(reg2, Operand(kSmiTagMask), ne);
2998  b(eq, on_either_smi);
2999 }
3000 
3001 
3002 void MacroAssembler::AbortIfSmi(Register object) {
3003  STATIC_ASSERT(kSmiTag == 0);
3004  tst(object, Operand(kSmiTagMask));
3005  Assert(ne, "Operand is a smi");
3006 }
3007 
3008 
3009 void MacroAssembler::AbortIfNotSmi(Register object) {
3010  STATIC_ASSERT(kSmiTag == 0);
3011  tst(object, Operand(kSmiTagMask));
3012  Assert(eq, "Operand is not smi");
3013 }
3014 
3015 
3016 void MacroAssembler::AbortIfNotString(Register object) {
3017  STATIC_ASSERT(kSmiTag == 0);
3018  tst(object, Operand(kSmiTagMask));
3019  Assert(ne, "Operand is not a string");
3020  push(object);
3021  ldr(object, FieldMemOperand(object, HeapObject::kMapOffset));
3022  CompareInstanceType(object, object, FIRST_NONSTRING_TYPE);
3023  pop(object);
3024  Assert(lo, "Operand is not a string");
3025 }
3026 
3027 
3028 
3029 void MacroAssembler::AbortIfNotRootValue(Register src,
3030  Heap::RootListIndex root_value_index,
3031  const char* message) {
3032  CompareRoot(src, root_value_index);
3033  Assert(eq, message);
3034 }
3035 
3036 
3037 void MacroAssembler::JumpIfNotHeapNumber(Register object,
3038  Register heap_number_map,
3039  Register scratch,
3040  Label* on_not_heap_number) {
3041  ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
3042  AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
3043  cmp(scratch, heap_number_map);
3044  b(ne, on_not_heap_number);
3045 }
3046 
3047 
3048 void MacroAssembler::JumpIfNonSmisNotBothSequentialAsciiStrings(
3049  Register first,
3050  Register second,
3051  Register scratch1,
3052  Register scratch2,
3053  Label* failure) {
3054  // Test that both first and second are sequential ASCII strings.
3055  // Assume that they are non-smis.
3056  ldr(scratch1, FieldMemOperand(first, HeapObject::kMapOffset));
3057  ldr(scratch2, FieldMemOperand(second, HeapObject::kMapOffset));
3058  ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
3059  ldrb(scratch2, FieldMemOperand(scratch2, Map::kInstanceTypeOffset));
3060 
3061  JumpIfBothInstanceTypesAreNotSequentialAscii(scratch1,
3062  scratch2,
3063  scratch1,
3064  scratch2,
3065  failure);
3066 }
3067 
3068 void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(Register first,
3069  Register second,
3070  Register scratch1,
3071  Register scratch2,
3072  Label* failure) {
3073  // Check that neither is a smi.
3074  STATIC_ASSERT(kSmiTag == 0);
3075  and_(scratch1, first, Operand(second));
3076  JumpIfSmi(scratch1, failure);
3077  JumpIfNonSmisNotBothSequentialAsciiStrings(first,
3078  second,
3079  scratch1,
3080  scratch2,
3081  failure);
3082 }
3083 
3084 
3085 // Allocates a heap number or jumps to the gc_required label if the young space
3086 // is full and a scavenge is needed.
3087 void MacroAssembler::AllocateHeapNumber(Register result,
3088  Register scratch1,
3089  Register scratch2,
3090  Register heap_number_map,
3091  Label* gc_required) {
3092  // Allocate an object in the heap for the heap number and tag it as a heap
3093  // object.
3094  AllocateInNewSpace(HeapNumber::kSize,
3095  result,
3096  scratch1,
3097  scratch2,
3098  gc_required,
3099  TAG_OBJECT);
3100 
3101  // Store heap number map in the allocated object.
3102  AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
3103  str(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset));
3104 }
3105 
3106 
3107 void MacroAssembler::AllocateHeapNumberWithValue(Register result,
3108  DwVfpRegister value,
3109  Register scratch1,
3110  Register scratch2,
3111  Register heap_number_map,
3112  Label* gc_required) {
3113  AllocateHeapNumber(result, scratch1, scratch2, heap_number_map, gc_required);
3114  sub(scratch1, result, Operand(kHeapObjectTag));
3115  vstr(value, scratch1, HeapNumber::kValueOffset);
3116 }
3117 
3118 
3119 // Copies a fixed number of fields of heap objects from src to dst.
3120 void MacroAssembler::CopyFields(Register dst,
3121  Register src,
3122  RegList temps,
3123  int field_count) {
3124  // At least one bit set in the first 15 registers.
3125  ASSERT((temps & ((1 << 15) - 1)) != 0);
3126  ASSERT((temps & dst.bit()) == 0);
3127  ASSERT((temps & src.bit()) == 0);
3128  // Primitive implementation using only one temporary register.
3129 
3130  Register tmp = no_reg;
3131  // Find a temp register in temps list.
3132  for (int i = 0; i < 15; i++) {
3133  if ((temps & (1 << i)) != 0) {
3134  tmp.set_code(i);
3135  break;
3136  }
3137  }
3138  ASSERT(!tmp.is(no_reg));
3139 
3140  for (int i = 0; i < field_count; i++) {
3141  ldr(tmp, FieldMemOperand(src, i * kPointerSize));
3142  str(tmp, FieldMemOperand(dst, i * kPointerSize));
3143  }
3144 }
3145 
3146 
3147 void MacroAssembler::CopyBytes(Register src,
3148  Register dst,
3149  Register length,
3150  Register scratch) {
3151  Label align_loop, align_loop_1, word_loop, byte_loop, byte_loop_1, done;
3152 
3153  // Align src before copying in word size chunks.
3154  bind(&align_loop);
3155  cmp(length, Operand(0));
3156  b(eq, &done);
3157  bind(&align_loop_1);
3158  tst(src, Operand(kPointerSize - 1));
3159  b(eq, &word_loop);
3160  ldrb(scratch, MemOperand(src, 1, PostIndex));
3161  strb(scratch, MemOperand(dst, 1, PostIndex));
3162  sub(length, length, Operand(1), SetCC);
3163  b(ne, &byte_loop_1);
3164 
3165  // Copy bytes in word size chunks.
3166  bind(&word_loop);
3167  if (emit_debug_code()) {
3168  tst(src, Operand(kPointerSize - 1));
3169  Assert(eq, "Expecting alignment for CopyBytes");
3170  }
3171  cmp(length, Operand(kPointerSize));
3172  b(lt, &byte_loop);
3173  ldr(scratch, MemOperand(src, kPointerSize, PostIndex));
3174 #if CAN_USE_UNALIGNED_ACCESSES
3175  str(scratch, MemOperand(dst, kPointerSize, PostIndex));
3176 #else
3177  strb(scratch, MemOperand(dst, 1, PostIndex));
3178  mov(scratch, Operand(scratch, LSR, 8));
3179  strb(scratch, MemOperand(dst, 1, PostIndex));
3180  mov(scratch, Operand(scratch, LSR, 8));
3181  strb(scratch, MemOperand(dst, 1, PostIndex));
3182  mov(scratch, Operand(scratch, LSR, 8));
3183  strb(scratch, MemOperand(dst, 1, PostIndex));
3184 #endif
3185  sub(length, length, Operand(kPointerSize));
3186  b(&word_loop);
3187 
3188  // Copy the last bytes if any left.
3189  bind(&byte_loop);
3190  cmp(length, Operand(0));
3191  b(eq, &done);
3192  bind(&byte_loop_1);
3193  ldrb(scratch, MemOperand(src, 1, PostIndex));
3194  strb(scratch, MemOperand(dst, 1, PostIndex));
3195  sub(length, length, Operand(1), SetCC);
3196  b(ne, &byte_loop_1);
3197  bind(&done);
3198 }
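// Sketch of the little-endian fallback above for targets without unaligned
// word stores: the loaded word is written out byte by byte, shifting right by
// eight bits each time. Illustration only, not V8 code.
#include <cstdint>

inline void StoreWordAsBytes(uint8_t* dst, uint32_t word) {
  for (int i = 0; i < 4; i++) {
    dst[i] = static_cast<uint8_t>(word & 0xff);  // strb, then LSR #8 above
    word >>= 8;
  }
}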
3199 
3200 
3201 void MacroAssembler::InitializeFieldsWithFiller(Register start_offset,
3202  Register end_offset,
3203  Register filler) {
3204  Label loop, entry;
3205  b(&entry);
3206  bind(&loop);
3207  str(filler, MemOperand(start_offset, kPointerSize, PostIndex));
3208  bind(&entry);
3209  cmp(start_offset, end_offset);
3210  b(lt, &loop);
3211 }
3212 
3213 
3214 void MacroAssembler::CountLeadingZeros(Register zeros, // Answer.
3215  Register source, // Input.
3216  Register scratch) {
3217  ASSERT(!zeros.is(source) || !source.is(scratch));
3218  ASSERT(!zeros.is(scratch));
3219  ASSERT(!scratch.is(ip));
3220  ASSERT(!source.is(ip));
3221  ASSERT(!zeros.is(ip));
3222 #ifdef CAN_USE_ARMV5_INSTRUCTIONS
3223  clz(zeros, source); // This instruction is only supported after ARM5.
3224 #else
3225  // Order of the next two lines is important: zeros register
3226  // can be the same as source register.
3227  Move(scratch, source);
3228  mov(zeros, Operand(0, RelocInfo::NONE));
3229  // Top 16.
3230  tst(scratch, Operand(0xffff0000));
3231  add(zeros, zeros, Operand(16), LeaveCC, eq);
3232  mov(scratch, Operand(scratch, LSL, 16), LeaveCC, eq);
3233  // Top 8.
3234  tst(scratch, Operand(0xff000000));
3235  add(zeros, zeros, Operand(8), LeaveCC, eq);
3236  mov(scratch, Operand(scratch, LSL, 8), LeaveCC, eq);
3237  // Top 4.
3238  tst(scratch, Operand(0xf0000000));
3239  add(zeros, zeros, Operand(4), LeaveCC, eq);
3240  mov(scratch, Operand(scratch, LSL, 4), LeaveCC, eq);
3241  // Top 2.
3242  tst(scratch, Operand(0xc0000000));
3243  add(zeros, zeros, Operand(2), LeaveCC, eq);
3244  mov(scratch, Operand(scratch, LSL, 2), LeaveCC, eq);
3245  // Top bit.
3246  tst(scratch, Operand(0x80000000u));
3247  add(zeros, zeros, Operand(1), LeaveCC, eq);
3248 #endif
3249 }
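// Portable sketch of the pre-ARMv5 fallback above: a conditional binary
// search over half-widths. Like that instruction sequence, it reports 31
// (not 32) for a zero input. Illustration only.
#include <cstdint>

inline int CountLeadingZeros32(uint32_t x) {
  int zeros = 0;
  if ((x & 0xffff0000u) == 0) { zeros += 16; x <<= 16; }  // top 16
  if ((x & 0xff000000u) == 0) { zeros += 8;  x <<= 8;  }  // top 8
  if ((x & 0xf0000000u) == 0) { zeros += 4;  x <<= 4;  }  // top 4
  if ((x & 0xc0000000u) == 0) { zeros += 2;  x <<= 2;  }  // top 2
  if ((x & 0x80000000u) == 0) { zeros += 1; }             // top bit
  return zeros;
}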
3250 
3251 
3252 void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialAscii(
3253  Register first,
3254  Register second,
3255  Register scratch1,
3256  Register scratch2,
3257  Label* failure) {
3258  int kFlatAsciiStringMask =
3259  kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
3260  int kFlatAsciiStringTag = ASCII_STRING_TYPE;
3261  and_(scratch1, first, Operand(kFlatAsciiStringMask));
3262  and_(scratch2, second, Operand(kFlatAsciiStringMask));
3263  cmp(scratch1, Operand(kFlatAsciiStringTag));
3264  // Ignore second test if first test failed.
3265  cmp(scratch2, Operand(kFlatAsciiStringTag), eq);
3266  b(ne, failure);
3267 }
3268 
3269 
3270 void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii(Register type,
3271  Register scratch,
3272  Label* failure) {
3273  int kFlatAsciiStringMask =
3274  kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
3275  int kFlatAsciiStringTag = ASCII_STRING_TYPE;
3276  and_(scratch, type, Operand(kFlatAsciiStringMask));
3277  cmp(scratch, Operand(kFlatAsciiStringTag));
3278  b(ne, failure);
3279 }
3280 
3281 static const int kRegisterPassedArguments = 4;
3282 
3283 
3284 int MacroAssembler::CalculateStackPassedWords(int num_reg_arguments,
3285  int num_double_arguments) {
3286  int stack_passed_words = 0;
3287  if (use_eabi_hardfloat()) {
3288  // In the hard floating point calling convention, we can use
3289  // all double registers to pass doubles.
3290  if (num_double_arguments > DoubleRegister::kNumRegisters) {
3291  stack_passed_words +=
3292  2 * (num_double_arguments - DoubleRegister::kNumRegisters);
3293  }
3294  } else {
3295  // In the soft floating point calling convention, every double
3296  // argument is passed using two registers.
3297  num_reg_arguments += 2 * num_double_arguments;
3298  }
3299  // Up to four simple arguments are passed in registers r0..r3.
3300  if (num_reg_arguments > kRegisterPassedArguments) {
3301  stack_passed_words += num_reg_arguments - kRegisterPassedArguments;
3302  }
3303  return stack_passed_words;
3304 }
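// Worked example of the soft-float branch above (illustration only): every
// double occupies two of the four argument registers, so one integer argument
// plus two double arguments needs 1 + 2*2 = 5 slots, of which 5 - 4 = 1 word
// spills onto the stack.
#include <algorithm>

inline int StackPassedWordsSoftFloat(int num_reg_arguments,
                                     int num_double_arguments) {
  const int kRegisterPassedArgs = 4;  // r0..r3, as in the code above
  int slots = num_reg_arguments + 2 * num_double_arguments;
  return std::max(0, slots - kRegisterPassedArgs);
}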
3305 
3306 
3307 void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
3308  int num_double_arguments,
3309  Register scratch) {
3310  int frame_alignment = ActivationFrameAlignment();
3311  int stack_passed_arguments = CalculateStackPassedWords(
3312  num_reg_arguments, num_double_arguments);
3313  if (frame_alignment > kPointerSize) {
3314  // Make the stack end at an aligned address and make room for the
3315  // stack-passed arguments plus a slot for the original value of sp.
3316  mov(scratch, sp);
3317  sub(sp, sp, Operand((stack_passed_arguments + 1) * kPointerSize));
3318  ASSERT(IsPowerOf2(frame_alignment));
3319  and_(sp, sp, Operand(-frame_alignment));
3320  str(scratch, MemOperand(sp, stack_passed_arguments * kPointerSize));
3321  } else {
3322  sub(sp, sp, Operand(stack_passed_arguments * kPointerSize));
3323  }
3324 }
3325 
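PrepareCallCFunction above over-aligns the frame by reserving one extra slot, rounding sp down with an AND against the negated alignment, and parking the original sp in that extra slot so CallCFunctionHelper can restore it with a single load. A small C++ sketch of the align-down idiom, assuming a power-of-two alignment as the ASSERT above requires (names are illustrative, not V8's):

#include <cstdint>

// sp & -alignment in the code above is the same as clearing the low bits.
static uintptr_t AlignDown(uintptr_t sp, uintptr_t alignment) {
  return sp & ~(alignment - 1);  // Valid only for power-of-two alignments.
}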
3326 
3327 void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
3328  Register scratch) {
3329  PrepareCallCFunction(num_reg_arguments, 0, scratch);
3330 }
3331 
3332 
3333 void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg) {
3334  if (use_eabi_hardfloat()) {
3335  Move(d0, dreg);
3336  } else {
3337  vmov(r0, r1, dreg);
3338  }
3339 }
3340 
3341 
3342 void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg1,
3343  DoubleRegister dreg2) {
3344  if (use_eabi_hardfloat()) {
3345  if (dreg2.is(d0)) {
3346  ASSERT(!dreg1.is(d1));
3347  Move(d1, dreg2);
3348  Move(d0, dreg1);
3349  } else {
3350  Move(d0, dreg1);
3351  Move(d1, dreg2);
3352  }
3353  } else {
3354  vmov(r0, r1, dreg1);
3355  vmov(r2, r3, dreg2);
3356  }
3357 }
3358 
3359 
3360 void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg,
3361  Register reg) {
3362  if (use_eabi_hardfloat()) {
3363  Move(d0, dreg);
3364  Move(r0, reg);
3365  } else {
3366  Move(r2, reg);
3367  vmov(r0, r1, dreg);
3368  }
3369 }
3370 
3371 
3372 void MacroAssembler::CallCFunction(ExternalReference function,
3373  int num_reg_arguments,
3374  int num_double_arguments) {
3375  mov(ip, Operand(function));
3376  CallCFunctionHelper(ip, num_reg_arguments, num_double_arguments);
3377 }
3378 
3379 
3380 void MacroAssembler::CallCFunction(Register function,
3381  int num_reg_arguments,
3382  int num_double_arguments) {
3383  CallCFunctionHelper(function, num_reg_arguments, num_double_arguments);
3384 }
3385 
3386 
3387 void MacroAssembler::CallCFunction(ExternalReference function,
3388  int num_arguments) {
3389  CallCFunction(function, num_arguments, 0);
3390 }
3391 
3392 
3393 void MacroAssembler::CallCFunction(Register function,
3394  int num_arguments) {
3395  CallCFunction(function, num_arguments, 0);
3396 }
3397 
3398 
3399 void MacroAssembler::CallCFunctionHelper(Register function,
3400  int num_reg_arguments,
3401  int num_double_arguments) {
3402  ASSERT(has_frame());
3403  // Make sure that the stack is aligned before calling a C function unless
3404  // running in the simulator. The simulator has its own alignment check which
3405  // provides more information.
3406 #if defined(V8_HOST_ARCH_ARM)
3407  if (emit_debug_code()) {
3408  int frame_alignment = OS::ActivationFrameAlignment();
3409  int frame_alignment_mask = frame_alignment - 1;
3410  if (frame_alignment > kPointerSize) {
3411  ASSERT(IsPowerOf2(frame_alignment));
3412  Label alignment_as_expected;
3413  tst(sp, Operand(frame_alignment_mask));
3414  b(eq, &alignment_as_expected);
3415  // Don't use Check here, as it will call Runtime_Abort and possibly
3416  // re-enter this code.
3417  stop("Unexpected alignment");
3418  bind(&alignment_as_expected);
3419  }
3420  }
3421 #endif
3422 
3423  // Just call directly. The function called cannot cause a GC, or
3424  // allow preemption, so the return address in the link register
3425  // stays correct.
3426  Call(function);
3427  int stack_passed_arguments = CalculateStackPassedWords(
3428  num_reg_arguments, num_double_arguments);
3429  if (ActivationFrameAlignment() > kPointerSize) {
3430  ldr(sp, MemOperand(sp, stack_passed_arguments * kPointerSize));
3431  } else {
3432  add(sp, sp, Operand(stack_passed_arguments * kPointerSize));
3433  }
3434 }
3435 
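After the call, CallCFunctionHelper undoes whatever PrepareCallCFunction set up: if the frame was over-aligned, the saved original sp sits just above the stack-passed argument words and is reloaded directly; otherwise sp is simply bumped past those words. A hedged C++ sketch of the two restore paths (illustrative names, not V8 API):

#include <cstdint>

static uintptr_t RestoreStackPointer(uintptr_t sp, int stack_passed_words,
                                     int frame_alignment, int pointer_size) {
  if (frame_alignment > pointer_size) {
    // Over-aligned frame: reload the original sp saved by the prologue.
    return *reinterpret_cast<const uintptr_t*>(
        sp + stack_passed_words * pointer_size);
  }
  // Minimal frame: just pop the stack-passed argument words.
  return sp + stack_passed_words * pointer_size;
}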
3436 
3437 void MacroAssembler::GetRelocatedValueLocation(Register ldr_location,
3438  Register result) {
3439  const uint32_t kLdrOffsetMask = (1 << 12) - 1;
3440  const int32_t kPCRegOffset = 2 * kPointerSize;
3441  ldr(result, MemOperand(ldr_location));
3442  if (emit_debug_code()) {
3443  // Check that the instruction is a ldr reg, [pc + offset].
3444  and_(result, result, Operand(kLdrPCPattern));
3445  cmp(result, Operand(kLdrPCPattern));
3446  Check(eq, "The instruction to patch should be a load from pc.");
3447  // Result was clobbered. Restore it.
3448  ldr(result, MemOperand(ldr_location));
3449  }
3450  // Get the address of the constant.
3451  and_(result, result, Operand(kLdrOffsetMask));
3452  add(result, ldr_location, Operand(result));
3453  add(result, result, Operand(kPCRegOffset));
3454 }
3455 
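GetRelocatedValueLocation decodes a 'ldr reg, [pc, #offset]' at ldr_location: the low 12 bits of the instruction hold the immediate, and kPCRegOffset accounts for the ARM convention that pc reads as the instruction address plus 8. A hedged C++ sketch of the same address computation, assuming the positive-offset encoding that the kLdrPCPattern check above accepts (names are illustrative):

#include <cstdint>

static const uint32_t* ConstantSlotOfLdrPc(const uint32_t* ldr_location) {
  uint32_t instr = *ldr_location;
  uint32_t offset = instr & 0xfffu;  // Low 12 bits: the load's immediate.
  // Address of the constant pool slot: instruction address + 8 (pc) + offset.
  return reinterpret_cast<const uint32_t*>(
      reinterpret_cast<const char*>(ldr_location) + 8 + offset);
}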
3456 
3457 void MacroAssembler::CheckPageFlag(
3458  Register object,
3459  Register scratch,
3460  int mask,
3461  Condition cc,
3462  Label* condition_met) {
3463  and_(scratch, object, Operand(~Page::kPageAlignmentMask));
3464  ldr(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
3465  tst(scratch, Operand(mask));
3466  b(cc, condition_met);
3467 }
3468 
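CheckPageFlag exploits the power-of-two alignment of heap pages: clearing the low bits of any object address lands on the page's MemoryChunk header, whose flags word can then be tested. A hedged C++ sketch of that lookup, with the alignment mask and flags offset passed in as placeholders for Page::kPageAlignmentMask and MemoryChunk::kFlagsOffset:

#include <cstdint>

static bool PageFlagSet(uintptr_t object_addr, uintptr_t page_alignment_mask,
                        int flags_offset, uintptr_t flag_mask) {
  uintptr_t chunk = object_addr & ~page_alignment_mask;  // Page header address.
  uintptr_t flags = *reinterpret_cast<const uintptr_t*>(chunk + flags_offset);
  return (flags & flag_mask) != 0;
}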
3469 
3470 void MacroAssembler::JumpIfBlack(Register object,
3471  Register scratch0,
3472  Register scratch1,
3473  Label* on_black) {
3474  HasColor(object, scratch0, scratch1, on_black, 1, 0); // kBlackBitPattern.
3475  ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
3476 }
3477 
3478 
3479 void MacroAssembler::HasColor(Register object,
3480  Register bitmap_scratch,
3481  Register mask_scratch,
3482  Label* has_color,
3483  int first_bit,
3484  int second_bit) {
3485  ASSERT(!AreAliased(object, bitmap_scratch, mask_scratch, no_reg));
3486 
3487  GetMarkBits(object, bitmap_scratch, mask_scratch);
3488 
3489  Label other_color, word_boundary;
3490  ldr(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
3491  tst(ip, Operand(mask_scratch));
3492  b(first_bit == 1 ? eq : ne, &other_color);
3493  // Shift the mask left by one by adding it to itself.
3494  add(mask_scratch, mask_scratch, Operand(mask_scratch), SetCC);
3495  b(eq, &word_boundary);
3496  tst(ip, Operand(mask_scratch));
3497  b(second_bit == 1 ? ne : eq, has_color);
3498  jmp(&other_color);
3499 
3500  bind(&word_boundary);
3501  ldr(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize + kPointerSize));
3502  tst(ip, Operand(1));
3503  b(second_bit == 1 ? ne : eq, has_color);
3504  bind(&other_color);
3505 }
3506 
3507 
3508 // Detect some, but not all, common pointer-free objects. This is used by the
3509 // incremental write barrier which doesn't care about oddballs (they are always
3510 // marked black immediately so this code is not hit).
3511 void MacroAssembler::JumpIfDataObject(Register value,
3512  Register scratch,
3513  Label* not_data_object) {
3514  Label is_data_object;
3515  ldr(scratch, FieldMemOperand(value, HeapObject::kMapOffset));
3516  CompareRoot(scratch, Heap::kHeapNumberMapRootIndex);
3517  b(eq, &is_data_object);
3519  ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
3520  // If it's a string and it's not a cons string then it's an object containing
3521  // no GC pointers.
3522  ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
3523  tst(scratch, Operand(kIsIndirectStringMask | kIsNotStringMask));
3524  b(ne, not_data_object);
3525  bind(&is_data_object);
3526 }
3527 
3528 
3529 void MacroAssembler::GetMarkBits(Register addr_reg,
3530  Register bitmap_reg,
3531  Register mask_reg) {
3532  ASSERT(!AreAliased(addr_reg, bitmap_reg, mask_reg, no_reg));
3533  and_(bitmap_reg, addr_reg, Operand(~Page::kPageAlignmentMask));
3534  Ubfx(mask_reg, addr_reg, kPointerSizeLog2, Bitmap::kBitsPerCellLog2);
3535  const int kLowBits = kPointerSizeLog2 + Bitmap::kBitsPerCellLog2;
3536  Ubfx(ip, addr_reg, kLowBits, kPageSizeBits - kLowBits);
3537  add(bitmap_reg, bitmap_reg, Operand(ip, LSL, kPointerSizeLog2));
3538  mov(ip, Operand(1));
3539  mov(mask_reg, Operand(ip, LSL, mask_reg));
3540 }
3541 
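GetMarkBits turns an object address into a (cell address, bit mask) pair in the page's marking bitmap: the page comes from masking the address, the bit index from the address bits just above the pointer-size bits, and the cell index from the bits above that. A hedged C++ sketch of the same bit slicing, with the constants passed in as illustrative stand-ins for kPointerSizeLog2, Bitmap::kBitsPerCellLog2, kPageSizeBits and Page::kPageAlignmentMask:

#include <cstdint>

struct MarkBitLocation {
  uintptr_t cell_address;  // Word within the page's mark bitmap.
  uint32_t mask;           // Single bit within that word.
};

static MarkBitLocation FindMarkBit(uintptr_t addr, int pointer_size_log2,
                                   int bits_per_cell_log2, int page_size_bits,
                                   uintptr_t page_alignment_mask) {
  uintptr_t page = addr & ~page_alignment_mask;
  int low_bits = pointer_size_log2 + bits_per_cell_log2;
  // First Ubfx above: bit position inside the cell.
  uint32_t bit = static_cast<uint32_t>(addr >> pointer_size_log2) &
                 ((1u << bits_per_cell_log2) - 1);
  // Second Ubfx above: cell index inside the page.
  uintptr_t cell =
      (addr >> low_bits) & ((1u << (page_size_bits - low_bits)) - 1);
  MarkBitLocation loc;
  // The code above adds MemoryChunk::kHeaderSize when it loads the cell.
  loc.cell_address = page + cell * (static_cast<uintptr_t>(1) << pointer_size_log2);
  loc.mask = 1u << bit;
  return loc;
}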
3542 
3543 void MacroAssembler::EnsureNotWhite(
3544  Register value,
3545  Register bitmap_scratch,
3546  Register mask_scratch,
3547  Register load_scratch,
3548  Label* value_is_white_and_not_data) {
3549  ASSERT(!AreAliased(value, bitmap_scratch, mask_scratch, ip));
3550  GetMarkBits(value, bitmap_scratch, mask_scratch);
3551 
3552  // If the value is black or grey we don't need to do anything.
3553  ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0);
3554  ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
3555  ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0);
3556  ASSERT(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
3557 
3558  Label done;
3559 
3560  // Since both black and grey have a 1 in the first position and white does
3561  // not have a 1 there we only need to check one bit.
3562  ldr(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
3563  tst(mask_scratch, load_scratch);
3564  b(ne, &done);
3565 
3566  if (emit_debug_code()) {
3567  // Check for impossible bit pattern.
3568  Label ok;
3569  // LSL may overflow, making the check conservative.
3570  tst(load_scratch, Operand(mask_scratch, LSL, 1));
3571  b(eq, &ok);
3572  stop("Impossible marking bit pattern");
3573  bind(&ok);
3574  }
3575 
3576  // Value is white. We check whether it is data that doesn't need scanning.
3577  // Currently only checks for HeapNumber and non-indirect strings.
3578  Register map = load_scratch; // Holds map while checking type.
3579  Register length = load_scratch; // Holds length of object after testing type.
3580  Label is_data_object;
3581 
3582  // Check for heap-number
3583  ldr(map, FieldMemOperand(value, HeapObject::kMapOffset));
3584  CompareRoot(map, Heap::kHeapNumberMapRootIndex);
3585  mov(length, Operand(HeapNumber::kSize), LeaveCC, eq);
3586  b(eq, &is_data_object);
3587 
3588  // Check for strings.
3590  ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
3591  // If it's a string and it's not a cons string then it's an object containing
3592  // no GC pointers.
3593  Register instance_type = load_scratch;
3594  ldrb(instance_type, FieldMemOperand(map, Map::kInstanceTypeOffset));
3595  tst(instance_type, Operand(kIsIndirectStringMask | kIsNotStringMask));
3596  b(ne, value_is_white_and_not_data);
3597  // It's a non-indirect (non-cons and non-slice) string.
3598  // If it's external, the length is just ExternalString::kSize.
3599  // Otherwise it's String::kHeaderSize + string->length() * (1 or 2).
3600  // External strings are the only ones with the kExternalStringTag bit
3601  // set.
3604  tst(instance_type, Operand(kExternalStringTag));
3605  mov(length, Operand(ExternalString::kSize), LeaveCC, ne);
3606  b(ne, &is_data_object);
3607 
3608  // Sequential string, either ASCII or UC16.
3609  // For ASCII (char-size of 1) we shift the smi tag away to get the length.
3610  // For UC16 (char-size of 2) we just leave the smi tag in place, thereby
3611  // getting the length multiplied by 2.
3613  ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
3614  ldr(ip, FieldMemOperand(value, String::kLengthOffset));
3615  tst(instance_type, Operand(kStringEncodingMask));
3616  mov(ip, Operand(ip, LSR, 1), LeaveCC, ne);
3617  add(length, ip, Operand(SeqString::kHeaderSize + kObjectAlignmentMask));
3618  and_(length, length, Operand(~kObjectAlignmentMask));
3619 
3620  bind(&is_data_object);
3621  // Value is a data object, and it is white. Mark it black. Since we know
3622  // that the object is white we can make it black by flipping one bit.
3623  ldr(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
3624  orr(ip, ip, Operand(mask_scratch));
3625  str(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
3626 
3627  and_(bitmap_scratch, bitmap_scratch, Operand(~Page::kPageAlignmentMask));
3628  ldr(ip, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
3629  add(ip, ip, Operand(length));
3630  str(ip, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
3631 
3632  bind(&done);
3633 }
3634 
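The tail of EnsureNotWhite computes how many bytes a white data object occupies so it can be added to the page's live-byte count. For sequential strings it gets the one-or-two-bytes-per-character factor almost for free from the smi tag on the length field (shift it away for ASCII, keep it for two-byte), then rounds up to the object alignment. A hedged C++ sketch of that size computation (parameter names are illustrative, standing in for SeqString::kHeaderSize and kObjectAlignmentMask):

static int SequentialStringSize(int char_count, bool is_ascii,
                                int header_size, int alignment_mask) {
  int payload = is_ascii ? char_count : 2 * char_count;
  // (x + mask) & ~mask rounds up to the next alignment boundary.
  return (header_size + payload + alignment_mask) & ~alignment_mask;
}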
3635 
3636 void MacroAssembler::ClampUint8(Register output_reg, Register input_reg) {
3637  Usat(output_reg, 8, Operand(input_reg));
3638 }
3639 
3640 
3641 void MacroAssembler::ClampDoubleToUint8(Register result_reg,
3642  DoubleRegister input_reg,
3643  DoubleRegister temp_double_reg) {
3644  Label above_zero;
3645  Label done;
3646  Label in_bounds;
3647 
3648  Vmov(temp_double_reg, 0.0);
3649  VFPCompareAndSetFlags(input_reg, temp_double_reg);
3650  b(gt, &above_zero);
3651 
3652  // Double value is <= 0 or NaN: return 0.
3653  mov(result_reg, Operand(0));
3654  b(al, &done);
3655 
3656  // Double value is positive: return 255 if it exceeds 255.
3657  bind(&above_zero);
3658  Vmov(temp_double_reg, 255.0);
3659  VFPCompareAndSetFlags(input_reg, temp_double_reg);
3660  b(le, &in_bounds);
3661  mov(result_reg, Operand(255));
3662  b(al, &done);
3663 
3664  // In 0-255 range, round and truncate.
3665  bind(&in_bounds);
3666  Vmov(temp_double_reg, 0.5);
3667  vadd(temp_double_reg, input_reg, temp_double_reg);
3668  vcvt_u32_f64(temp_double_reg.low(), temp_double_reg);
3669  vmov(result_reg, temp_double_reg.low());
3670  bind(&done);
3671 }
3672 
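ClampDoubleToUint8 maps NaN and non-positive inputs to 0, inputs above 255 to 255, and rounds everything in between by adding 0.5 before the truncating vcvt_u32_f64 conversion. A hedged C++ sketch of the same mapping (not V8 code):

static unsigned ClampDoubleToUint8(double value) {
  if (!(value > 0.0)) return 0;   // NaN compares false, so it also lands here.
  if (value > 255.0) return 255;
  return static_cast<unsigned>(value + 0.5);  // Round, then truncate.
}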
3673 
3674 void MacroAssembler::LoadInstanceDescriptors(Register map,
3675  Register descriptors) {
3676  ldr(descriptors,
3677  FieldMemOperand(map, Map::kInstanceDescriptorsOrBitField3Offset));
3678  Label not_smi;
3679  JumpIfNotSmi(descriptors, &not_smi);
3680  mov(descriptors, Operand(FACTORY->empty_descriptor_array()));
3681  bind(&not_smi);
3682 }
3683 
3684 
3685 void MacroAssembler::CheckEnumCache(Register null_value, Label* call_runtime) {
3686  Label next;
3687  // Preload a couple of values used in the loop.
3688  Register empty_fixed_array_value = r6;
3689  LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
3690  Register empty_descriptor_array_value = r7;
3691  LoadRoot(empty_descriptor_array_value,
3692  Heap::kEmptyDescriptorArrayRootIndex);
3693  mov(r1, r0);
3694  bind(&next);
3695 
3696  // Check that there are no elements. Register r1 contains the
3697  // current JS object we've reached through the prototype chain.
3698  ldr(r2, FieldMemOperand(r1, JSObject::kElementsOffset));
3699  cmp(r2, empty_fixed_array_value);
3700  b(ne, call_runtime);
3701 
3702  // Check that instance descriptors are not empty so that we can
3703  // check for an enum cache. Leave the map in r2 for the subsequent
3704  // prototype load.
3705  ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
3706  ldr(r3, FieldMemOperand(r2, Map::kInstanceDescriptorsOrBitField3Offset));
3707  JumpIfSmi(r3, call_runtime);
3708 
3709  // Check that there is an enum cache in the non-empty instance
3710  // descriptors (r3). This is the case if the next enumeration
3711  // index field does not contain a smi.
3712  ldr(r3, FieldMemOperand(r3, DescriptorArray::kEnumerationIndexOffset));
3713  JumpIfSmi(r3, call_runtime);
3714 
3715  // For all objects but the receiver, check that the cache is empty.
3716  Label check_prototype;
3717  cmp(r1, r0);
3718  b(eq, &check_prototype);
3719  ldr(r3, FieldMemOperand(r3, DescriptorArray::kEnumCacheBridgeCacheOffset));
3720  cmp(r3, empty_fixed_array_value);
3721  b(ne, call_runtime);
3722 
3723  // Load the prototype from the map and loop if non-null.
3724  bind(&check_prototype);
3725  ldr(r1, FieldMemOperand(r2, Map::kPrototypeOffset));
3726  cmp(r1, null_value);
3727  b(ne, &next);
3728 }
3729 
3730 
3731 #ifdef DEBUG
3732 bool AreAliased(Register reg1,
3733  Register reg2,
3734  Register reg3,
3735  Register reg4,
3736  Register reg5,
3737  Register reg6) {
3738  int n_of_valid_regs = reg1.is_valid() + reg2.is_valid() +
3739  reg3.is_valid() + reg4.is_valid() + reg5.is_valid() + reg6.is_valid();
3740 
3741  RegList regs = 0;
3742  if (reg1.is_valid()) regs |= reg1.bit();
3743  if (reg2.is_valid()) regs |= reg2.bit();
3744  if (reg3.is_valid()) regs |= reg3.bit();
3745  if (reg4.is_valid()) regs |= reg4.bit();
3746  if (reg5.is_valid()) regs |= reg5.bit();
3747  if (reg6.is_valid()) regs |= reg6.bit();
3748  int n_of_non_aliasing_regs = NumRegs(regs);
3749 
3750  return n_of_valid_regs != n_of_non_aliasing_regs;
3751 }
3752 #endif
3753 
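AreAliased works because each register maps to a distinct bit in a RegList: OR-ing the bits of all valid registers collapses duplicates, so if the number of set bits is smaller than the number of valid registers, at least two of them alias. A hedged C++ sketch of that counting argument (illustrative, operating on raw bit masks rather than V8's Register type):

#include <cstdint>

static bool AnyAliased(const uint32_t* reg_bits, int count) {
  uint32_t combined = 0;
  for (int i = 0; i < count; ++i) combined |= reg_bits[i];
  int distinct = 0;
  for (uint32_t m = combined; m != 0; m &= m - 1) ++distinct;  // Popcount.
  return distinct != count;
}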
3754 
3755 CodePatcher::CodePatcher(byte* address, int instructions)
3756  : address_(address),
3757  instructions_(instructions),
3758  size_(instructions * Assembler::kInstrSize),
3759  masm_(NULL, address, size_ + Assembler::kGap) {
3760  // Create a new macro assembler pointing to the address of the code to patch.
3761  // The size is adjusted with kGap in order for the assembler to generate size
3762  // bytes of instructions without failing with buffer size constraints.
3763  ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
3764 }
3765 
3766 
3767 CodePatcher::~CodePatcher() {
3768  // Indicate that code has changed.
3769  CPU::FlushICache(address_, size_);
3770 
3771  // Check that the code was patched as expected.
3772  ASSERT(masm_.pc_ == address_ + size_);
3773  ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
3774 }
3775 
3776 
3777 void CodePatcher::Emit(Instr instr) {
3778  masm()->emit(instr);
3779 }
3780 
3781 
3782 void CodePatcher::Emit(Address addr) {
3783  masm()->emit(reinterpret_cast<Instr>(addr));
3784 }
3785 
3786 
3787 void CodePatcher::EmitCondition(Condition cond) {
3788  Instr instr = Assembler::instr_at(masm_.pc_);
3789  instr = (instr & ~kCondMask) | cond;
3790  masm_.emit(instr);
3791 }
3792 
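A hedged usage sketch of the CodePatcher defined above: construct it over the patch site for the number of instructions to rewrite, emit the replacements, and let the destructor flush the instruction cache and verify the byte count. The helper name and values here are placeholders, not taken from V8:

static void PatchOneInstruction(byte* patch_site, Instr replacement) {
  CodePatcher patcher(patch_site, 1);  // Room for exactly one instruction.
  patcher.Emit(replacement);           // ~CodePatcher flushes the icache and
}                                      // asserts the patch size matched.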
3793 
3794 } } // namespace v8::internal
3795 
3796 #endif // V8_TARGET_ARCH_ARM