v8 3.14.5 (node 0.10.28)
V8 is Google's open source JavaScript engine.
macro-assembler-mips.cc
1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
4 // met:
5 //
6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided
11 // with the distribution.
12 // * Neither the name of Google Inc. nor the names of its
13 // contributors may be used to endorse or promote products derived
14 // from this software without specific prior written permission.
15 //
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 
28 #include <limits.h> // For LONG_MIN, LONG_MAX.
29 
30 #include "v8.h"
31 
32 #if defined(V8_TARGET_ARCH_MIPS)
33 
34 #include "bootstrapper.h"
35 #include "codegen.h"
36 #include "debug.h"
37 #include "runtime.h"
38 
39 namespace v8 {
40 namespace internal {
41 
42 MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
43  : Assembler(arg_isolate, buffer, size),
44  generating_stub_(false),
45  allow_stub_calls_(true),
46  has_frame_(false) {
47  if (isolate() != NULL) {
48  code_object_ = Handle<Object>(isolate()->heap()->undefined_value(),
49  isolate());
50  }
51 }
52 
53 
54 void MacroAssembler::LoadRoot(Register destination,
55  Heap::RootListIndex index) {
56  lw(destination, MemOperand(s6, index << kPointerSizeLog2));
57 }
58 
59 
60 void MacroAssembler::LoadRoot(Register destination,
61  Heap::RootListIndex index,
62  Condition cond,
63  Register src1, const Operand& src2) {
64  Branch(2, NegateCondition(cond), src1, src2);
65  lw(destination, MemOperand(s6, index << kPointerSizeLog2));
66 }
67 
68 
69 void MacroAssembler::StoreRoot(Register source,
70  Heap::RootListIndex index) {
71  sw(source, MemOperand(s6, index << kPointerSizeLog2));
72 }
73 
74 
75 void MacroAssembler::StoreRoot(Register source,
76  Heap::RootListIndex index,
77  Condition cond,
78  Register src1, const Operand& src2) {
79  Branch(2, NegateCondition(cond), src1, src2);
80  sw(source, MemOperand(s6, index << kPointerSizeLog2));
81 }
82 
83 
84 void MacroAssembler::LoadHeapObject(Register result,
85  Handle<HeapObject> object) {
86  if (isolate()->heap()->InNewSpace(*object)) {
87  Handle<JSGlobalPropertyCell> cell =
88  isolate()->factory()->NewJSGlobalPropertyCell(object);
89  li(result, Operand(cell));
90  lw(result, FieldMemOperand(result, JSGlobalPropertyCell::kValueOffset));
91  } else {
92  li(result, Operand(object));
93  }
94 }
95 
96 
97 // Push and pop all registers that can hold pointers.
98 void MacroAssembler::PushSafepointRegisters() {
99  // Safepoints expect a block of kNumSafepointRegisters values on the
100  // stack, so adjust the stack for unsaved registers.
101  const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
102  ASSERT(num_unsaved >= 0);
103  if (num_unsaved > 0) {
104  Subu(sp, sp, Operand(num_unsaved * kPointerSize));
105  }
106  MultiPush(kSafepointSavedRegisters);
107 }
108 
109 
110 void MacroAssembler::PopSafepointRegisters() {
111  const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
112  MultiPop(kSafepointSavedRegisters);
113  if (num_unsaved > 0) {
114  Addu(sp, sp, Operand(num_unsaved * kPointerSize));
115  }
116 }
117 
118 
119 void MacroAssembler::PushSafepointRegistersAndDoubles() {
120  PushSafepointRegisters();
121  Subu(sp, sp, Operand(FPURegister::kNumAllocatableRegisters * kDoubleSize));
122  for (int i = 0; i < FPURegister::kNumAllocatableRegisters; i+=2) {
123  FPURegister reg = FPURegister::FromAllocationIndex(i);
124  sdc1(reg, MemOperand(sp, i * kDoubleSize));
125  }
126 }
127 
128 
129 void MacroAssembler::PopSafepointRegistersAndDoubles() {
130  for (int i = 0; i < FPURegister::kNumAllocatableRegisters; i+=2) {
131  FPURegister reg = FPURegister::FromAllocationIndex(i);
132  ldc1(reg, MemOperand(sp, i * kDoubleSize));
133  }
134  Addu(sp, sp, Operand(FPURegister::kNumAllocatableRegisters * kDoubleSize));
135  PopSafepointRegisters();
136 }
137 
138 
139 void MacroAssembler::StoreToSafepointRegistersAndDoublesSlot(Register src,
140  Register dst) {
141  sw(src, SafepointRegistersAndDoublesSlot(dst));
142 }
143 
144 
145 void MacroAssembler::StoreToSafepointRegisterSlot(Register src, Register dst) {
146  sw(src, SafepointRegisterSlot(dst));
147 }
148 
149 
150 void MacroAssembler::LoadFromSafepointRegisterSlot(Register dst, Register src) {
151  lw(dst, SafepointRegisterSlot(src));
152 }
153 
154 
155 int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
156  // The registers are pushed starting with the highest encoding,
157  // which means that the lowest encodings are closest to the stack pointer.
158  return kSafepointRegisterStackIndexMap[reg_code];
159 }
160 
161 
162 MemOperand MacroAssembler::SafepointRegisterSlot(Register reg) {
163  return MemOperand(sp, SafepointRegisterStackIndex(reg.code()) * kPointerSize);
164 }
165 
166 
167 MemOperand MacroAssembler::SafepointRegistersAndDoublesSlot(Register reg) {
169  // General purpose registers are pushed last on the stack.
170  int doubles_size = FPURegister::kNumAllocatableRegisters * kDoubleSize;
171  int register_offset = SafepointRegisterStackIndex(reg.code()) * kPointerSize;
172  return MemOperand(sp, doubles_size + register_offset);
173 }
174 
175 
176 void MacroAssembler::InNewSpace(Register object,
177  Register scratch,
178  Condition cc,
179  Label* branch) {
180  ASSERT(cc == eq || cc == ne);
181  And(scratch, object, Operand(ExternalReference::new_space_mask(isolate())));
182  Branch(branch, cc, scratch,
183  Operand(ExternalReference::new_space_start(isolate())));
184 }
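// The test above is a masked compare: a pointer lies in new space exactly when
// (address & new_space_mask) == new_space_start. A host-side sketch of the
// same predicate (the helper name is illustrative, not part of V8's API):
//
//   static inline bool InNewSpaceSketch(uintptr_t addr,
//                                       uintptr_t mask,
//                                       uintptr_t start) {
//     return (addr & mask) == start;
//   }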
185 
186 
187 void MacroAssembler::RecordWriteField(
188  Register object,
189  int offset,
190  Register value,
191  Register dst,
192  RAStatus ra_status,
193  SaveFPRegsMode save_fp,
194  RememberedSetAction remembered_set_action,
195  SmiCheck smi_check) {
196  ASSERT(!AreAliased(value, dst, t8, object));
197  // First, check if a write barrier is even needed. The tests below
198  // catch stores of Smis.
199  Label done;
200 
201  // Skip barrier if writing a smi.
202  if (smi_check == INLINE_SMI_CHECK) {
203  JumpIfSmi(value, &done);
204  }
205 
206  // Although the object register is tagged, the offset is relative to the start
207  // of the object, so the offset must be a multiple of kPointerSize.
208  ASSERT(IsAligned(offset, kPointerSize));
209 
210  Addu(dst, object, Operand(offset - kHeapObjectTag));
211  if (emit_debug_code()) {
212  Label ok;
213  And(t8, dst, Operand((1 << kPointerSizeLog2) - 1));
214  Branch(&ok, eq, t8, Operand(zero_reg));
215  stop("Unaligned cell in write barrier");
216  bind(&ok);
217  }
218 
219  RecordWrite(object,
220  dst,
221  value,
222  ra_status,
223  save_fp,
224  remembered_set_action,
225  OMIT_SMI_CHECK);
226 
227  bind(&done);
228 
229  // Clobber clobbered input registers when running with the debug-code flag
230  // turned on to provoke errors.
231  if (emit_debug_code()) {
232  li(value, Operand(BitCast<int32_t>(kZapValue + 4)));
233  li(dst, Operand(BitCast<int32_t>(kZapValue + 8)));
234  }
235 }
236 
237 
238 // Will clobber 4 registers: object, address, scratch, ip. The
239 // register 'object' contains a heap object pointer. The heap object
240 // tag is shifted away.
241 void MacroAssembler::RecordWrite(Register object,
242  Register address,
243  Register value,
244  RAStatus ra_status,
245  SaveFPRegsMode fp_mode,
246  RememberedSetAction remembered_set_action,
247  SmiCheck smi_check) {
248  ASSERT(!AreAliased(object, address, value, t8));
249  ASSERT(!AreAliased(object, address, value, t9));
250  // The compiled code assumes that record write doesn't change the
251  // context register, so we check that none of the clobbered
252  // registers are cp.
253  ASSERT(!address.is(cp) && !value.is(cp));
254 
255  if (emit_debug_code()) {
256  lw(at, MemOperand(address));
257  Assert(
258  eq, "Wrong address or value passed to RecordWrite", at, Operand(value));
259  }
260 
261  Label done;
262 
263  if (smi_check == INLINE_SMI_CHECK) {
264  ASSERT_EQ(0, kSmiTag);
265  JumpIfSmi(value, &done);
266  }
267 
268  CheckPageFlag(value,
269  value, // Used as scratch.
270  MemoryChunk::kPointersToHereAreInterestingMask,
271  eq,
272  &done);
273  CheckPageFlag(object,
274  value, // Used as scratch.
275  MemoryChunk::kPointersFromHereAreInterestingMask,
276  eq,
277  &done);
278 
279  // Record the actual write.
280  if (ra_status == kRAHasNotBeenSaved) {
281  push(ra);
282  }
283  RecordWriteStub stub(object, value, address, remembered_set_action, fp_mode);
284  CallStub(&stub);
285  if (ra_status == kRAHasNotBeenSaved) {
286  pop(ra);
287  }
288 
289  bind(&done);
290 
291  // Clobber clobbered registers when running with the debug-code flag
292  // turned on to provoke errors.
293  if (emit_debug_code()) {
294  li(address, Operand(BitCast<int32_t>(kZapValue + 12)));
295  li(value, Operand(BitCast<int32_t>(kZapValue + 16)));
296  }
297 }
298 
299 
300 void MacroAssembler::RememberedSetHelper(Register object, // For debug tests.
301  Register address,
302  Register scratch,
303  SaveFPRegsMode fp_mode,
304  RememberedSetFinalAction and_then) {
305  Label done;
306  if (emit_debug_code()) {
307  Label ok;
308  JumpIfNotInNewSpace(object, scratch, &ok);
309  stop("Remembered set pointer is in new space");
310  bind(&ok);
311  }
312  // Load store buffer top.
313  ExternalReference store_buffer =
314  ExternalReference::store_buffer_top(isolate());
315  li(t8, Operand(store_buffer));
316  lw(scratch, MemOperand(t8));
317  // Store pointer to buffer and increment buffer top.
318  sw(address, MemOperand(scratch));
319  Addu(scratch, scratch, kPointerSize);
320  // Write back new top of buffer.
321  sw(scratch, MemOperand(t8));
322  // Call stub on end of buffer.
323  // Check for end of buffer.
324  And(t8, scratch, Operand(StoreBuffer::kStoreBufferOverflowBit));
325  if (and_then == kFallThroughAtEnd) {
326  Branch(&done, eq, t8, Operand(zero_reg));
327  } else {
328  ASSERT(and_then == kReturnAtEnd);
329  Ret(eq, t8, Operand(zero_reg));
330  }
331  push(ra);
332  StoreBufferOverflowStub store_buffer_overflow =
333  StoreBufferOverflowStub(fp_mode);
334  CallStub(&store_buffer_overflow);
335  pop(ra);
336  bind(&done);
337  if (and_then == kReturnAtEnd) {
338  Ret();
339  }
340 }
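// The buffer bookkeeping above is: load the store buffer top, append the slot
// address, bump the top by one pointer, write it back, then test the overflow
// bit folded into the new top. A C++ sketch of the same sequence (names are
// illustrative, not part of V8's API):
//
//   static inline bool StoreBufferAppendSketch(uintptr_t** top,
//                                              uintptr_t slot_address,
//                                              uintptr_t overflow_bit) {
//     **top = slot_address;   // sw(address, MemOperand(scratch))
//     *top += 1;              // Addu(scratch, scratch, kPointerSize)
//     // Returns true when the overflow stub must be called.
//     return (reinterpret_cast<uintptr_t>(*top) & overflow_bit) != 0;
//   }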
341 
342 
343 // -----------------------------------------------------------------------------
344 // Allocation support.
345 
346 
347 void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
348  Register scratch,
349  Label* miss) {
350  Label same_contexts;
351 
352  ASSERT(!holder_reg.is(scratch));
353  ASSERT(!holder_reg.is(at));
354  ASSERT(!scratch.is(at));
355 
356  // Load current lexical context from the stack frame.
357  lw(scratch, MemOperand(fp, StandardFrameConstants::kContextOffset));
358  // In debug mode, make sure the lexical context is set.
359 #ifdef DEBUG
360  Check(ne, "we should not have an empty lexical context",
361  scratch, Operand(zero_reg));
362 #endif
363 
364  // Load the native context of the current context.
365  int offset =
366  Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize;
367  lw(scratch, FieldMemOperand(scratch, offset));
368  lw(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset));
369 
370  // Check the context is a native context.
371  if (emit_debug_code()) {
372  // TODO(119): Avoid push(holder_reg)/pop(holder_reg).
373  push(holder_reg); // Temporarily save holder on the stack.
374  // Read the first word and compare to the native_context_map.
375  lw(holder_reg, FieldMemOperand(scratch, HeapObject::kMapOffset));
376  LoadRoot(at, Heap::kNativeContextMapRootIndex);
377  Check(eq, "JSGlobalObject::native_context should be a native context.",
378  holder_reg, Operand(at));
379  pop(holder_reg); // Restore holder.
380  }
381 
382  // Check if both contexts are the same.
383  lw(at, FieldMemOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
384  Branch(&same_contexts, eq, scratch, Operand(at));
385 
386  // Check the context is a native context.
387  if (emit_debug_code()) {
388  // TODO(119): Avoid push(holder_reg)/pop(holder_reg).
389  push(holder_reg); // Temporarily save holder on the stack.
390  mov(holder_reg, at); // Move at to its holding place.
391  LoadRoot(at, Heap::kNullValueRootIndex);
392  Check(ne, "JSGlobalProxy::context() should not be null.",
393  holder_reg, Operand(at));
394 
395  lw(holder_reg, FieldMemOperand(holder_reg, HeapObject::kMapOffset));
396  LoadRoot(at, Heap::kNativeContextMapRootIndex);
397  Check(eq, "JSGlobalObject::native_context should be a native context.",
398  holder_reg, Operand(at));
399  // Restoring at is not needed; at is reloaded below.
400  pop(holder_reg); // Restore holder.
401  // Restore at to holder's context.
402  lw(at, FieldMemOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
403  }
404 
405  // Check that the security token in the calling global object is
406  // compatible with the security token in the receiving global
407  // object.
408  int token_offset = Context::kHeaderSize +
409  Context::SECURITY_TOKEN_INDEX * kPointerSize;
410 
411  lw(scratch, FieldMemOperand(scratch, token_offset));
412  lw(at, FieldMemOperand(at, token_offset));
413  Branch(miss, ne, scratch, Operand(at));
414 
415  bind(&same_contexts);
416 }
417 
418 
419 void MacroAssembler::GetNumberHash(Register reg0, Register scratch) {
420  // First of all we assign the hash seed to scratch.
421  LoadRoot(scratch, Heap::kHashSeedRootIndex);
422  SmiUntag(scratch);
423 
424  // Xor original key with a seed.
425  xor_(reg0, reg0, scratch);
426 
427  // Compute the hash code from the untagged key. This must be kept in sync
428  // with ComputeIntegerHash in utils.h.
429  //
430  // hash = ~hash + (hash << 15);
431  nor(scratch, reg0, zero_reg);
432  sll(at, reg0, 15);
433  addu(reg0, scratch, at);
434 
435  // hash = hash ^ (hash >> 12);
436  srl(at, reg0, 12);
437  xor_(reg0, reg0, at);
438 
439  // hash = hash + (hash << 2);
440  sll(at, reg0, 2);
441  addu(reg0, reg0, at);
442 
443  // hash = hash ^ (hash >> 4);
444  srl(at, reg0, 4);
445  xor_(reg0, reg0, at);
446 
447  // hash = hash * 2057;
448  sll(scratch, reg0, 11);
449  sll(at, reg0, 3);
450  addu(reg0, reg0, at);
451  addu(reg0, reg0, scratch);
452 
453  // hash = hash ^ (hash >> 16);
454  srl(at, reg0, 16);
455  xor_(reg0, reg0, at);
456 }
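// For reference, the seeded integer hash emitted above, written out in C++
// directly from the per-step comments (the helper name is illustrative, not
// part of V8's API; ComputeIntegerHash in utils.h is the authoritative copy):
//
//   static inline uint32_t IntegerHashSketch(uint32_t key, uint32_t seed) {
//     uint32_t hash = key ^ seed;
//     hash = ~hash + (hash << 15);
//     hash = hash ^ (hash >> 12);
//     hash = hash + (hash << 2);
//     hash = hash ^ (hash >> 4);
//     hash = hash * 2057;  // emitted above as hash + (hash << 3) + (hash << 11)
//     hash = hash ^ (hash >> 16);
//     return hash;
//   }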
457 
458 
459 void MacroAssembler::LoadFromNumberDictionary(Label* miss,
460  Register elements,
461  Register key,
462  Register result,
463  Register reg0,
464  Register reg1,
465  Register reg2) {
466  // Register use:
467  //
468  // elements - holds the slow-case elements of the receiver on entry.
469  // Unchanged unless 'result' is the same register.
470  //
471  // key - holds the smi key on entry.
472  // Unchanged unless 'result' is the same register.
473  //
474  //
475  // result - holds the result on exit if the load succeeded.
476  // Allowed to be the same as 'key' or 'result'.
477  // Unchanged on bailout so 'key' or 'result' can be used
478  // in further computation.
479  //
480  // Scratch registers:
481  //
482  // reg0 - holds the untagged key on entry and holds the hash once computed.
483  //
484  // reg1 - Used to hold the capacity mask of the dictionary.
485  //
486  // reg2 - Used for the index into the dictionary.
487  // at - Temporary (avoid MacroAssembler instructions also using 'at').
488  Label done;
489 
490  GetNumberHash(reg0, reg1);
491 
492  // Compute the capacity mask.
493  lw(reg1, FieldMemOperand(elements, SeededNumberDictionary::kCapacityOffset));
494  sra(reg1, reg1, kSmiTagSize);
495  Subu(reg1, reg1, Operand(1));
496 
497  // Generate an unrolled loop that performs a few probes before giving up.
498  static const int kProbes = 4;
499  for (int i = 0; i < kProbes; i++) {
500  // Use reg2 for index calculations and keep the hash intact in reg0.
501  mov(reg2, reg0);
502  // Compute the masked index: (hash + i + i * i) & mask.
503  if (i > 0) {
504  Addu(reg2, reg2, Operand(SeededNumberDictionary::GetProbeOffset(i)));
505  }
506  and_(reg2, reg2, reg1);
507 
508  // Scale the index by multiplying by the element size.
509  ASSERT(SeededNumberDictionary::kEntrySize == 3);
510  sll(at, reg2, 1); // 2x.
511  addu(reg2, reg2, at); // reg2 = reg2 * 3.
512 
513  // Check if the key is identical to the name.
514  sll(at, reg2, kPointerSizeLog2);
515  addu(reg2, elements, at);
516 
517  lw(at, FieldMemOperand(reg2, SeededNumberDictionary::kElementsStartOffset));
518  if (i != kProbes - 1) {
519  Branch(&done, eq, key, Operand(at));
520  } else {
521  Branch(miss, ne, key, Operand(at));
522  }
523  }
524 
525  bind(&done);
526  // Check that the value is a normal property.
527  // reg2: elements + (index * kPointerSize).
528  const int kDetailsOffset =
529  SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize;
530  lw(reg1, FieldMemOperand(reg2, kDetailsOffset));
531  And(at, reg1, Operand(Smi::FromInt(PropertyDetails::TypeField::kMask)));
532  Branch(miss, ne, at, Operand(zero_reg));
533 
534  // Get the value at the masked, scaled index and return.
535  const int kValueOffset =
536  SeededNumberDictionary::kElementsStartOffset + kPointerSize;
537  lw(result, FieldMemOperand(reg2, kValueOffset));
538 }
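// The probe loop above computes, for probe i, the masked index
// (hash + SeededNumberDictionary::GetProbeOffset(i)) & mask and then scales it
// by kEntrySize == 3 pointers to reach the entry. The index arithmetic alone,
// as a C++ sketch (helper name illustrative, not part of V8's API):
//
//   static inline int EntryWordOffsetSketch(uint32_t hash,
//                                           uint32_t probe_offset,  // GetProbeOffset(i)
//                                           uint32_t mask) {
//     uint32_t index = (hash + probe_offset) & mask;
//     return static_cast<int>(index * 3);  // words from kElementsStartOffset
//   }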
539 
540 
541 // ---------------------------------------------------------------------------
542 // Instruction macros.
543 
544 void MacroAssembler::Addu(Register rd, Register rs, const Operand& rt) {
545  if (rt.is_reg()) {
546  addu(rd, rs, rt.rm());
547  } else {
548  if (is_int16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
549  addiu(rd, rs, rt.imm32_);
550  } else {
551  // li handles the relocation.
552  ASSERT(!rs.is(at));
553  li(at, rt);
554  addu(rd, rs, at);
555  }
556  }
557 }
558 
559 
560 void MacroAssembler::Subu(Register rd, Register rs, const Operand& rt) {
561  if (rt.is_reg()) {
562  subu(rd, rs, rt.rm());
563  } else {
564  if (is_int16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
565  addiu(rd, rs, -rt.imm32_); // No subiu instr, use addiu(x, y, -imm).
566  } else {
567  // li handles the relocation.
568  ASSERT(!rs.is(at));
569  li(at, rt);
570  subu(rd, rs, at);
571  }
572  }
573 }
574 
575 
576 void MacroAssembler::Mul(Register rd, Register rs, const Operand& rt) {
577  if (rt.is_reg()) {
578  if (kArchVariant == kLoongson) {
579  mult(rs, rt.rm());
580  mflo(rd);
581  } else {
582  mul(rd, rs, rt.rm());
583  }
584  } else {
585  // li handles the relocation.
586  ASSERT(!rs.is(at));
587  li(at, rt);
588  if (kArchVariant == kLoongson) {
589  mult(rs, at);
590  mflo(rd);
591  } else {
592  mul(rd, rs, at);
593  }
594  }
595 }
596 
597 
598 void MacroAssembler::Mult(Register rs, const Operand& rt) {
599  if (rt.is_reg()) {
600  mult(rs, rt.rm());
601  } else {
602  // li handles the relocation.
603  ASSERT(!rs.is(at));
604  li(at, rt);
605  mult(rs, at);
606  }
607 }
608 
609 
610 void MacroAssembler::Multu(Register rs, const Operand& rt) {
611  if (rt.is_reg()) {
612  multu(rs, rt.rm());
613  } else {
614  // li handles the relocation.
615  ASSERT(!rs.is(at));
616  li(at, rt);
617  multu(rs, at);
618  }
619 }
620 
621 
622 void MacroAssembler::Div(Register rs, const Operand& rt) {
623  if (rt.is_reg()) {
624  div(rs, rt.rm());
625  } else {
626  // li handles the relocation.
627  ASSERT(!rs.is(at));
628  li(at, rt);
629  div(rs, at);
630  }
631 }
632 
633 
634 void MacroAssembler::Divu(Register rs, const Operand& rt) {
635  if (rt.is_reg()) {
636  divu(rs, rt.rm());
637  } else {
638  // li handles the relocation.
639  ASSERT(!rs.is(at));
640  li(at, rt);
641  divu(rs, at);
642  }
643 }
644 
645 
646 void MacroAssembler::And(Register rd, Register rs, const Operand& rt) {
647  if (rt.is_reg()) {
648  and_(rd, rs, rt.rm());
649  } else {
650  if (is_uint16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
651  andi(rd, rs, rt.imm32_);
652  } else {
653  // li handles the relocation.
654  ASSERT(!rs.is(at));
655  li(at, rt);
656  and_(rd, rs, at);
657  }
658  }
659 }
660 
661 
662 void MacroAssembler::Or(Register rd, Register rs, const Operand& rt) {
663  if (rt.is_reg()) {
664  or_(rd, rs, rt.rm());
665  } else {
666  if (is_uint16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
667  ori(rd, rs, rt.imm32_);
668  } else {
669  // li handles the relocation.
670  ASSERT(!rs.is(at));
671  li(at, rt);
672  or_(rd, rs, at);
673  }
674  }
675 }
676 
677 
678 void MacroAssembler::Xor(Register rd, Register rs, const Operand& rt) {
679  if (rt.is_reg()) {
680  xor_(rd, rs, rt.rm());
681  } else {
682  if (is_uint16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
683  xori(rd, rs, rt.imm32_);
684  } else {
685  // li handles the relocation.
686  ASSERT(!rs.is(at));
687  li(at, rt);
688  xor_(rd, rs, at);
689  }
690  }
691 }
692 
693 
694 void MacroAssembler::Nor(Register rd, Register rs, const Operand& rt) {
695  if (rt.is_reg()) {
696  nor(rd, rs, rt.rm());
697  } else {
698  // li handles the relocation.
699  ASSERT(!rs.is(at));
700  li(at, rt);
701  nor(rd, rs, at);
702  }
703 }
704 
705 
706 void MacroAssembler::Neg(Register rs, const Operand& rt) {
707  ASSERT(rt.is_reg());
708  ASSERT(!at.is(rs));
709  ASSERT(!at.is(rt.rm()));
710  li(at, -1);
711  xor_(rs, rt.rm(), at);
712 }
713 
714 
715 void MacroAssembler::Slt(Register rd, Register rs, const Operand& rt) {
716  if (rt.is_reg()) {
717  slt(rd, rs, rt.rm());
718  } else {
719  if (is_int16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
720  slti(rd, rs, rt.imm32_);
721  } else {
722  // li handles the relocation.
723  ASSERT(!rs.is(at));
724  li(at, rt);
725  slt(rd, rs, at);
726  }
727  }
728 }
729 
730 
731 void MacroAssembler::Sltu(Register rd, Register rs, const Operand& rt) {
732  if (rt.is_reg()) {
733  sltu(rd, rs, rt.rm());
734  } else {
735  if (is_uint16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
736  sltiu(rd, rs, rt.imm32_);
737  } else {
738  // li handles the relocation.
739  ASSERT(!rs.is(at));
740  li(at, rt);
741  sltu(rd, rs, at);
742  }
743  }
744 }
745 
746 
747 void MacroAssembler::Ror(Register rd, Register rs, const Operand& rt) {
748  if (kArchVariant == kMips32r2) {
749  if (rt.is_reg()) {
750  rotrv(rd, rs, rt.rm());
751  } else {
752  rotr(rd, rs, rt.imm32_);
753  }
754  } else {
755  if (rt.is_reg()) {
756  subu(at, zero_reg, rt.rm());
757  sllv(at, rs, at);
758  srlv(rd, rs, rt.rm());
759  or_(rd, rd, at);
760  } else {
761  if (rt.imm32_ == 0) {
762  srl(rd, rs, 0);
763  } else {
764  srl(at, rs, rt.imm32_);
765  sll(rd, rs, (0x20 - rt.imm32_) & 0x1f);
766  or_(rd, rd, at);
767  }
768  }
769  }
770 }
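// On cores without the r2 rotate instructions the fallback above uses the
// identity ror(x, n) == (x >> n) | (x << ((32 - n) & 31)). The same operation
// in C++ (illustrative helper, not part of V8's API):
//
//   static inline uint32_t RorSketch(uint32_t x, uint32_t n) {
//     n &= 31;
//     if (n == 0) return x;              // matches the srl(rd, rs, 0) case
//     return (x >> n) | (x << (32 - n));
//   }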
771 
772 //------------Pseudo-instructions-------------
773 
774 void MacroAssembler::li(Register rd, Operand j, LiFlags mode) {
775  ASSERT(!j.is_reg());
776  BlockTrampolinePoolScope block_trampoline_pool(this);
777  if (!MustUseReg(j.rmode_) && mode == OPTIMIZE_SIZE) {
778  // Normal load of an immediate value which does not need Relocation Info.
779  if (is_int16(j.imm32_)) {
780  addiu(rd, zero_reg, j.imm32_);
781  } else if (!(j.imm32_ & kHiMask)) {
782  ori(rd, zero_reg, j.imm32_);
783  } else if (!(j.imm32_ & kImm16Mask)) {
784  lui(rd, (j.imm32_ >> kLuiShift) & kImm16Mask);
785  } else {
786  lui(rd, (j.imm32_ >> kLuiShift) & kImm16Mask);
787  ori(rd, rd, (j.imm32_ & kImm16Mask));
788  }
789  } else {
790  if (MustUseReg(j.rmode_)) {
791  RecordRelocInfo(j.rmode_, j.imm32_);
792  }
793  // We always need the same number of instructions as we may need to patch
794  // this code to load another value which may need 2 instructions to load.
795  lui(rd, (j.imm32_ >> kLuiShift) & kImm16Mask);
796  ori(rd, rd, (j.imm32_ & kImm16Mask));
797  }
798 }
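// When relocation is required (or the value does not fit a one-instruction
// form), li always emits the two-instruction lui/ori pair so the load can
// later be patched to any other 32-bit value. For example, materializing
// 0x12345678 emits:
//
//   lui(rd, 0x1234);      // rd = 0x12340000  (upper 16 bits)
//   ori(rd, rd, 0x5678);  // rd = 0x12345678  (or in the lower 16 bits)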
799 
800 
801 void MacroAssembler::MultiPush(RegList regs) {
802  int16_t num_to_push = NumberOfBitsSet(regs);
803  int16_t stack_offset = num_to_push * kPointerSize;
804 
805  Subu(sp, sp, Operand(stack_offset));
806  for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
807  if ((regs & (1 << i)) != 0) {
808  stack_offset -= kPointerSize;
809  sw(ToRegister(i), MemOperand(sp, stack_offset));
810  }
811  }
812 }
813 
814 
815 void MacroAssembler::MultiPushReversed(RegList regs) {
816  int16_t num_to_push = NumberOfBitsSet(regs);
817  int16_t stack_offset = num_to_push * kPointerSize;
818 
819  Subu(sp, sp, Operand(stack_offset));
820  for (int16_t i = 0; i < kNumRegisters; i++) {
821  if ((regs & (1 << i)) != 0) {
822  stack_offset -= kPointerSize;
823  sw(ToRegister(i), MemOperand(sp, stack_offset));
824  }
825  }
826 }
827 
828 
829 void MacroAssembler::MultiPop(RegList regs) {
830  int16_t stack_offset = 0;
831 
832  for (int16_t i = 0; i < kNumRegisters; i++) {
833  if ((regs & (1 << i)) != 0) {
834  lw(ToRegister(i), MemOperand(sp, stack_offset));
835  stack_offset += kPointerSize;
836  }
837  }
838  addiu(sp, sp, stack_offset);
839 }
840 
841 
842 void MacroAssembler::MultiPopReversed(RegList regs) {
843  int16_t stack_offset = 0;
844 
845  for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
846  if ((regs & (1 << i)) != 0) {
847  lw(ToRegister(i), MemOperand(sp, stack_offset));
848  stack_offset += kPointerSize;
849  }
850  }
851  addiu(sp, sp, stack_offset);
852 }
853 
854 
855 void MacroAssembler::MultiPushFPU(RegList regs) {
856  CpuFeatures::Scope scope(FPU);
857  int16_t num_to_push = NumberOfBitsSet(regs);
858  int16_t stack_offset = num_to_push * kDoubleSize;
859 
860  Subu(sp, sp, Operand(stack_offset));
861  for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
862  if ((regs & (1 << i)) != 0) {
863  stack_offset -= kDoubleSize;
864  sdc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
865  }
866  }
867 }
868 
869 
870 void MacroAssembler::MultiPushReversedFPU(RegList regs) {
871  CpuFeatures::Scope scope(FPU);
872  int16_t num_to_push = NumberOfBitsSet(regs);
873  int16_t stack_offset = num_to_push * kDoubleSize;
874 
875  Subu(sp, sp, Operand(stack_offset));
876  for (int16_t i = 0; i < kNumRegisters; i++) {
877  if ((regs & (1 << i)) != 0) {
878  stack_offset -= kDoubleSize;
879  sdc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
880  }
881  }
882 }
883 
884 
885 void MacroAssembler::MultiPopFPU(RegList regs) {
886  CpuFeatures::Scope scope(FPU);
887  int16_t stack_offset = 0;
888 
889  for (int16_t i = 0; i < kNumRegisters; i++) {
890  if ((regs & (1 << i)) != 0) {
891  ldc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
892  stack_offset += kDoubleSize;
893  }
894  }
895  addiu(sp, sp, stack_offset);
896 }
897 
898 
899 void MacroAssembler::MultiPopReversedFPU(RegList regs) {
900  CpuFeatures::Scope scope(FPU);
901  int16_t stack_offset = 0;
902 
903  for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
904  if ((regs & (1 << i)) != 0) {
905  ldc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
906  stack_offset += kDoubleSize;
907  }
908  }
909  addiu(sp, sp, stack_offset);
910 }
911 
912 
913 void MacroAssembler::FlushICache(Register address, unsigned instructions) {
914  RegList saved_regs = kJSCallerSaved | ra.bit();
915  MultiPush(saved_regs);
916  AllowExternalCallThatCantCauseGC scope(this);
917 
918  // Save to a0 in case address == t0.
919  Move(a0, address);
920  PrepareCallCFunction(2, t0);
921 
922  li(a1, instructions * kInstrSize);
923  CallCFunction(ExternalReference::flush_icache_function(isolate()), 2);
924  MultiPop(saved_regs);
925 }
926 
927 
928 void MacroAssembler::Ext(Register rt,
929  Register rs,
930  uint16_t pos,
931  uint16_t size) {
932  ASSERT(pos < 32);
933  ASSERT(pos + size < 33);
934 
935  if (kArchVariant == kMips32r2) {
936  ext_(rt, rs, pos, size);
937  } else {
938  // Move rs to rt and shift it left then right to get the
939  // desired bitfield on the right side and zeroes on the left.
940  int shift_left = 32 - (pos + size);
941  sll(rt, rs, shift_left); // Acts as a move if shift_left == 0.
942 
943  int shift_right = 32 - size;
944  if (shift_right > 0) {
945  srl(rt, rt, shift_right);
946  }
947  }
948 }
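// Ext zero-extends the size-bit field of rs starting at bit pos into rt, which
// is what the shift-left/shift-right fallback computes. Equivalent C++
// (illustrative helper, not part of V8's API):
//
//   static inline uint32_t ExtSketch(uint32_t rs, unsigned pos, unsigned size) {
//     uint32_t mask = (size == 32) ? 0xFFFFFFFFu : ((1u << size) - 1);
//     return (rs >> pos) & mask;
//   }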
949 
950 
951 void MacroAssembler::Ins(Register rt,
952  Register rs,
953  uint16_t pos,
954  uint16_t size) {
955  ASSERT(pos < 32);
956  ASSERT(pos + size <= 32);
957  ASSERT(size != 0);
958 
959  if (kArchVariant == kMips32r2) {
960  ins_(rt, rs, pos, size);
961  } else {
962  ASSERT(!rt.is(t8) && !rs.is(t8));
963  Subu(at, zero_reg, Operand(1));
964  srl(at, at, 32 - size);
965  and_(t8, rs, at);
966  sll(t8, t8, pos);
967  sll(at, at, pos);
968  nor(at, at, zero_reg);
969  and_(at, rt, at);
970  or_(rt, t8, at);
971  }
972 }
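// Ins replaces the size-bit field of rt starting at bit pos with the low size
// bits of rs, exactly the mask construction used in the fallback above.
// Equivalent C++ (illustrative helper, not part of V8's API):
//
//   static inline uint32_t InsSketch(uint32_t rt, uint32_t rs,
//                                    unsigned pos, unsigned size) {
//     uint32_t field = (size == 32) ? 0xFFFFFFFFu : ((1u << size) - 1);
//     uint32_t mask = field << pos;
//     return (rt & ~mask) | ((rs << pos) & mask);
//   }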
973 
974 
975 void MacroAssembler::Cvt_d_uw(FPURegister fd,
976  FPURegister fs,
977  FPURegister scratch) {
978  // Move the data from fs to t8.
979  mfc1(t8, fs);
980  Cvt_d_uw(fd, t8, scratch);
981 }
982 
983 
984 void MacroAssembler::Cvt_d_uw(FPURegister fd,
985  Register rs,
986  FPURegister scratch) {
987  // Convert rs to a FP value in fd (and fd + 1).
988  // We do this by converting rs minus the MSB to avoid sign conversion,
989  // then adding 2^31 to the result (if needed).
990 
991  ASSERT(!fd.is(scratch));
992  ASSERT(!rs.is(t9));
993  ASSERT(!rs.is(at));
994 
995  // Save rs's MSB to t9.
996  Ext(t9, rs, 31, 1);
997  // Remove rs's MSB.
998  Ext(at, rs, 0, 31);
999  // Move the result to fd.
1000  mtc1(at, fd);
1001 
1002  // Convert fd to a real FP value.
1003  cvt_d_w(fd, fd);
1004 
1005  Label conversion_done;
1006 
1007  // If rs's MSB was 0, it's done.
1008  // Otherwise we need to add that to the FP register.
1009  Branch(&conversion_done, eq, t9, Operand(zero_reg));
1010 
1011  // Load 2^31 into scratch as its float representation.
1012  li(at, 0x41E00000);
1013  mtc1(at, FPURegister::from_code(scratch.code() + 1));
1014  mtc1(zero_reg, scratch);
1015  // Add it to fd.
1016  add_d(fd, fd, scratch);
1017 
1018  bind(&conversion_done);
1019 }
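// 0x41E00000 is the high word of the IEEE-754 double 2^31. The conversion
// above turns an unsigned 32-bit value into a double by converting the low 31
// bits as a signed value and then adding 2^31 back when the stripped MSB was
// set. The same computation in C++ (illustrative, not part of V8's API):
//
//   static inline double CvtDUwSketch(uint32_t rs) {
//     double result =
//         static_cast<double>(static_cast<int32_t>(rs & 0x7FFFFFFFu));
//     if (rs & 0x80000000u) result += 2147483648.0;  // add 2^31 back
//     return result;
//   }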
1020 
1021 
1022 void MacroAssembler::Trunc_uw_d(FPURegister fd,
1023  FPURegister fs,
1024  FPURegister scratch) {
1025  Trunc_uw_d(fs, t8, scratch);
1026  mtc1(t8, fd);
1027 }
1028 
1029 void MacroAssembler::Trunc_w_d(FPURegister fd, FPURegister fs) {
1030  if (kArchVariant == kLoongson && fd.is(fs)) {
1031  mfc1(t8, FPURegister::from_code(fs.code() + 1));
1032  trunc_w_d(fd, fs);
1033  mtc1(t8, FPURegister::from_code(fs.code() + 1));
1034  } else {
1035  trunc_w_d(fd, fs);
1036  }
1037 }
1038 
1039 void MacroAssembler::Round_w_d(FPURegister fd, FPURegister fs) {
1040  if (kArchVariant == kLoongson && fd.is(fs)) {
1041  mfc1(t8, FPURegister::from_code(fs.code() + 1));
1042  round_w_d(fd, fs);
1043  mtc1(t8, FPURegister::from_code(fs.code() + 1));
1044  } else {
1045  round_w_d(fd, fs);
1046  }
1047 }
1048 
1049 
1050 void MacroAssembler::Floor_w_d(FPURegister fd, FPURegister fs) {
1051  if (kArchVariant == kLoongson && fd.is(fs)) {
1052  mfc1(t8, FPURegister::from_code(fs.code() + 1));
1053  floor_w_d(fd, fs);
1054  mtc1(t8, FPURegister::from_code(fs.code() + 1));
1055  } else {
1056  floor_w_d(fd, fs);
1057  }
1058 }
1059 
1060 
1061 void MacroAssembler::Ceil_w_d(FPURegister fd, FPURegister fs) {
1062  if (kArchVariant == kLoongson && fd.is(fs)) {
1063  mfc1(t8, FPURegister::from_code(fs.code() + 1));
1064  ceil_w_d(fd, fs);
1065  mtc1(t8, FPURegister::from_code(fs.code() + 1));
1066  } else {
1067  ceil_w_d(fd, fs);
1068  }
1069 }
1070 
1071 
1072 void MacroAssembler::Trunc_uw_d(FPURegister fd,
1073  Register rs,
1074  FPURegister scratch) {
1075  ASSERT(!fd.is(scratch));
1076  ASSERT(!rs.is(at));
1077 
1078  // Load 2^31 into scratch as its float representation.
1079  li(at, 0x41E00000);
1080  mtc1(at, FPURegister::from_code(scratch.code() + 1));
1081  mtc1(zero_reg, scratch);
1082  // Test if scratch > fd.
1083  // If fd < 2^31 we can convert it normally.
1084  Label simple_convert;
1085  BranchF(&simple_convert, NULL, lt, fd, scratch);
1086 
1087  // First we subtract 2^31 from fd, then trunc it to rs
1088  // and add 2^31 to rs.
1089  sub_d(scratch, fd, scratch);
1090  trunc_w_d(scratch, scratch);
1091  mfc1(rs, scratch);
1092  Or(rs, rs, 1 << 31);
1093 
1094  Label done;
1095  Branch(&done);
1096  // Simple conversion.
1097  bind(&simple_convert);
1098  trunc_w_d(scratch, fd);
1099  mfc1(rs, scratch);
1100 
1101  bind(&done);
1102 }
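// Inverse of Cvt_d_uw above: doubles below 2^31 are truncated directly, while
// larger ones are truncated after subtracting 2^31 and the MSB is then or'ed
// back in. In C++ (illustrative sketch, not part of V8's API; assumes the
// input is in unsigned 32-bit range, as the callers do):
//
//   static inline uint32_t TruncUwDSketch(double fd) {
//     if (fd < 2147483648.0) {
//       return static_cast<uint32_t>(static_cast<int32_t>(fd));
//     }
//     return static_cast<uint32_t>(static_cast<int32_t>(fd - 2147483648.0)) |
//            0x80000000u;
//   }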
1103 
1104 
1105 void MacroAssembler::BranchF(Label* target,
1106  Label* nan,
1107  Condition cc,
1108  FPURegister cmp1,
1109  FPURegister cmp2,
1110  BranchDelaySlot bd) {
1111  if (cc == al) {
1112  Branch(bd, target);
1113  return;
1114  }
1115 
1116  ASSERT(nan || target);
1117  // Check for unordered (NaN) cases.
1118  if (nan) {
1119  c(UN, D, cmp1, cmp2);
1120  bc1t(nan);
1121  }
1122 
1123  if (target) {
1124  // Here NaN cases were either handled by this function or are assumed to
1125  // have been handled by the caller.
1126  // Unsigned conditions are treated as their signed counterpart.
1127  switch (cc) {
1128  case Uless:
1129  case less:
1130  c(OLT, D, cmp1, cmp2);
1131  bc1t(target);
1132  break;
1133  case Ugreater:
1134  case greater:
1135  c(ULE, D, cmp1, cmp2);
1136  bc1f(target);
1137  break;
1138  case Ugreater_equal:
1139  case greater_equal:
1140  c(ULT, D, cmp1, cmp2);
1141  bc1f(target);
1142  break;
1143  case Uless_equal:
1144  case less_equal:
1145  c(OLE, D, cmp1, cmp2);
1146  bc1t(target);
1147  break;
1148  case eq:
1149  c(EQ, D, cmp1, cmp2);
1150  bc1t(target);
1151  break;
1152  case ne:
1153  c(EQ, D, cmp1, cmp2);
1154  bc1f(target);
1155  break;
1156  default:
1157  CHECK(0);
1158  };
1159  }
1160 
1161  if (bd == PROTECT) {
1162  nop();
1163  }
1164 }
1165 
1166 
1167 void MacroAssembler::Move(FPURegister dst, double imm) {
1168  ASSERT(CpuFeatures::IsEnabled(FPU));
1169  static const DoubleRepresentation minus_zero(-0.0);
1170  static const DoubleRepresentation zero(0.0);
1171  DoubleRepresentation value(imm);
1172  // Handle special values first.
1173  bool force_load = dst.is(kDoubleRegZero);
1174  if (value.bits == zero.bits && !force_load) {
1175  mov_d(dst, kDoubleRegZero);
1176  } else if (value.bits == minus_zero.bits && !force_load) {
1177  neg_d(dst, kDoubleRegZero);
1178  } else {
1179  uint32_t lo, hi;
1180  DoubleAsTwoUInt32(imm, &lo, &hi);
1181  // Move the low part of the double into the lower register of the
1182  // corresponding FPU register pair.
1183  if (lo != 0) {
1184  li(at, Operand(lo));
1185  mtc1(at, dst);
1186  } else {
1187  mtc1(zero_reg, dst);
1188  }
1189  // Move the high part of the double into the higher register of the
1190  // corresponding FPU register pair.
1191  if (hi != 0) {
1192  li(at, Operand(hi));
1193  mtc1(at, dst.high());
1194  } else {
1195  mtc1(zero_reg, dst.high());
1196  }
1197  }
1198 }
1199 
1200 
1201 void MacroAssembler::Movz(Register rd, Register rs, Register rt) {
1202  if (kArchVariant == kLoongson) {
1203  Label done;
1204  Branch(&done, ne, rt, Operand(zero_reg));
1205  mov(rd, rs);
1206  bind(&done);
1207  } else {
1208  movz(rd, rs, rt);
1209  }
1210 }
1211 
1212 
1213 void MacroAssembler::Movn(Register rd, Register rs, Register rt) {
1214  if (kArchVariant == kLoongson) {
1215  Label done;
1216  Branch(&done, eq, rt, Operand(zero_reg));
1217  mov(rd, rs);
1218  bind(&done);
1219  } else {
1220  movn(rd, rs, rt);
1221  }
1222 }
1223 
1224 
1225 void MacroAssembler::Movt(Register rd, Register rs, uint16_t cc) {
1226  if (kArchVariant == kLoongson) {
1227  // Tests an FP condition code and then conditionally move rs to rd.
1228  // We do not currently use any FPU cc bit other than bit 0.
1229  ASSERT(cc == 0);
1230  ASSERT(!(rs.is(t8) || rd.is(t8)));
1231  Label done;
1232  Register scratch = t8;
1233  // To test the condition we fetch the contents of the FCSR register and
1234  // then test its cc (floating point condition code) bit (for cc = 0, this
1235  // is bit 23 of the FCSR).
1236  cfc1(scratch, FCSR);
1237  // For the MIPS I, II and III architectures, the contents of scratch are
1238  // UNPREDICTABLE for the instruction immediately following CFC1.
1239  nop();
1240  srl(scratch, scratch, 16);
1241  andi(scratch, scratch, 0x0080);
1242  Branch(&done, eq, scratch, Operand(zero_reg));
1243  mov(rd, rs);
1244  bind(&done);
1245  } else {
1246  movt(rd, rs, cc);
1247  }
1248 }
1249 
1250 
1251 void MacroAssembler::Movf(Register rd, Register rs, uint16_t cc) {
1252  if (kArchVariant == kLoongson) {
1253  // Tests an FP condition code and then conditionally move rs to rd.
1254  // We do not currently use any FPU cc bit other than bit 0.
1255  ASSERT(cc == 0);
1256  ASSERT(!(rs.is(t8) || rd.is(t8)));
1257  Label done;
1258  Register scratch = t8;
1259  // To test the condition we fetch the contents of the FCSR register and
1260  // then test its cc (floating point condition code) bit (for cc = 0, this
1261  // is bit 23 of the FCSR).
1262  cfc1(scratch, FCSR);
1263  // For the MIPS I, II and III architectures, the contents of scratch are
1264  // UNPREDICTABLE for the instruction immediately following CFC1.
1265  nop();
1266  srl(scratch, scratch, 16);
1267  andi(scratch, scratch, 0x0080);
1268  Branch(&done, ne, scratch, Operand(zero_reg));
1269  mov(rd, rs);
1270  bind(&done);
1271  } else {
1272  movf(rd, rs, cc);
1273  }
1274 }
1275 
1276 
1277 void MacroAssembler::Clz(Register rd, Register rs) {
1278  if (kArchVariant == kLoongson) {
1279  ASSERT(!(rd.is(t8) || rd.is(t9)) && !(rs.is(t8) || rs.is(t9)));
1280  Register mask = t8;
1281  Register scratch = t9;
1282  Label loop, end;
1283  mov(at, rs);
1284  mov(rd, zero_reg);
1285  lui(mask, 0x8000);
1286  bind(&loop);
1287  and_(scratch, at, mask);
1288  Branch(&end, ne, scratch, Operand(zero_reg));
1289  addiu(rd, rd, 1);
1290  Branch(&loop, ne, mask, Operand(zero_reg), USE_DELAY_SLOT);
1291  srl(mask, mask, 1);
1292  bind(&end);
1293  } else {
1294  clz(rd, rs);
1295  }
1296 }
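// The Loongson fallback above counts leading zeros by walking a one-bit mask
// down from bit 31 until it overlaps the operand. For a nonzero operand the
// equivalent C++ is (illustrative helper, not part of V8's API):
//
//   static inline int ClzSketch(uint32_t rs) {
//     int count = 0;
//     for (uint32_t mask = 0x80000000u; mask != 0 && (rs & mask) == 0;
//          mask >>= 1) {
//       count++;
//     }
//     return count;
//   }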
1297 
1298 
1299 // Tries to get a signed int32 out of a double precision floating point heap
1300 // number. Rounds towards 0. Branch to 'not_int32' if the double is out of the
1301 // 32-bit signed integer range.
1302 // This method implementation differs from the ARM version for performance
1303 // reasons.
1304 void MacroAssembler::ConvertToInt32(Register source,
1305  Register dest,
1306  Register scratch,
1307  Register scratch2,
1308  FPURegister double_scratch,
1309  Label *not_int32) {
1310  Label right_exponent, done;
1311  // Get exponent word (ENDIAN issues).
1312  lw(scratch, FieldMemOperand(source, HeapNumber::kExponentOffset));
1313  // Get exponent alone in scratch2.
1314  And(scratch2, scratch, Operand(HeapNumber::kExponentMask));
1315  // Load dest with zero. We use this either for the final shift or
1316  // for the answer.
1317  mov(dest, zero_reg);
1318  // Check whether the exponent matches a 32 bit signed int that is not a Smi.
1319  // A non-Smi integer is 1.xxx * 2^30 so the exponent is 30 (biased). This is
1320  // the exponent that we are fastest at and also the highest exponent we can
1321  // handle here.
1322  const uint32_t non_smi_exponent =
1323  (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift;
1324  // If we have a match of the int32-but-not-Smi exponent then skip some logic.
1325  Branch(&right_exponent, eq, scratch2, Operand(non_smi_exponent));
1326  // If the exponent is higher than that then go to not_int32 case. This
1327  // catches numbers that don't fit in a signed int32, infinities and NaNs.
1328  Branch(not_int32, gt, scratch2, Operand(non_smi_exponent));
1329 
1330  // We know the exponent is smaller than 30 (biased). If it is less than
1331  // 0 (biased) then the number is smaller in magnitude than 1.0 * 2^0, i.e.
1332  // it rounds to zero.
1333  const uint32_t zero_exponent =
1334  (HeapNumber::kExponentBias + 0) << HeapNumber::kExponentShift;
1335  Subu(scratch2, scratch2, Operand(zero_exponent));
1336  // Dest already has a Smi zero.
1337  Branch(&done, lt, scratch2, Operand(zero_reg));
1338  if (!CpuFeatures::IsSupported(FPU)) {
1339  // We have a shifted exponent between 0 and 30 in scratch2.
1340  srl(dest, scratch2, HeapNumber::kExponentShift);
1341  // We now have the exponent in dest. Subtract from 30 to get
1342  // how much to shift down.
1343  li(at, Operand(30));
1344  subu(dest, at, dest);
1345  }
1346  bind(&right_exponent);
1347  if (CpuFeatures::IsSupported(FPU)) {
1348  CpuFeatures::Scope scope(FPU);
1349  // MIPS FPU instructions implementing double precision to integer
1350  // conversion using round to zero. Since the FP value was qualified
1351  // above, the resulting integer should be a legal int32.
1352  // The original 'Exponent' word is still in scratch.
1353  lwc1(double_scratch, FieldMemOperand(source, HeapNumber::kMantissaOffset));
1354  mtc1(scratch, FPURegister::from_code(double_scratch.code() + 1));
1355  trunc_w_d(double_scratch, double_scratch);
1356  mfc1(dest, double_scratch);
1357  } else {
1358  // On entry, dest has final downshift, scratch has original sign/exp/mant.
1359  // Save sign bit in top bit of dest.
1360  And(scratch2, scratch, Operand(0x80000000));
1361  Or(dest, dest, Operand(scratch2));
1362  // Put back the implicit 1, just above mantissa field.
1363  Or(scratch, scratch, Operand(1 << HeapNumber::kExponentShift));
1364 
1365  // Shift up the mantissa bits to take up the space the exponent used to
1366  // take. We just orred in the implicit bit so that took care of one and
1367  // we want to leave the sign bit 0 so we subtract 2 bits from the shift
1368  // distance. But we want to clear the sign-bit so shift one more bit
1369  // left, then shift right one bit.
1370  const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
1371  sll(scratch, scratch, shift_distance + 1);
1372  srl(scratch, scratch, 1);
1373 
1374  // Get the second half of the double. For some exponents we don't
1375  // actually need this because the bits get shifted out again, but
1376  // it's probably slower to test than just to do it.
1377  lw(scratch2, FieldMemOperand(source, HeapNumber::kMantissaOffset));
1378  // Extract the top 10 bits of that word and insert them as the bottom 10 bits of scratch.
1379  // The width of the field here is the same as the shift amount above.
1380  const int field_width = shift_distance;
1381  Ext(scratch2, scratch2, 32-shift_distance, field_width);
1382  Ins(scratch, scratch2, 0, field_width);
1383  // Move down according to the exponent.
1384  srlv(scratch, scratch, dest);
1385  // Prepare the negative version of our integer.
1386  subu(scratch2, zero_reg, scratch);
1387  // Trick to check sign bit (msb) held in dest, count leading zero.
1388  // 0 indicates negative, save negative version with conditional move.
1389  Clz(dest, dest);
1390  Movz(scratch, scratch2, dest);
1391  mov(dest, scratch);
1392  }
1393  bind(&done);
1394 }
1395 
1396 
1397 void MacroAssembler::EmitFPUTruncate(FPURoundingMode rounding_mode,
1398  FPURegister result,
1399  DoubleRegister double_input,
1400  Register scratch1,
1401  Register except_flag,
1402  CheckForInexactConversion check_inexact) {
1403  ASSERT(CpuFeatures::IsSupported(FPU));
1404  CpuFeatures::Scope scope(FPU);
1405 
1406  int32_t except_mask = kFCSRFlagMask; // Assume interested in all exceptions.
1407 
1408  if (check_inexact == kDontCheckForInexactConversion) {
1409  // Ignore inexact exceptions.
1410  except_mask &= ~kFCSRInexactFlagMask;
1411  }
1412 
1413  // Save FCSR.
1414  cfc1(scratch1, FCSR);
1415  // Disable FPU exceptions.
1416  ctc1(zero_reg, FCSR);
1417 
1418  // Do operation based on rounding mode.
1419  switch (rounding_mode) {
1420  case kRoundToNearest:
1421  Round_w_d(result, double_input);
1422  break;
1423  case kRoundToZero:
1424  Trunc_w_d(result, double_input);
1425  break;
1426  case kRoundToPlusInf:
1427  Ceil_w_d(result, double_input);
1428  break;
1429  case kRoundToMinusInf:
1430  Floor_w_d(result, double_input);
1431  break;
1432  } // End of switch-statement.
1433 
1434  // Retrieve FCSR.
1435  cfc1(except_flag, FCSR);
1436  // Restore FCSR.
1437  ctc1(scratch1, FCSR);
1438 
1439  // Check for fpu exceptions.
1440  And(except_flag, except_flag, Operand(except_mask));
1441 }
1442 
1443 
1444 void MacroAssembler::EmitOutOfInt32RangeTruncate(Register result,
1445  Register input_high,
1446  Register input_low,
1447  Register scratch) {
1448  Label done, normal_exponent, restore_sign;
1449  // Extract the biased exponent in result.
1450  Ext(result,
1451  input_high,
1452  HeapNumber::kExponentShift,
1453  HeapNumber::kExponentBits);
1454 
1455  // Check for Infinity and NaNs, which should return 0.
1456  Subu(scratch, result, HeapNumber::kExponentMask);
1457  Movz(result, zero_reg, scratch);
1458  Branch(&done, eq, scratch, Operand(zero_reg));
1459 
1460  // Express exponent as delta to (number of mantissa bits + 31).
1461  Subu(result,
1462  result,
1463  Operand(HeapNumber::kExponentBias + HeapNumber::kMantissaBits + 31));
1464 
1465  // If the delta is strictly positive, all bits would be shifted away,
1466  // which means that we can return 0.
1467  Branch(&normal_exponent, le, result, Operand(zero_reg));
1468  mov(result, zero_reg);
1469  Branch(&done);
1470 
1471  bind(&normal_exponent);
1472  const int kShiftBase = HeapNumber::kNonMantissaBitsInTopWord - 1;
1473  // Calculate shift.
1474  Addu(scratch, result, Operand(kShiftBase + HeapNumber::kMantissaBits));
1475 
1476  // Save the sign.
1477  Register sign = result;
1478  result = no_reg;
1479  And(sign, input_high, Operand(HeapNumber::kSignMask));
1480 
1481  // On ARM shifts > 31 bits are valid and will result in zero. On MIPS we need
1482  // to check for this specific case.
1483  Label high_shift_needed, high_shift_done;
1484  Branch(&high_shift_needed, lt, scratch, Operand(32));
1485  mov(input_high, zero_reg);
1486  Branch(&high_shift_done);
1487  bind(&high_shift_needed);
1488 
1489  // Set the implicit 1 before the mantissa part in input_high.
1490  Or(input_high,
1491  input_high,
1492  Operand(1 << HeapNumber::kMantissaBitsInTopWord));
1493  // Shift the mantissa bits to the correct position.
1494  // We don't need to clear non-mantissa bits as they will be shifted away.
1495  // If they weren't, it would mean that the answer is in the 32bit range.
1496  sllv(input_high, input_high, scratch);
1497 
1498  bind(&high_shift_done);
1499 
1500  // Replace the shifted bits with bits from the lower mantissa word.
1501  Label pos_shift, shift_done;
1502  li(at, 32);
1503  subu(scratch, at, scratch);
1504  Branch(&pos_shift, ge, scratch, Operand(zero_reg));
1505 
1506  // Negate scratch.
1507  Subu(scratch, zero_reg, scratch);
1508  sllv(input_low, input_low, scratch);
1509  Branch(&shift_done);
1510 
1511  bind(&pos_shift);
1512  srlv(input_low, input_low, scratch);
1513 
1514  bind(&shift_done);
1515  Or(input_high, input_high, Operand(input_low));
1516  // Restore sign if necessary.
1517  mov(scratch, sign);
1518  result = sign;
1519  sign = no_reg;
1520  Subu(result, zero_reg, input_high);
1521  Movz(result, input_high, scratch);
1522  bind(&done);
1523 }
1524 
1525 
1526 void MacroAssembler::EmitECMATruncate(Register result,
1527  FPURegister double_input,
1528  FPURegister single_scratch,
1529  Register scratch,
1530  Register scratch2,
1531  Register scratch3) {
1532  CpuFeatures::Scope scope(FPU);
1533  ASSERT(!scratch2.is(result));
1534  ASSERT(!scratch3.is(result));
1535  ASSERT(!scratch3.is(scratch2));
1536  ASSERT(!scratch.is(result) &&
1537  !scratch.is(scratch2) &&
1538  !scratch.is(scratch3));
1539  ASSERT(!single_scratch.is(double_input));
1540 
1541  Label done;
1542  Label manual;
1543 
1544  // Clear cumulative exception flags and save the FCSR.
1545  cfc1(scratch2, FCSR);
1546  ctc1(zero_reg, FCSR);
1547  // Try a conversion to a signed integer.
1548  trunc_w_d(single_scratch, double_input);
1549  mfc1(result, single_scratch);
1550  // Retrieve and restore the FCSR.
1551  cfc1(scratch, FCSR);
1552  ctc1(scratch2, FCSR);
1553  // Check for overflow and NaNs.
1554  And(scratch,
1555  scratch,
1556  kFCSROverflowFlagMask | kFCSRUnderflowFlagMask | kFCSRInvalidOpFlagMask);
1557  // If we had no exceptions we are done.
1558  Branch(&done, eq, scratch, Operand(zero_reg));
1559 
1560  // Load the double value and perform a manual truncation.
1561  Register input_high = scratch2;
1562  Register input_low = scratch3;
1563  Move(input_low, input_high, double_input);
1564  EmitOutOfInt32RangeTruncate(result,
1565  input_high,
1566  input_low,
1567  scratch);
1568  bind(&done);
1569 }
1570 
1571 
1572 void MacroAssembler::GetLeastBitsFromSmi(Register dst,
1573  Register src,
1574  int num_least_bits) {
1575  Ext(dst, src, kSmiTagSize, num_least_bits);
1576 }
1577 
1578 
1579 void MacroAssembler::GetLeastBitsFromInt32(Register dst,
1580  Register src,
1581  int num_least_bits) {
1582  And(dst, src, Operand((1 << num_least_bits) - 1));
1583 }
1584 
1585 
1586 // Emulated conditional branches do not emit a nop in the branch delay slot.
1587 //
1588 // BRANCH_ARGS_CHECK checks that conditional jump arguments are correct.
1589 #define BRANCH_ARGS_CHECK(cond, rs, rt) ASSERT( \
1590  (cond == cc_always && rs.is(zero_reg) && rt.rm().is(zero_reg)) || \
1591  (cond != cc_always && (!rs.is(zero_reg) || !rt.rm().is(zero_reg))))
1592 
1593 
1594 void MacroAssembler::Branch(int16_t offset, BranchDelaySlot bdslot) {
1595  BranchShort(offset, bdslot);
1596 }
1597 
1598 
1599 void MacroAssembler::Branch(int16_t offset, Condition cond, Register rs,
1600  const Operand& rt,
1601  BranchDelaySlot bdslot) {
1602  BranchShort(offset, cond, rs, rt, bdslot);
1603 }
1604 
1605 
1606 void MacroAssembler::Branch(Label* L, BranchDelaySlot bdslot) {
1607  if (L->is_bound()) {
1608  if (is_near(L)) {
1609  BranchShort(L, bdslot);
1610  } else {
1611  Jr(L, bdslot);
1612  }
1613  } else {
1614  if (is_trampoline_emitted()) {
1615  Jr(L, bdslot);
1616  } else {
1617  BranchShort(L, bdslot);
1618  }
1619  }
1620 }
1621 
1622 
1623 void MacroAssembler::Branch(Label* L, Condition cond, Register rs,
1624  const Operand& rt,
1625  BranchDelaySlot bdslot) {
1626  if (L->is_bound()) {
1627  if (is_near(L)) {
1628  BranchShort(L, cond, rs, rt, bdslot);
1629  } else {
1630  Label skip;
1631  Condition neg_cond = NegateCondition(cond);
1632  BranchShort(&skip, neg_cond, rs, rt);
1633  Jr(L, bdslot);
1634  bind(&skip);
1635  }
1636  } else {
1637  if (is_trampoline_emitted()) {
1638  Label skip;
1639  Condition neg_cond = NegateCondition(cond);
1640  BranchShort(&skip, neg_cond, rs, rt);
1641  Jr(L, bdslot);
1642  bind(&skip);
1643  } else {
1644  BranchShort(L, cond, rs, rt, bdslot);
1645  }
1646  }
1647 }
1648 
1649 
1650 void MacroAssembler::Branch(Label* L,
1651  Condition cond,
1652  Register rs,
1653  Heap::RootListIndex index,
1654  BranchDelaySlot bdslot) {
1655  LoadRoot(at, index);
1656  Branch(L, cond, rs, Operand(at), bdslot);
1657 }
1658 
1659 
1660 void MacroAssembler::BranchShort(int16_t offset, BranchDelaySlot bdslot) {
1661  b(offset);
1662 
1663  // Emit a nop in the branch delay slot if required.
1664  if (bdslot == PROTECT)
1665  nop();
1666 }
1667 
1668 
1669 void MacroAssembler::BranchShort(int16_t offset, Condition cond, Register rs,
1670  const Operand& rt,
1671  BranchDelaySlot bdslot) {
1672  BRANCH_ARGS_CHECK(cond, rs, rt);
1673  ASSERT(!rs.is(zero_reg));
1674  Register r2 = no_reg;
1675  Register scratch = at;
1676 
1677  if (rt.is_reg()) {
1678  // NOTE: 'at' can be clobbered by Branch but it is legal to use it as rs or
1679  // rt.
1680  r2 = rt.rm_;
1681  switch (cond) {
1682  case cc_always:
1683  b(offset);
1684  break;
1685  case eq:
1686  beq(rs, r2, offset);
1687  break;
1688  case ne:
1689  bne(rs, r2, offset);
1690  break;
1691  // Signed comparison.
1692  case greater:
1693  if (r2.is(zero_reg)) {
1694  bgtz(rs, offset);
1695  } else {
1696  slt(scratch, r2, rs);
1697  bne(scratch, zero_reg, offset);
1698  }
1699  break;
1700  case greater_equal:
1701  if (r2.is(zero_reg)) {
1702  bgez(rs, offset);
1703  } else {
1704  slt(scratch, rs, r2);
1705  beq(scratch, zero_reg, offset);
1706  }
1707  break;
1708  case less:
1709  if (r2.is(zero_reg)) {
1710  bltz(rs, offset);
1711  } else {
1712  slt(scratch, rs, r2);
1713  bne(scratch, zero_reg, offset);
1714  }
1715  break;
1716  case less_equal:
1717  if (r2.is(zero_reg)) {
1718  blez(rs, offset);
1719  } else {
1720  slt(scratch, r2, rs);
1721  beq(scratch, zero_reg, offset);
1722  }
1723  break;
1724  // Unsigned comparison.
1725  case Ugreater:
1726  if (r2.is(zero_reg)) {
1727  bgtz(rs, offset);
1728  } else {
1729  sltu(scratch, r2, rs);
1730  bne(scratch, zero_reg, offset);
1731  }
1732  break;
1733  case Ugreater_equal:
1734  if (r2.is(zero_reg)) {
1735  bgez(rs, offset);
1736  } else {
1737  sltu(scratch, rs, r2);
1738  beq(scratch, zero_reg, offset);
1739  }
1740  break;
1741  case Uless:
1742  if (r2.is(zero_reg)) {
1743  // No code needs to be emitted.
1744  return;
1745  } else {
1746  sltu(scratch, rs, r2);
1747  bne(scratch, zero_reg, offset);
1748  }
1749  break;
1750  case Uless_equal:
1751  if (r2.is(zero_reg)) {
1752  b(offset);
1753  } else {
1754  sltu(scratch, r2, rs);
1755  beq(scratch, zero_reg, offset);
1756  }
1757  break;
1758  default:
1759  UNREACHABLE();
1760  }
1761  } else {
1762  // Be careful to always use shifted_branch_offset only just before the
1763  // branch instruction, as the location will be remembered for patching the
1764  // target.
1765  switch (cond) {
1766  case cc_always:
1767  b(offset);
1768  break;
1769  case eq:
1770  // We don't want any other register but scratch clobbered.
1771  ASSERT(!scratch.is(rs));
1772  r2 = scratch;
1773  li(r2, rt);
1774  beq(rs, r2, offset);
1775  break;
1776  case ne:
1777  // We don't want any other register but scratch clobbered.
1778  ASSERT(!scratch.is(rs));
1779  r2 = scratch;
1780  li(r2, rt);
1781  bne(rs, r2, offset);
1782  break;
1783  // Signed comparison.
1784  case greater:
1785  if (rt.imm32_ == 0) {
1786  bgtz(rs, offset);
1787  } else {
1788  r2 = scratch;
1789  li(r2, rt);
1790  slt(scratch, r2, rs);
1791  bne(scratch, zero_reg, offset);
1792  }
1793  break;
1794  case greater_equal:
1795  if (rt.imm32_ == 0) {
1796  bgez(rs, offset);
1797  } else if (is_int16(rt.imm32_)) {
1798  slti(scratch, rs, rt.imm32_);
1799  beq(scratch, zero_reg, offset);
1800  } else {
1801  r2 = scratch;
1802  li(r2, rt);
1803  slt(scratch, rs, r2);
1804  beq(scratch, zero_reg, offset);
1805  }
1806  break;
1807  case less:
1808  if (rt.imm32_ == 0) {
1809  bltz(rs, offset);
1810  } else if (is_int16(rt.imm32_)) {
1811  slti(scratch, rs, rt.imm32_);
1812  bne(scratch, zero_reg, offset);
1813  } else {
1814  r2 = scratch;
1815  li(r2, rt);
1816  slt(scratch, rs, r2);
1817  bne(scratch, zero_reg, offset);
1818  }
1819  break;
1820  case less_equal:
1821  if (rt.imm32_ == 0) {
1822  blez(rs, offset);
1823  } else {
1824  r2 = scratch;
1825  li(r2, rt);
1826  slt(scratch, r2, rs);
1827  beq(scratch, zero_reg, offset);
1828  }
1829  break;
1830  // Unsigned comparison.
1831  case Ugreater:
1832  if (rt.imm32_ == 0) {
1833  bgtz(rs, offset);
1834  } else {
1835  r2 = scratch;
1836  li(r2, rt);
1837  sltu(scratch, r2, rs);
1838  bne(scratch, zero_reg, offset);
1839  }
1840  break;
1841  case Ugreater_equal:
1842  if (rt.imm32_ == 0) {
1843  bgez(rs, offset);
1844  } else if (is_int16(rt.imm32_)) {
1845  sltiu(scratch, rs, rt.imm32_);
1846  beq(scratch, zero_reg, offset);
1847  } else {
1848  r2 = scratch;
1849  li(r2, rt);
1850  sltu(scratch, rs, r2);
1851  beq(scratch, zero_reg, offset);
1852  }
1853  break;
1854  case Uless:
1855  if (rt.imm32_ == 0) {
1856  // No code needs to be emitted.
1857  return;
1858  } else if (is_int16(rt.imm32_)) {
1859  sltiu(scratch, rs, rt.imm32_);
1860  bne(scratch, zero_reg, offset);
1861  } else {
1862  r2 = scratch;
1863  li(r2, rt);
1864  sltu(scratch, rs, r2);
1865  bne(scratch, zero_reg, offset);
1866  }
1867  break;
1868  case Uless_equal:
1869  if (rt.imm32_ == 0) {
1870  b(offset);
1871  } else {
1872  r2 = scratch;
1873  li(r2, rt);
1874  sltu(scratch, r2, rs);
1875  beq(scratch, zero_reg, offset);
1876  }
1877  break;
1878  default:
1879  UNREACHABLE();
1880  }
1881  }
1882  // Emit a nop in the branch delay slot if required.
1883  if (bdslot == PROTECT)
1884  nop();
1885 }
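
// The signed and unsigned comparisons above are synthesized from slt/sltu
// plus a branch against zero_reg, because beq/bne are the only two-register
// conditional branches on MIPS32. A minimal stand-alone sketch of that
// pattern (illustrative helpers, not code from this file):
static bool BranchLessTaken(int rs, int rt) {
  int scratch = (rs < rt) ? 1 : 0;   // slt scratch, rs, rt
  return scratch != 0;               // bne scratch, zero_reg, offset
}

static bool BranchUgreaterTaken(unsigned rs, unsigned rt) {
  int scratch = (rt < rs) ? 1 : 0;   // sltu scratch, r2, rs
  return scratch != 0;               // bne scratch, zero_reg, offset
}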
1886 
1887 
1888 void MacroAssembler::BranchShort(Label* L, BranchDelaySlot bdslot) {
1889  // We use branch_offset as an argument for the branch instructions to be sure
1890  // it is called just before generating the branch instruction, as needed.
1891 
1892  b(shifted_branch_offset(L, false));
1893 
1894  // Emit a nop in the branch delay slot if required.
1895  if (bdslot == PROTECT)
1896  nop();
1897 }
1898 
1899 
1900 void MacroAssembler::BranchShort(Label* L, Condition cond, Register rs,
1901  const Operand& rt,
1902  BranchDelaySlot bdslot) {
1903  BRANCH_ARGS_CHECK(cond, rs, rt);
1904 
1905  int32_t offset;
1906  Register r2 = no_reg;
1907  Register scratch = at;
1908  if (rt.is_reg()) {
1909  r2 = rt.rm_;
1910  // Be careful to always use shifted_branch_offset only just before the
1911  // branch instruction, as the location will be remembered for patching the
1912  // target.
1913  switch (cond) {
1914  case cc_always:
1915  offset = shifted_branch_offset(L, false);
1916  b(offset);
1917  break;
1918  case eq:
1919  offset = shifted_branch_offset(L, false);
1920  beq(rs, r2, offset);
1921  break;
1922  case ne:
1923  offset = shifted_branch_offset(L, false);
1924  bne(rs, r2, offset);
1925  break;
1926  // Signed comparison.
1927  case greater:
1928  if (r2.is(zero_reg)) {
1929  offset = shifted_branch_offset(L, false);
1930  bgtz(rs, offset);
1931  } else {
1932  slt(scratch, r2, rs);
1933  offset = shifted_branch_offset(L, false);
1934  bne(scratch, zero_reg, offset);
1935  }
1936  break;
1937  case greater_equal:
1938  if (r2.is(zero_reg)) {
1939  offset = shifted_branch_offset(L, false);
1940  bgez(rs, offset);
1941  } else {
1942  slt(scratch, rs, r2);
1943  offset = shifted_branch_offset(L, false);
1944  beq(scratch, zero_reg, offset);
1945  }
1946  break;
1947  case less:
1948  if (r2.is(zero_reg)) {
1949  offset = shifted_branch_offset(L, false);
1950  bltz(rs, offset);
1951  } else {
1952  slt(scratch, rs, r2);
1953  offset = shifted_branch_offset(L, false);
1954  bne(scratch, zero_reg, offset);
1955  }
1956  break;
1957  case less_equal:
1958  if (r2.is(zero_reg)) {
1959  offset = shifted_branch_offset(L, false);
1960  blez(rs, offset);
1961  } else {
1962  slt(scratch, r2, rs);
1963  offset = shifted_branch_offset(L, false);
1964  beq(scratch, zero_reg, offset);
1965  }
1966  break;
1967  // Unsigned comparison.
1968  case Ugreater:
1969  if (r2.is(zero_reg)) {
1970  offset = shifted_branch_offset(L, false);
1971  bgtz(rs, offset);
1972  } else {
1973  sltu(scratch, r2, rs);
1974  offset = shifted_branch_offset(L, false);
1975  bne(scratch, zero_reg, offset);
1976  }
1977  break;
1978  case Ugreater_equal:
1979  if (r2.is(zero_reg)) {
1980  offset = shifted_branch_offset(L, false);
1981  bgez(rs, offset);
1982  } else {
1983  sltu(scratch, rs, r2);
1984  offset = shifted_branch_offset(L, false);
1985  beq(scratch, zero_reg, offset);
1986  }
1987  break;
1988  case Uless:
1989  if (r2.is(zero_reg)) {
1990  // No code needs to be emitted.
1991  return;
1992  } else {
1993  sltu(scratch, rs, r2);
1994  offset = shifted_branch_offset(L, false);
1995  bne(scratch, zero_reg, offset);
1996  }
1997  break;
1998  case Uless_equal:
1999  if (r2.is(zero_reg)) {
2000  offset = shifted_branch_offset(L, false);
2001  b(offset);
2002  } else {
2003  sltu(scratch, r2, rs);
2004  offset = shifted_branch_offset(L, false);
2005  beq(scratch, zero_reg, offset);
2006  }
2007  break;
2008  default:
2009  UNREACHABLE();
2010  }
2011  } else {
2012  // Be careful to always use shifted_branch_offset only just before the
2013  // branch instruction, as the location will be remembered for patching the
2014  // target.
2015  switch (cond) {
2016  case cc_always:
2017  offset = shifted_branch_offset(L, false);
2018  b(offset);
2019  break;
2020  case eq:
2021  ASSERT(!scratch.is(rs));
2022  r2 = scratch;
2023  li(r2, rt);
2024  offset = shifted_branch_offset(L, false);
2025  beq(rs, r2, offset);
2026  break;
2027  case ne:
2028  ASSERT(!scratch.is(rs));
2029  r2 = scratch;
2030  li(r2, rt);
2031  offset = shifted_branch_offset(L, false);
2032  bne(rs, r2, offset);
2033  break;
2034  // Signed comparison.
2035  case greater:
2036  if (rt.imm32_ == 0) {
2037  offset = shifted_branch_offset(L, false);
2038  bgtz(rs, offset);
2039  } else {
2040  ASSERT(!scratch.is(rs));
2041  r2 = scratch;
2042  li(r2, rt);
2043  slt(scratch, r2, rs);
2044  offset = shifted_branch_offset(L, false);
2045  bne(scratch, zero_reg, offset);
2046  }
2047  break;
2048  case greater_equal:
2049  if (rt.imm32_ == 0) {
2050  offset = shifted_branch_offset(L, false);
2051  bgez(rs, offset);
2052  } else if (is_int16(rt.imm32_)) {
2053  slti(scratch, rs, rt.imm32_);
2054  offset = shifted_branch_offset(L, false);
2055  beq(scratch, zero_reg, offset);
2056  } else {
2057  ASSERT(!scratch.is(rs));
2058  r2 = scratch;
2059  li(r2, rt);
2060  slt(scratch, rs, r2);
2061  offset = shifted_branch_offset(L, false);
2062  beq(scratch, zero_reg, offset);
2063  }
2064  break;
2065  case less:
2066  if (rt.imm32_ == 0) {
2067  offset = shifted_branch_offset(L, false);
2068  bltz(rs, offset);
2069  } else if (is_int16(rt.imm32_)) {
2070  slti(scratch, rs, rt.imm32_);
2071  offset = shifted_branch_offset(L, false);
2072  bne(scratch, zero_reg, offset);
2073  } else {
2074  ASSERT(!scratch.is(rs));
2075  r2 = scratch;
2076  li(r2, rt);
2077  slt(scratch, rs, r2);
2078  offset = shifted_branch_offset(L, false);
2079  bne(scratch, zero_reg, offset);
2080  }
2081  break;
2082  case less_equal:
2083  if (rt.imm32_ == 0) {
2084  offset = shifted_branch_offset(L, false);
2085  blez(rs, offset);
2086  } else {
2087  ASSERT(!scratch.is(rs));
2088  r2 = scratch;
2089  li(r2, rt);
2090  slt(scratch, r2, rs);
2091  offset = shifted_branch_offset(L, false);
2092  beq(scratch, zero_reg, offset);
2093  }
2094  break;
2095  // Unsigned comparison.
2096  case Ugreater:
2097  if (rt.imm32_ == 0) {
2098  offset = shifted_branch_offset(L, false);
2099  bgtz(rs, offset);
2100  } else {
2101  ASSERT(!scratch.is(rs));
2102  r2 = scratch;
2103  li(r2, rt);
2104  sltu(scratch, r2, rs);
2105  offset = shifted_branch_offset(L, false);
2106  bne(scratch, zero_reg, offset);
2107  }
2108  break;
2109  case Ugreater_equal:
2110  if (rt.imm32_ == 0) {
2111  offset = shifted_branch_offset(L, false);
2112  bgez(rs, offset);
2113  } else if (is_int16(rt.imm32_)) {
2114  sltiu(scratch, rs, rt.imm32_);
2115  offset = shifted_branch_offset(L, false);
2116  beq(scratch, zero_reg, offset);
2117  } else {
2118  ASSERT(!scratch.is(rs));
2119  r2 = scratch;
2120  li(r2, rt);
2121  sltu(scratch, rs, r2);
2122  offset = shifted_branch_offset(L, false);
2123  beq(scratch, zero_reg, offset);
2124  }
2125  break;
2126  case Uless:
2127  if (rt.imm32_ == 0) {
2128  // No code needs to be emitted.
2129  return;
2130  } else if (is_int16(rt.imm32_)) {
2131  sltiu(scratch, rs, rt.imm32_);
2132  offset = shifted_branch_offset(L, false);
2133  bne(scratch, zero_reg, offset);
2134  } else {
2135  ASSERT(!scratch.is(rs));
2136  r2 = scratch;
2137  li(r2, rt);
2138  sltu(scratch, rs, r2);
2139  offset = shifted_branch_offset(L, false);
2140  bne(scratch, zero_reg, offset);
2141  }
2142  break;
2143  case Uless_equal:
2144  if (rt.imm32_ == 0) {
2145  offset = shifted_branch_offset(L, false);
2146  b(offset);
2147  } else {
2148  ASSERT(!scratch.is(rs));
2149  r2 = scratch;
2150  li(r2, rt);
2151  sltu(scratch, r2, rs);
2152  offset = shifted_branch_offset(L, false);
2153  beq(scratch, zero_reg, offset);
2154  }
2155  break;
2156  default:
2157  UNREACHABLE();
2158  }
2159  }
2160  // Check that offset actually fits in an int16_t.
2161  ASSERT(is_int16(offset));
2162  // Emit a nop in the branch delay slot if required.
2163  if (bdslot == PROTECT)
2164  nop();
2165 }
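
// Short branches encode a signed 16-bit instruction offset, which is what the
// ASSERT(is_int16(offset)) above verifies. An illustrative sketch of that
// range test, assuming the usual two's-complement bounds:
static bool FitsInInt16(int value) {
  return value >= -32768 && value <= 32767;
}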
2166 
2167 
2168 void MacroAssembler::BranchAndLink(int16_t offset, BranchDelaySlot bdslot) {
2169  BranchAndLinkShort(offset, bdslot);
2170 }
2171 
2172 
2173 void MacroAssembler::BranchAndLink(int16_t offset, Condition cond, Register rs,
2174  const Operand& rt,
2175  BranchDelaySlot bdslot) {
2176  BranchAndLinkShort(offset, cond, rs, rt, bdslot);
2177 }
2178 
2179 
2180 void MacroAssembler::BranchAndLink(Label* L, BranchDelaySlot bdslot) {
2181  if (L->is_bound()) {
2182  if (is_near(L)) {
2183  BranchAndLinkShort(L, bdslot);
2184  } else {
2185  Jalr(L, bdslot);
2186  }
2187  } else {
2188  if (is_trampoline_emitted()) {
2189  Jalr(L, bdslot);
2190  } else {
2191  BranchAndLinkShort(L, bdslot);
2192  }
2193  }
2194 }
2195 
2196 
2197 void MacroAssembler::BranchAndLink(Label* L, Condition cond, Register rs,
2198  const Operand& rt,
2199  BranchDelaySlot bdslot) {
2200  if (L->is_bound()) {
2201  if (is_near(L)) {
2202  BranchAndLinkShort(L, cond, rs, rt, bdslot);
2203  } else {
2204  Label skip;
2205  Condition neg_cond = NegateCondition(cond);
2206  BranchShort(&skip, neg_cond, rs, rt);
2207  Jalr(L, bdslot);
2208  bind(&skip);
2209  }
2210  } else {
2211  if (is_trampoline_emitted()) {
2212  Label skip;
2213  Condition neg_cond = NegateCondition(cond);
2214  BranchShort(&skip, neg_cond, rs, rt);
2215  Jalr(L, bdslot);
2216  bind(&skip);
2217  } else {
2218  BranchAndLinkShort(L, cond, rs, rt, bdslot);
2219  }
2220  }
2221 }
2222 
2223 
2224 // We need to use a bgezal or bltzal, but they can't be used directly with the
2225 // slt instructions. We could use sub or add instead but we would miss overflow
2226 // cases, so we keep slt and add an intermediate third instruction.
2227 void MacroAssembler::BranchAndLinkShort(int16_t offset,
2228  BranchDelaySlot bdslot) {
2229  bal(offset);
2230 
2231  // Emit a nop in the branch delay slot if required.
2232  if (bdslot == PROTECT)
2233  nop();
2234 }
2235 
2236 
2237 void MacroAssembler::BranchAndLinkShort(int16_t offset, Condition cond,
2238  Register rs, const Operand& rt,
2239  BranchDelaySlot bdslot) {
2240  BRANCH_ARGS_CHECK(cond, rs, rt);
2241  Register r2 = no_reg;
2242  Register scratch = at;
2243 
2244  if (rt.is_reg()) {
2245  r2 = rt.rm_;
2246  } else if (cond != cc_always) {
2247  r2 = scratch;
2248  li(r2, rt);
2249  }
2250 
2251  switch (cond) {
2252  case cc_always:
2253  bal(offset);
2254  break;
2255  case eq:
2256  bne(rs, r2, 2);
2257  nop();
2258  bal(offset);
2259  break;
2260  case ne:
2261  beq(rs, r2, 2);
2262  nop();
2263  bal(offset);
2264  break;
2265 
2266  // Signed comparison.
2267  case greater:
2268  slt(scratch, r2, rs);
2269  addiu(scratch, scratch, -1);
2270  bgezal(scratch, offset);
2271  break;
2272  case greater_equal:
2273  slt(scratch, rs, r2);
2274  addiu(scratch, scratch, -1);
2275  bltzal(scratch, offset);
2276  break;
2277  case less:
2278  slt(scratch, rs, r2);
2279  addiu(scratch, scratch, -1);
2280  bgezal(scratch, offset);
2281  break;
2282  case less_equal:
2283  slt(scratch, r2, rs);
2284  addiu(scratch, scratch, -1);
2285  bltzal(scratch, offset);
2286  break;
2287 
2288  // Unsigned comparison.
2289  case Ugreater:
2290  sltu(scratch, r2, rs);
2291  addiu(scratch, scratch, -1);
2292  bgezal(scratch, offset);
2293  break;
2294  case Ugreater_equal:
2295  sltu(scratch, rs, r2);
2296  addiu(scratch, scratch, -1);
2297  bltzal(scratch, offset);
2298  break;
2299  case Uless:
2300  sltu(scratch, rs, r2);
2301  addiu(scratch, scratch, -1);
2302  bgezal(scratch, offset);
2303  break;
2304  case Uless_equal:
2305  sltu(scratch, r2, rs);
2306  addiu(scratch, scratch, -1);
2307  bltzal(scratch, offset);
2308  break;
2309 
2310  default:
2311  UNREACHABLE();
2312  }
2313  // Emit a nop in the branch delay slot if required.
2314  if (bdslot == PROTECT)
2315  nop();
2316 }
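
// bgezal/bltzal can only test the sign of a single register, so the code
// above first materializes the comparison with slt/sltu (0 or 1), subtracts 1
// to map that to -1 or 0, and then branch-and-links on the sign. Illustrative
// sketch (not code from this file):
static bool BgezalTakenAfterSlt(bool condition_holds) {
  int scratch = condition_holds ? 1 : 0;  // slt(u) result
  scratch = scratch - 1;                  // addiu scratch, scratch, -1
  return scratch >= 0;                    // bgezal: taken when scratch >= 0
}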
2317 
2318 
2319 void MacroAssembler::BranchAndLinkShort(Label* L, BranchDelaySlot bdslot) {
2320  bal(shifted_branch_offset(L, false));
2321 
2322  // Emit a nop in the branch delay slot if required.
2323  if (bdslot == PROTECT)
2324  nop();
2325 }
2326 
2327 
2328 void MacroAssembler::BranchAndLinkShort(Label* L, Condition cond, Register rs,
2329  const Operand& rt,
2330  BranchDelaySlot bdslot) {
2331  BRANCH_ARGS_CHECK(cond, rs, rt);
2332 
2333  int32_t offset;
2334  Register r2 = no_reg;
2335  Register scratch = at;
2336  if (rt.is_reg()) {
2337  r2 = rt.rm_;
2338  } else if (cond != cc_always) {
2339  r2 = scratch;
2340  li(r2, rt);
2341  }
2342 
2343  switch (cond) {
2344  case cc_always:
2345  offset = shifted_branch_offset(L, false);
2346  bal(offset);
2347  break;
2348  case eq:
2349  bne(rs, r2, 2);
2350  nop();
2351  offset = shifted_branch_offset(L, false);
2352  bal(offset);
2353  break;
2354  case ne:
2355  beq(rs, r2, 2);
2356  nop();
2357  offset = shifted_branch_offset(L, false);
2358  bal(offset);
2359  break;
2360 
2361  // Signed comparison.
2362  case greater:
2363  slt(scratch, r2, rs);
2364  addiu(scratch, scratch, -1);
2365  offset = shifted_branch_offset(L, false);
2366  bgezal(scratch, offset);
2367  break;
2368  case greater_equal:
2369  slt(scratch, rs, r2);
2370  addiu(scratch, scratch, -1);
2371  offset = shifted_branch_offset(L, false);
2372  bltzal(scratch, offset);
2373  break;
2374  case less:
2375  slt(scratch, rs, r2);
2376  addiu(scratch, scratch, -1);
2377  offset = shifted_branch_offset(L, false);
2378  bgezal(scratch, offset);
2379  break;
2380  case less_equal:
2381  slt(scratch, r2, rs);
2382  addiu(scratch, scratch, -1);
2383  offset = shifted_branch_offset(L, false);
2384  bltzal(scratch, offset);
2385  break;
2386 
2387  // Unsigned comparison.
2388  case Ugreater:
2389  sltu(scratch, r2, rs);
2390  addiu(scratch, scratch, -1);
2391  offset = shifted_branch_offset(L, false);
2392  bgezal(scratch, offset);
2393  break;
2394  case Ugreater_equal:
2395  sltu(scratch, rs, r2);
2396  addiu(scratch, scratch, -1);
2397  offset = shifted_branch_offset(L, false);
2398  bltzal(scratch, offset);
2399  break;
2400  case Uless:
2401  sltu(scratch, rs, r2);
2402  addiu(scratch, scratch, -1);
2403  offset = shifted_branch_offset(L, false);
2404  bgezal(scratch, offset);
2405  break;
2406  case Uless_equal:
2407  sltu(scratch, r2, rs);
2408  addiu(scratch, scratch, -1);
2409  offset = shifted_branch_offset(L, false);
2410  bltzal(scratch, offset);
2411  break;
2412 
2413  default:
2414  UNREACHABLE();
2415  }
2416 
2417  // Check that offset actually fits in an int16_t.
2418  ASSERT(is_int16(offset));
2419 
2420  // Emit a nop in the branch delay slot if required.
2421  if (bdslot == PROTECT)
2422  nop();
2423 }
2424 
2425 
2426 void MacroAssembler::Jump(Register target,
2427  Condition cond,
2428  Register rs,
2429  const Operand& rt,
2430  BranchDelaySlot bd) {
2431  BlockTrampolinePoolScope block_trampoline_pool(this);
2432  if (cond == cc_always) {
2433  jr(target);
2434  } else {
2435  BRANCH_ARGS_CHECK(cond, rs, rt);
2436  Branch(2, NegateCondition(cond), rs, rt);
2437  jr(target);
2438  }
2439  // Emit a nop in the branch delay slot if required.
2440  if (bd == PROTECT)
2441  nop();
2442 }
2443 
2444 
2445 void MacroAssembler::Jump(intptr_t target,
2446  RelocInfo::Mode rmode,
2447  Condition cond,
2448  Register rs,
2449  const Operand& rt,
2450  BranchDelaySlot bd) {
2451  Label skip;
2452  if (cond != cc_always) {
2453  Branch(USE_DELAY_SLOT, &skip, NegateCondition(cond), rs, rt);
2454  }
2455  // The first instruction of 'li' may be placed in the delay slot.
2456  // This is not an issue because t9 is expected to be clobbered anyway.
2457  li(t9, Operand(target, rmode));
2458  Jump(t9, al, zero_reg, Operand(zero_reg), bd);
2459  bind(&skip);
2460 }
2461 
2462 
2463 void MacroAssembler::Jump(Address target,
2464  RelocInfo::Mode rmode,
2465  Condition cond,
2466  Register rs,
2467  const Operand& rt,
2468  BranchDelaySlot bd) {
2469  ASSERT(!RelocInfo::IsCodeTarget(rmode));
2470  Jump(reinterpret_cast<intptr_t>(target), rmode, cond, rs, rt, bd);
2471 }
2472 
2473 
2474 void MacroAssembler::Jump(Handle<Code> code,
2475  RelocInfo::Mode rmode,
2476  Condition cond,
2477  Register rs,
2478  const Operand& rt,
2479  BranchDelaySlot bd) {
2480  ASSERT(RelocInfo::IsCodeTarget(rmode));
2481  Jump(reinterpret_cast<intptr_t>(code.location()), rmode, cond, rs, rt, bd);
2482 }
2483 
2484 
2485 int MacroAssembler::CallSize(Register target,
2486  Condition cond,
2487  Register rs,
2488  const Operand& rt,
2489  BranchDelaySlot bd) {
2490  int size = 0;
2491 
2492  if (cond == cc_always) {
2493  size += 1;
2494  } else {
2495  size += 3;
2496  }
2497 
2498  if (bd == PROTECT)
2499  size += 1;
2500 
2501  return size * kInstrSize;
2502 }
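
// Worked sketch of the size computed above (illustrative helper, assuming a
// kInstrSize of 4 bytes on MIPS32): an unconditional call is a single jalr; a
// conditional call adds a branch and its delay-slot nop; PROTECT appends one
// more nop after the jalr.
static int CallSizeInBytes(bool conditional, bool protect_delay_slot) {
  const int kInstrSizeBytes = 4;  // assumption for illustration
  int instructions = conditional ? 3 : 1;
  if (protect_delay_slot) instructions += 1;
  return instructions * kInstrSizeBytes;  // e.g. 4 * 4 = 16 bytes for a
                                          // protected conditional call
}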
2503 
2504 
2505 // Note: To call gcc-compiled C code on MIPS, you must call through t9.
2506 void MacroAssembler::Call(Register target,
2507  Condition cond,
2508  Register rs,
2509  const Operand& rt,
2510  BranchDelaySlot bd) {
2511  BlockTrampolinePoolScope block_trampoline_pool(this);
2512  Label start;
2513  bind(&start);
2514  if (cond == cc_always) {
2515  jalr(target);
2516  } else {
2517  BRANCH_ARGS_CHECK(cond, rs, rt);
2518  Branch(2, NegateCondition(cond), rs, rt);
2519  jalr(target);
2520  }
2521  // Emit a nop in the branch delay slot if required.
2522  if (bd == PROTECT)
2523  nop();
2524 
2525  ASSERT_EQ(CallSize(target, cond, rs, rt, bd),
2526  SizeOfCodeGeneratedSince(&start));
2527 }
2528 
2529 
2530 int MacroAssembler::CallSize(Address target,
2531  RelocInfo::Mode rmode,
2532  Condition cond,
2533  Register rs,
2534  const Operand& rt,
2535  BranchDelaySlot bd) {
2536  int size = CallSize(t9, cond, rs, rt, bd);
2537  return size + 2 * kInstrSize;
2538 }
2539 
2540 
2541 void MacroAssembler::Call(Address target,
2542  RelocInfo::Mode rmode,
2543  Condition cond,
2544  Register rs,
2545  const Operand& rt,
2546  BranchDelaySlot bd) {
2547  BlockTrampolinePoolScope block_trampoline_pool(this);
2548  Label start;
2549  bind(&start);
2550  int32_t target_int = reinterpret_cast<int32_t>(target);
2551  // Must record previous source positions before the
2552  // li() generates a new code target.
2553  positions_recorder()->WriteRecordedPositions();
2554  li(t9, Operand(target_int, rmode), CONSTANT_SIZE);
2555  Call(t9, cond, rs, rt, bd);
2556  ASSERT_EQ(CallSize(target, rmode, cond, rs, rt, bd),
2557  SizeOfCodeGeneratedSince(&start));
2558 }
2559 
2560 
2561 int MacroAssembler::CallSize(Handle<Code> code,
2562  RelocInfo::Mode rmode,
2563  TypeFeedbackId ast_id,
2564  Condition cond,
2565  Register rs,
2566  const Operand& rt,
2567  BranchDelaySlot bd) {
2568  return CallSize(reinterpret_cast<Address>(code.location()),
2569  rmode, cond, rs, rt, bd);
2570 }
2571 
2572 
2573 void MacroAssembler::Call(Handle<Code> code,
2574  RelocInfo::Mode rmode,
2575  TypeFeedbackId ast_id,
2576  Condition cond,
2577  Register rs,
2578  const Operand& rt,
2579  BranchDelaySlot bd) {
2580  BlockTrampolinePoolScope block_trampoline_pool(this);
2581  Label start;
2582  bind(&start);
2583  ASSERT(RelocInfo::IsCodeTarget(rmode));
2584  if (rmode == RelocInfo::CODE_TARGET && !ast_id.IsNone()) {
2585  SetRecordedAstId(ast_id);
2586  rmode = RelocInfo::CODE_TARGET_WITH_ID;
2587  }
2588  Call(reinterpret_cast<Address>(code.location()), rmode, cond, rs, rt, bd);
2589  ASSERT_EQ(CallSize(code, rmode, ast_id, cond, rs, rt, bd),
2590  SizeOfCodeGeneratedSince(&start));
2591 }
2592 
2593 
2594 void MacroAssembler::Ret(Condition cond,
2595  Register rs,
2596  const Operand& rt,
2597  BranchDelaySlot bd) {
2598  Jump(ra, cond, rs, rt, bd);
2599 }
2600 
2601 
2602 void MacroAssembler::J(Label* L, BranchDelaySlot bdslot) {
2603  BlockTrampolinePoolScope block_trampoline_pool(this);
2604 
2605  uint32_t imm28;
2606  imm28 = jump_address(L);
2607  imm28 &= kImm28Mask;
2608  { BlockGrowBufferScope block_buf_growth(this);
2609  // Buffer growth (and relocation) must be blocked for internal references
2610  // until associated instructions are emitted and available to be patched.
2611  RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
2612  j(imm28);
2613  }
2614  // Emit a nop in the branch delay slot if required.
2615  if (bdslot == PROTECT)
2616  nop();
2617 }
2618 
2619 
2620 void MacroAssembler::Jr(Label* L, BranchDelaySlot bdslot) {
2621  BlockTrampolinePoolScope block_trampoline_pool(this);
2622 
2623  uint32_t imm32;
2624  imm32 = jump_address(L);
2625  { BlockGrowBufferScope block_buf_growth(this);
2626  // Buffer growth (and relocation) must be blocked for internal references
2627  // until associated instructions are emitted and available to be patched.
2628  RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
2629  lui(at, (imm32 & kHiMask) >> kLuiShift);
2630  ori(at, at, (imm32 & kImm16Mask));
2631  }
2632  jr(at);
2633 
2634  // Emit a nop in the branch delay slot if required.
2635  if (bdslot == PROTECT)
2636  nop();
2637 }
2638 
2639 
2640 void MacroAssembler::Jalr(Label* L, BranchDelaySlot bdslot) {
2641  BlockTrampolinePoolScope block_trampoline_pool(this);
2642 
2643  uint32_t imm32;
2644  imm32 = jump_address(L);
2645  { BlockGrowBufferScope block_buf_growth(this);
2646  // Buffer growth (and relocation) must be blocked for internal references
2647  // until associated instructions are emitted and available to be patched.
2648  RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
2649  lui(at, (imm32 & kHiMask) >> kLuiShift);
2650  ori(at, at, (imm32 & kImm16Mask));
2651  }
2652  jalr(at);
2653 
2654  // Emit a nop in the branch delay slot if required.
2655  if (bdslot == PROTECT)
2656  nop();
2657 }
2658 
2659 void MacroAssembler::DropAndRet(int drop) {
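  // The addiu below is placed in the delay slot of the jr emitted by
  // Ret(USE_DELAY_SLOT), so the stack adjustment takes effect before the
  // return completes.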
2660  Ret(USE_DELAY_SLOT);
2661  addiu(sp, sp, drop * kPointerSize);
2662 }
2663 
2664 void MacroAssembler::DropAndRet(int drop,
2665  Condition cond,
2666  Register r1,
2667  const Operand& r2) {
2668  // Both Drop and Ret need to be conditional.
2669  Label skip;
2670  if (cond != cc_always) {
2671  Branch(&skip, NegateCondition(cond), r1, r2);
2672  }
2673 
2674  Drop(drop);
2675  Ret();
2676 
2677  if (cond != cc_always) {
2678  bind(&skip);
2679  }
2680 }
2681 
2682 
2683 void MacroAssembler::Drop(int count,
2684  Condition cond,
2685  Register reg,
2686  const Operand& op) {
2687  if (count <= 0) {
2688  return;
2689  }
2690 
2691  Label skip;
2692 
2693  if (cond != al) {
2694  Branch(&skip, NegateCondition(cond), reg, op);
2695  }
2696 
2697  addiu(sp, sp, count * kPointerSize);
2698 
2699  if (cond != al) {
2700  bind(&skip);
2701  }
2702 }
2703 
2704 
2705 
2706 void MacroAssembler::Swap(Register reg1,
2707  Register reg2,
2708  Register scratch) {
2709  if (scratch.is(no_reg)) {
2710  Xor(reg1, reg1, Operand(reg2));
2711  Xor(reg2, reg2, Operand(reg1));
2712  Xor(reg1, reg1, Operand(reg2));
2713  } else {
2714  mov(scratch, reg1);
2715  mov(reg1, reg2);
2716  mov(reg2, scratch);
2717  }
2718 }
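
// Stand-alone sketch of the scratch-free path above (illustrative helper):
// three XORs exchange two values without a temporary register. This relies on
// reg1 and reg2 being distinct; XOR-swapping a register with itself clears it.
static void XorSwap(unsigned& a, unsigned& b) {
  a ^= b;
  b ^= a;
  a ^= b;
}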
2719 
2720 
2721 void MacroAssembler::Call(Label* target) {
2722  BranchAndLink(target);
2723 }
2724 
2725 
2726 void MacroAssembler::Push(Handle<Object> handle) {
2727  li(at, Operand(handle));
2728  push(at);
2729 }
2730 
2731 
2732 #ifdef ENABLE_DEBUGGER_SUPPORT
2733 
2734 void MacroAssembler::DebugBreak() {
2735  PrepareCEntryArgs(0);
2736  PrepareCEntryFunction(ExternalReference(Runtime::kDebugBreak, isolate()));
2737  CEntryStub ces(1);
2738  ASSERT(AllowThisStubCall(&ces));
2739  Call(ces.GetCode(), RelocInfo::DEBUG_BREAK);
2740 }
2741 
2742 #endif // ENABLE_DEBUGGER_SUPPORT
2743 
2744 
2745 // ---------------------------------------------------------------------------
2746 // Exception handling.
2747 
2748 void MacroAssembler::PushTryHandler(StackHandler::Kind kind,
2749  int handler_index) {
2750  // Adjust this code if not the case.
2751  STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
2752  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
2753  STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
2754  STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
2755  STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
2756  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
2757 
2758  // For the JSEntry handler, we must preserve a0-a3 and s0.
2759  // t1-t3 are available. We will build up the handler from the bottom by
2760  // pushing on the stack.
2761  // Set up the code object (t1) and the state (t2) for pushing.
2762  unsigned state =
2763  StackHandler::IndexField::encode(handler_index) |
2764  StackHandler::KindField::encode(kind);
2765  li(t1, Operand(CodeObject()), CONSTANT_SIZE);
2766  li(t2, Operand(state));
2767 
2768  // Push the frame pointer, context, state, and code object.
2769  if (kind == StackHandler::JS_ENTRY) {
2770  ASSERT_EQ(Smi::FromInt(0), 0);
2771  // The second zero_reg indicates no context.
2772  // The first zero_reg is the NULL frame pointer.
2773  // The operands are reversed to match the order of MultiPush/Pop.
2774  Push(zero_reg, zero_reg, t2, t1);
2775  } else {
2776  MultiPush(t1.bit() | t2.bit() | cp.bit() | fp.bit());
2777  }
2778 
2779  // Link the current handler as the next handler.
2780  li(t2, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
2781  lw(t1, MemOperand(t2));
2782  push(t1);
2783  // Set this new handler as the current one.
2784  sw(sp, MemOperand(t2));
2785 }
2786 
2787 
2788 void MacroAssembler::PopTryHandler() {
2789  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
2790  pop(a1);
2791  Addu(sp, sp, Operand(StackHandlerConstants::kSize - kPointerSize));
2792  li(at, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
2793  sw(a1, MemOperand(at));
2794 }
2795 
2796 
2797 void MacroAssembler::JumpToHandlerEntry() {
2798  // Compute the handler entry address and jump to it. The handler table is
2799  // a fixed array of (smi-tagged) code offsets.
2800  // v0 = exception, a1 = code object, a2 = state.
2801  lw(a3, FieldMemOperand(a1, Code::kHandlerTableOffset)); // Handler table.
2802  Addu(a3, a3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
2803  srl(a2, a2, StackHandler::kKindWidth); // Handler index.
2804  sll(a2, a2, kPointerSizeLog2);
2805  Addu(a2, a3, a2);
2806  lw(a2, MemOperand(a2)); // Smi-tagged offset.
2807  Addu(a1, a1, Operand(Code::kHeaderSize - kHeapObjectTag)); // Code start.
2808  sra(t9, a2, kSmiTagSize);
2809  Addu(t9, t9, a1);
2810  Jump(t9); // Jump.
2811 }
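
// Sketch of the computation above (illustrative helper): the handler table is
// a fixed array of smi-tagged code offsets, so the entry address is the code
// object's instruction start plus the untagged offset at the handler's index.
static unsigned HandlerEntryAddress(unsigned code_start, const int* table,
                                    int handler_index, int smi_tag_size) {
  int smi_tagged_offset = table[handler_index];              // lw from table
  return code_start + (smi_tagged_offset >> smi_tag_size);   // sra untags
}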
2812 
2813 
2814 void MacroAssembler::Throw(Register value) {
2815  // Adjust this code if not the case.
2816  STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
2817  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
2818  STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
2819  STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
2820  STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
2821  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
2822 
2823  // The exception is expected in v0.
2824  Move(v0, value);
2825 
2826  // Drop the stack pointer to the top of the top handler.
2827  li(a3, Operand(ExternalReference(Isolate::kHandlerAddress,
2828  isolate())));
2829  lw(sp, MemOperand(a3));
2830 
2831  // Restore the next handler.
2832  pop(a2);
2833  sw(a2, MemOperand(a3));
2834 
2835  // Get the code object (a1) and state (a2). Restore the context and frame
2836  // pointer.
2837  MultiPop(a1.bit() | a2.bit() | cp.bit() | fp.bit());
2838 
2839  // If the handler is a JS frame, restore the context to the frame.
2840  // (kind == ENTRY) == (fp == 0) == (cp == 0), so we could test either fp
2841  // or cp.
2842  Label done;
2843  Branch(&done, eq, cp, Operand(zero_reg));
2844  sw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
2845  bind(&done);
2846 
2847  JumpToHandlerEntry();
2848 }
2849 
2850 
2851 void MacroAssembler::ThrowUncatchable(Register value) {
2852  // Adjust this code if not the case.
2853  STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
2854  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
2855  STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
2856  STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
2857  STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
2858  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
2859 
2860  // The exception is expected in v0.
2861  if (!value.is(v0)) {
2862  mov(v0, value);
2863  }
2864  // Drop the stack pointer to the top of the top stack handler.
2865  li(a3, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
2866  lw(sp, MemOperand(a3));
2867 
2868  // Unwind the handlers until the ENTRY handler is found.
2869  Label fetch_next, check_kind;
2870  jmp(&check_kind);
2871  bind(&fetch_next);
2872  lw(sp, MemOperand(sp, StackHandlerConstants::kNextOffset));
2873 
2874  bind(&check_kind);
2875  STATIC_ASSERT(StackHandler::JS_ENTRY == 0);
2876  lw(a2, MemOperand(sp, StackHandlerConstants::kStateOffset));
2877  And(a2, a2, Operand(StackHandler::KindField::kMask));
2878  Branch(&fetch_next, ne, a2, Operand(zero_reg));
2879 
2880  // Set the top handler address to next handler past the top ENTRY handler.
2881  pop(a2);
2882  sw(a2, MemOperand(a3));
2883 
2884  // Get the code object (a1) and state (a2). Clear the context and frame
2885  // pointer (0 was saved in the handler).
2886  MultiPop(a1.bit() | a2.bit() | cp.bit() | fp.bit());
2887 
2888  JumpToHandlerEntry();
2889 }
2890 
2891 
2892 void MacroAssembler::AllocateInNewSpace(int object_size,
2893  Register result,
2894  Register scratch1,
2895  Register scratch2,
2896  Label* gc_required,
2897  AllocationFlags flags) {
2898  if (!FLAG_inline_new) {
2899  if (emit_debug_code()) {
2900  // Trash the registers to simulate an allocation failure.
2901  li(result, 0x7091);
2902  li(scratch1, 0x7191);
2903  li(scratch2, 0x7291);
2904  }
2905  jmp(gc_required);
2906  return;
2907  }
2908 
2909  ASSERT(!result.is(scratch1));
2910  ASSERT(!result.is(scratch2));
2911  ASSERT(!scratch1.is(scratch2));
2912  ASSERT(!scratch1.is(t9));
2913  ASSERT(!scratch2.is(t9));
2914  ASSERT(!result.is(t9));
2915 
2916  // Make object size into bytes.
2917  if ((flags & SIZE_IN_WORDS) != 0) {
2918  object_size *= kPointerSize;
2919  }
2920  ASSERT_EQ(0, object_size & kObjectAlignmentMask);
2921 
2922  // Check relative positions of allocation top and limit addresses.
2923  // ARM adds additional checks to make sure the ldm instruction can be
2924  // used. On MIPS we don't have ldm so we don't need additional checks either.
2925  ExternalReference new_space_allocation_top =
2926  ExternalReference::new_space_allocation_top_address(isolate());
2927  ExternalReference new_space_allocation_limit =
2928  ExternalReference::new_space_allocation_limit_address(isolate());
2929  intptr_t top =
2930  reinterpret_cast<intptr_t>(new_space_allocation_top.address());
2931  intptr_t limit =
2932  reinterpret_cast<intptr_t>(new_space_allocation_limit.address());
2933  ASSERT((limit - top) == kPointerSize);
2934 
2935  // Set up allocation top address and object size registers.
2936  Register topaddr = scratch1;
2937  Register obj_size_reg = scratch2;
2938  li(topaddr, Operand(new_space_allocation_top));
2939  li(obj_size_reg, Operand(object_size));
2940 
2941  // This code stores a temporary value in t9.
2942  if ((flags & RESULT_CONTAINS_TOP) == 0) {
2943  // Load allocation top into result and allocation limit into t9.
2944  lw(result, MemOperand(topaddr));
2945  lw(t9, MemOperand(topaddr, kPointerSize));
2946  } else {
2947  if (emit_debug_code()) {
2948  // Assert that result actually contains top on entry. t9 is used
2949  // immediately below, so this use of t9 does not cause a difference in
2950  // register content between debug and release mode.
2951  lw(t9, MemOperand(topaddr));
2952  Check(eq, "Unexpected allocation top", result, Operand(t9));
2953  }
2954  // Load allocation limit into t9. Result already contains allocation top.
2955  lw(t9, MemOperand(topaddr, limit - top));
2956  }
2957 
2958  // Calculate new top and bail out if new space is exhausted. Use result
2959  // to calculate the new top.
2960  Addu(scratch2, result, Operand(obj_size_reg));
2961  Branch(gc_required, Ugreater, scratch2, Operand(t9));
2962  sw(scratch2, MemOperand(topaddr));
2963 
2964  // Tag object if requested.
2965  if ((flags & TAG_OBJECT) != 0) {
2966  Addu(result, result, Operand(kHeapObjectTag));
2967  }
2968 }
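
// Minimal model of the bump-pointer allocation performed above (illustrative
// helper, assuming the adjacent top/limit pair that the external references
// point at): the candidate new top is compared against the limit, and the
// 'false' return corresponds to the jump to gc_required.
static bool TryBumpAllocate(unsigned* top, unsigned limit,
                            unsigned object_size, unsigned* result) {
  unsigned new_top = *top + object_size;  // Addu(scratch2, result, obj_size)
  if (new_top > limit) return false;      // Branch(gc_required, Ugreater, ...)
  *result = *top;                         // result keeps the old top
  *top = new_top;                         // sw(scratch2, MemOperand(topaddr))
  return true;
}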
2969 
2970 
2971 void MacroAssembler::AllocateInNewSpace(Register object_size,
2972  Register result,
2973  Register scratch1,
2974  Register scratch2,
2975  Label* gc_required,
2976  AllocationFlags flags) {
2977  if (!FLAG_inline_new) {
2978  if (emit_debug_code()) {
2979  // Trash the registers to simulate an allocation failure.
2980  li(result, 0x7091);
2981  li(scratch1, 0x7191);
2982  li(scratch2, 0x7291);
2983  }
2984  jmp(gc_required);
2985  return;
2986  }
2987 
2988  ASSERT(!result.is(scratch1));
2989  ASSERT(!result.is(scratch2));
2990  ASSERT(!scratch1.is(scratch2));
2991  ASSERT(!object_size.is(t9));
2992  ASSERT(!scratch1.is(t9) && !scratch2.is(t9) && !result.is(t9));
2993 
2994  // Check relative positions of allocation top and limit addresses.
2995  // ARM adds additional checks to make sure the ldm instruction can be
2996  // used. On MIPS we don't have ldm so we don't need additional checks either.
2997  ExternalReference new_space_allocation_top =
2998  ExternalReference::new_space_allocation_top_address(isolate());
2999  ExternalReference new_space_allocation_limit =
3000  ExternalReference::new_space_allocation_limit_address(isolate());
3001  intptr_t top =
3002  reinterpret_cast<intptr_t>(new_space_allocation_top.address());
3003  intptr_t limit =
3004  reinterpret_cast<intptr_t>(new_space_allocation_limit.address());
3005  ASSERT((limit - top) == kPointerSize);
3006 
3007  // Set up allocation top address and object size registers.
3008  Register topaddr = scratch1;
3009  li(topaddr, Operand(new_space_allocation_top));
3010 
3011  // This code stores a temporary value in t9.
3012  if ((flags & RESULT_CONTAINS_TOP) == 0) {
3013  // Load allocation top into result and allocation limit into t9.
3014  lw(result, MemOperand(topaddr));
3015  lw(t9, MemOperand(topaddr, kPointerSize));
3016  } else {
3017  if (emit_debug_code()) {
3018  // Assert that result actually contains top on entry. t9 is used
3019  // immediately below, so this use of t9 does not cause a difference in
3020  // register content between debug and release mode.
3021  lw(t9, MemOperand(topaddr));
3022  Check(eq, "Unexpected allocation top", result, Operand(t9));
3023  }
3024  // Load allocation limit into t9. Result already contains allocation top.
3025  lw(t9, MemOperand(topaddr, limit - top));
3026  }
3027 
3028  // Calculate new top and bail out if new space is exhausted. Use result
3029  // to calculate the new top. Object size may be in words so a shift is
3030  // required to get the number of bytes.
3031  if ((flags & SIZE_IN_WORDS) != 0) {
3032  sll(scratch2, object_size, kPointerSizeLog2);
3033  Addu(scratch2, result, scratch2);
3034  } else {
3035  Addu(scratch2, result, Operand(object_size));
3036  }
3037  Branch(gc_required, Ugreater, scratch2, Operand(t9));
3038 
3039  // Update allocation top. result temporarily holds the new top.
3040  if (emit_debug_code()) {
3041  And(t9, scratch2, Operand(kObjectAlignmentMask));
3042  Check(eq, "Unaligned allocation in new space", t9, Operand(zero_reg));
3043  }
3044  sw(scratch2, MemOperand(topaddr));
3045 
3046  // Tag object if requested.
3047  if ((flags & TAG_OBJECT) != 0) {
3048  Addu(result, result, Operand(kHeapObjectTag));
3049  }
3050 }
3051 
3052 
3053 void MacroAssembler::UndoAllocationInNewSpace(Register object,
3054  Register scratch) {
3055  ExternalReference new_space_allocation_top =
3056  ExternalReference::new_space_allocation_top_address(isolate());
3057 
3058  // Make sure the object has no tag before resetting top.
3059  And(object, object, Operand(~kHeapObjectTagMask));
3060 #ifdef DEBUG
3061  // Check that the object being un-allocated is below the current top.
3062  li(scratch, Operand(new_space_allocation_top));
3063  lw(scratch, MemOperand(scratch));
3064  Check(less, "Undo allocation of non allocated memory",
3065  object, Operand(scratch));
3066 #endif
3067  // Write the address of the object to un-allocate as the current top.
3068  li(scratch, Operand(new_space_allocation_top));
3069  sw(object, MemOperand(scratch));
3070 }
3071 
3072 
3073 void MacroAssembler::AllocateTwoByteString(Register result,
3074  Register length,
3075  Register scratch1,
3076  Register scratch2,
3077  Register scratch3,
3078  Label* gc_required) {
3079  // Calculate the number of bytes needed for the characters in the string while
3080  // observing object alignment.
3081  ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
3082  sll(scratch1, length, 1); // Length in bytes, not chars.
3083  addiu(scratch1, scratch1,
3084  kObjectAlignmentMask + SeqTwoByteString::kHeaderSize);
3085  And(scratch1, scratch1, Operand(~kObjectAlignmentMask));
3086 
3087  // Allocate two-byte string in new space.
3088  AllocateInNewSpace(scratch1,
3089  result,
3090  scratch2,
3091  scratch3,
3092  gc_required,
3093  TAG_OBJECT);
3094 
3095  // Set the map, length and hash field.
3096  InitializeNewString(result,
3097  length,
3098  Heap::kStringMapRootIndex,
3099  scratch1,
3100  scratch2);
3101 }
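
// Sketch of the size computation above (illustrative helper): twice the
// length for two-byte characters, plus the header, rounded up to object
// alignment (kObjectAlignmentMask is alignment - 1).
static int TwoByteStringSizeInBytes(int length, int header_size,
                                    int alignment_mask) {
  return ((length << 1) + alignment_mask + header_size) & ~alignment_mask;
}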
3102 
3103 
3104 void MacroAssembler::AllocateAsciiString(Register result,
3105  Register length,
3106  Register scratch1,
3107  Register scratch2,
3108  Register scratch3,
3109  Label* gc_required) {
3110  // Calculate the number of bytes needed for the characters in the string
3111  // while observing object alignment.
3112  ASSERT((SeqAsciiString::kHeaderSize & kObjectAlignmentMask) == 0);
3113  ASSERT(kCharSize == 1);
3114  addiu(scratch1, length, kObjectAlignmentMask + SeqAsciiString::kHeaderSize);
3115  And(scratch1, scratch1, Operand(~kObjectAlignmentMask));
3116 
3117  // Allocate ASCII string in new space.
3118  AllocateInNewSpace(scratch1,
3119  result,
3120  scratch2,
3121  scratch3,
3122  gc_required,
3123  TAG_OBJECT);
3124 
3125  // Set the map, length and hash field.
3126  InitializeNewString(result,
3127  length,
3128  Heap::kAsciiStringMapRootIndex,
3129  scratch1,
3130  scratch2);
3131 }
3132 
3133 
3134 void MacroAssembler::AllocateTwoByteConsString(Register result,
3135  Register length,
3136  Register scratch1,
3137  Register scratch2,
3138  Label* gc_required) {
3139  AllocateInNewSpace(ConsString::kSize,
3140  result,
3141  scratch1,
3142  scratch2,
3143  gc_required,
3144  TAG_OBJECT);
3145  InitializeNewString(result,
3146  length,
3147  Heap::kConsStringMapRootIndex,
3148  scratch1,
3149  scratch2);
3150 }
3151 
3152 
3153 void MacroAssembler::AllocateAsciiConsString(Register result,
3154  Register length,
3155  Register scratch1,
3156  Register scratch2,
3157  Label* gc_required) {
3158  AllocateInNewSpace(ConsString::kSize,
3159  result,
3160  scratch1,
3161  scratch2,
3162  gc_required,
3163  TAG_OBJECT);
3164  InitializeNewString(result,
3165  length,
3166  Heap::kConsAsciiStringMapRootIndex,
3167  scratch1,
3168  scratch2);
3169 }
3170 
3171 
3172 void MacroAssembler::AllocateTwoByteSlicedString(Register result,
3173  Register length,
3174  Register scratch1,
3175  Register scratch2,
3176  Label* gc_required) {
3177  AllocateInNewSpace(SlicedString::kSize,
3178  result,
3179  scratch1,
3180  scratch2,
3181  gc_required,
3182  TAG_OBJECT);
3183 
3184  InitializeNewString(result,
3185  length,
3186  Heap::kSlicedStringMapRootIndex,
3187  scratch1,
3188  scratch2);
3189 }
3190 
3191 
3192 void MacroAssembler::AllocateAsciiSlicedString(Register result,
3193  Register length,
3194  Register scratch1,
3195  Register scratch2,
3196  Label* gc_required) {
3197  AllocateInNewSpace(SlicedString::kSize,
3198  result,
3199  scratch1,
3200  scratch2,
3201  gc_required,
3202  TAG_OBJECT);
3203 
3204  InitializeNewString(result,
3205  length,
3206  Heap::kSlicedAsciiStringMapRootIndex,
3207  scratch1,
3208  scratch2);
3209 }
3210 
3211 
3212 // Allocates a heap number or jumps to the label if the young space is full and
3213 // a scavenge is needed.
3214 void MacroAssembler::AllocateHeapNumber(Register result,
3215  Register scratch1,
3216  Register scratch2,
3217  Register heap_number_map,
3218  Label* need_gc) {
3219  // Allocate an object in the heap for the heap number and tag it as a heap
3220  // object.
3221  AllocateInNewSpace(HeapNumber::kSize,
3222  result,
3223  scratch1,
3224  scratch2,
3225  need_gc,
3226  TAG_OBJECT);
3227 
3228  // Store heap number map in the allocated object.
3229  AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
3230  sw(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset));
3231 }
3232 
3233 
3234 void MacroAssembler::AllocateHeapNumberWithValue(Register result,
3235  FPURegister value,
3236  Register scratch1,
3237  Register scratch2,
3238  Label* gc_required) {
3239  LoadRoot(t8, Heap::kHeapNumberMapRootIndex);
3240  AllocateHeapNumber(result, scratch1, scratch2, t8, gc_required);
3241  sdc1(value, FieldMemOperand(result, HeapNumber::kValueOffset));
3242 }
3243 
3244 
3245 // Copies a fixed number of fields of heap objects from src to dst.
3246 void MacroAssembler::CopyFields(Register dst,
3247  Register src,
3248  RegList temps,
3249  int field_count) {
3250  ASSERT((temps & dst.bit()) == 0);
3251  ASSERT((temps & src.bit()) == 0);
3252  // Primitive implementation using only one temporary register.
3253 
3254  Register tmp = no_reg;
3255  // Find a temp register in temps list.
3256  for (int i = 0; i < kNumRegisters; i++) {
3257  if ((temps & (1 << i)) != 0) {
3258  tmp.code_ = i;
3259  break;
3260  }
3261  }
3262  ASSERT(!tmp.is(no_reg));
3263 
3264  for (int i = 0; i < field_count; i++) {
3265  lw(tmp, FieldMemOperand(src, i * kPointerSize));
3266  sw(tmp, FieldMemOperand(dst, i * kPointerSize));
3267  }
3268 }
3269 
3270 
3271 void MacroAssembler::CopyBytes(Register src,
3272  Register dst,
3273  Register length,
3274  Register scratch) {
3275  Label align_loop, align_loop_1, word_loop, byte_loop, byte_loop_1, done;
3276 
3277  // Align src before copying in word size chunks.
3278  bind(&align_loop);
3279  Branch(&done, eq, length, Operand(zero_reg));
3280  bind(&align_loop_1);
3281  And(scratch, src, kPointerSize - 1);
3282  Branch(&word_loop, eq, scratch, Operand(zero_reg));
3283  lbu(scratch, MemOperand(src));
3284  Addu(src, src, 1);
3285  sb(scratch, MemOperand(dst));
3286  Addu(dst, dst, 1);
3287  Subu(length, length, Operand(1));
3288  Branch(&byte_loop_1, ne, length, Operand(zero_reg));
3289 
3290  // Copy bytes in word size chunks.
3291  bind(&word_loop);
3292  if (emit_debug_code()) {
3293  And(scratch, src, kPointerSize - 1);
3294  Assert(eq, "Expecting alignment for CopyBytes",
3295  scratch, Operand(zero_reg));
3296  }
3297  Branch(&byte_loop, lt, length, Operand(kPointerSize));
3298  lw(scratch, MemOperand(src));
3299  Addu(src, src, kPointerSize);
3300 
3301  // TODO(kalmard) check if this can be optimized to use sw in most cases.
3302  // Can't use unaligned access - copy byte by byte.
3303  sb(scratch, MemOperand(dst, 0));
3304  srl(scratch, scratch, 8);
3305  sb(scratch, MemOperand(dst, 1));
3306  srl(scratch, scratch, 8);
3307  sb(scratch, MemOperand(dst, 2));
3308  srl(scratch, scratch, 8);
3309  sb(scratch, MemOperand(dst, 3));
3310  Addu(dst, dst, 4);
3311 
3312  Subu(length, length, Operand(kPointerSize));
3313  Branch(&word_loop);
3314 
3315  // Copy the last bytes if any left.
3316  bind(&byte_loop);
3317  Branch(&done, eq, length, Operand(zero_reg));
3318  bind(&byte_loop_1);
3319  lbu(scratch, MemOperand(src));
3320  Addu(src, src, 1);
3321  sb(scratch, MemOperand(dst));
3322  Addu(dst, dst, 1);
3323  Subu(length, length, Operand(1));
3324  Branch(&byte_loop_1, ne, length, Operand(zero_reg));
3325  bind(&done);
3326 }
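
// Stand-alone model of the strategy above (illustrative helper): copy single
// bytes until src is word aligned, then move word-sized chunks (written out a
// byte at a time, mirroring the little-endian srl/sb sequence, since dst may
// be unaligned), and finally copy any remaining tail bytes.
#include <cstdint>

static void CopyBytesModel(const unsigned char* src, unsigned char* dst,
                           unsigned length) {
  const unsigned kWordSize = 4;
  while (length > 0 &&
         (reinterpret_cast<uintptr_t>(src) & (kWordSize - 1)) != 0) {
    *dst++ = *src++;                            // byte-by-byte until aligned
    --length;
  }
  while (length >= kWordSize) {
    unsigned word = 0;
    for (unsigned i = 0; i < kWordSize; ++i) {  // assemble the word (the lw)
      word |= static_cast<unsigned>(src[i]) << (8 * i);
    }
    for (unsigned i = 0; i < kWordSize; ++i) {  // store it a byte at a time
      dst[i] = static_cast<unsigned char>(word >> (8 * i));
    }
    src += kWordSize;
    dst += kWordSize;
    length -= kWordSize;
  }
  while (length > 0) {                          // remaining tail bytes
    *dst++ = *src++;
    --length;
  }
}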
3327 
3328 
3329 void MacroAssembler::InitializeFieldsWithFiller(Register start_offset,
3330  Register end_offset,
3331  Register filler) {
3332  Label loop, entry;
3333  Branch(&entry);
3334  bind(&loop);
3335  sw(filler, MemOperand(start_offset));
3336  Addu(start_offset, start_offset, kPointerSize);
3337  bind(&entry);
3338  Branch(&loop, lt, start_offset, Operand(end_offset));
3339 }
3340 
3341 
3342 void MacroAssembler::CheckFastElements(Register map,
3343  Register scratch,
3344  Label* fail) {
3345  STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
3346  STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
3347  STATIC_ASSERT(FAST_ELEMENTS == 2);
3348  STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
3349  lbu(scratch, FieldMemOperand(map, Map::kBitField2Offset));
3350  Branch(fail, hi, scratch,
3351  Operand(Map::kMaximumBitField2FastHoleyElementValue));
3352 }
3353 
3354 
3355 void MacroAssembler::CheckFastObjectElements(Register map,
3356  Register scratch,
3357  Label* fail) {
3358  STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
3359  STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
3360  STATIC_ASSERT(FAST_ELEMENTS == 2);
3361  STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
3362  lbu(scratch, FieldMemOperand(map, Map::kBitField2Offset));
3363  Branch(fail, ls, scratch,
3364  Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
3365  Branch(fail, hi, scratch,
3366  Operand(Map::kMaximumBitField2FastHoleyElementValue));
3367 }
3368 
3369 
3370 void MacroAssembler::CheckFastSmiElements(Register map,
3371  Register scratch,
3372  Label* fail) {
3373  STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
3374  STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
3375  lbu(scratch, FieldMemOperand(map, Map::kBitField2Offset));
3376  Branch(fail, hi, scratch,
3377  Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
3378 }
3379 
3380 
3381 void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
3382  Register key_reg,
3383  Register receiver_reg,
3384  Register elements_reg,
3385  Register scratch1,
3386  Register scratch2,
3387  Register scratch3,
3388  Register scratch4,
3389  Label* fail) {
3390  Label smi_value, maybe_nan, have_double_value, is_nan, done;
3391  Register mantissa_reg = scratch2;
3392  Register exponent_reg = scratch3;
3393 
3394  // Handle smi values specially.
3395  JumpIfSmi(value_reg, &smi_value);
3396 
3397  // Ensure that the object is a heap number.
3398  CheckMap(value_reg,
3399  scratch1,
3400  Heap::kHeapNumberMapRootIndex,
3401  fail,
3402  DONT_DO_SMI_CHECK);
3403 
3404  // Check for NaN: all NaN values have a value greater (signed) than 0x7ff00000
3405  // in the exponent.
3406  li(scratch1, Operand(kNaNOrInfinityLowerBoundUpper32));
3407  lw(exponent_reg, FieldMemOperand(value_reg, HeapNumber::kExponentOffset));
3408  Branch(&maybe_nan, ge, exponent_reg, Operand(scratch1));
3409 
3410  lw(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset));
3411 
3412  bind(&have_double_value);
3413  sll(scratch1, key_reg, kDoubleSizeLog2 - kSmiTagSize);
3414  Addu(scratch1, scratch1, elements_reg);
3415  sw(mantissa_reg, FieldMemOperand(scratch1, FixedDoubleArray::kHeaderSize));
3416  uint32_t offset = FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32);
3417  sw(exponent_reg, FieldMemOperand(scratch1, offset));
3418  jmp(&done);
3419 
3420  bind(&maybe_nan);
3421  // Could be NaN or Infinity. If fraction is not zero, it's NaN, otherwise
3422  // it's an Infinity, and the non-NaN code path applies.
3423  Branch(&is_nan, gt, exponent_reg, Operand(scratch1));
3424  lw(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset));
3425  Branch(&have_double_value, eq, mantissa_reg, Operand(zero_reg));
3426  bind(&is_nan);
3427  // Load canonical NaN for storing into the double array.
3428  uint64_t nan_int64 = BitCast<uint64_t>(
3429  FixedDoubleArray::canonical_not_the_hole_nan_as_double());
3430  li(mantissa_reg, Operand(static_cast<uint32_t>(nan_int64)));
3431  li(exponent_reg, Operand(static_cast<uint32_t>(nan_int64 >> 32)));
3432  jmp(&have_double_value);
3433 
3434  bind(&smi_value);
3435  Addu(scratch1, elements_reg,
3436  Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
3437  sll(scratch2, key_reg, kDoubleSizeLog2 - kSmiTagSize);
3438  Addu(scratch1, scratch1, scratch2);
3439  // scratch1 now holds the effective address of the double element.
3440 
3441  FloatingPointHelper::Destination destination;
3442  if (CpuFeatures::IsSupported(FPU)) {
3443  destination = FloatingPointHelper::kFPURegisters;
3444  } else {
3445  destination = FloatingPointHelper::kCoreRegisters;
3446  }
3447 
3448  Register untagged_value = elements_reg;
3449  SmiUntag(untagged_value, value_reg);
3450  FloatingPointHelper::ConvertIntToDouble(this,
3451  untagged_value,
3452  destination,
3453  f0,
3454  mantissa_reg,
3455  exponent_reg,
3456  scratch4,
3457  f2);
3458  if (destination == FloatingPointHelper::kFPURegisters) {
3459  CpuFeatures::Scope scope(FPU);
3460  sdc1(f0, MemOperand(scratch1, 0));
3461  } else {
3462  sw(mantissa_reg, MemOperand(scratch1, 0));
3463  sw(exponent_reg, MemOperand(scratch1, Register::kSizeInBytes));
3464  }
3465  bind(&done);
3466 }
3467 
3468 
3469 void MacroAssembler::CompareMapAndBranch(Register obj,
3470  Register scratch,
3471  Handle<Map> map,
3472  Label* early_success,
3473  Condition cond,
3474  Label* branch_to,
3475  CompareMapMode mode) {
3476  lw(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
3477  CompareMapAndBranch(scratch, map, early_success, cond, branch_to, mode);
3478 }
3479 
3480 
3481 void MacroAssembler::CompareMapAndBranch(Register obj_map,
3482  Handle<Map> map,
3483  Label* early_success,
3484  Condition cond,
3485  Label* branch_to,
3486  CompareMapMode mode) {
3487  Operand right = Operand(map);
3488  if (mode == ALLOW_ELEMENT_TRANSITION_MAPS) {
3489  ElementsKind kind = map->elements_kind();
3490  if (IsFastElementsKind(kind)) {
3491  bool packed = IsFastPackedElementsKind(kind);
3492  Map* current_map = *map;
3493  while (CanTransitionToMoreGeneralFastElementsKind(kind, packed)) {
3494  kind = GetNextMoreGeneralFastElementsKind(kind, packed);
3495  current_map = current_map->LookupElementsTransitionMap(kind);
3496  if (!current_map) break;
3497  Branch(early_success, eq, obj_map, right);
3498  right = Operand(Handle<Map>(current_map));
3499  }
3500  }
3501  }
3502 
3503  Branch(branch_to, cond, obj_map, right);
3504 }
3505 
3506 
3507 void MacroAssembler::CheckMap(Register obj,
3508  Register scratch,
3509  Handle<Map> map,
3510  Label* fail,
3511  SmiCheckType smi_check_type,
3512  CompareMapMode mode) {
3513  if (smi_check_type == DO_SMI_CHECK) {
3514  JumpIfSmi(obj, fail);
3515  }
3516  Label success;
3517  CompareMapAndBranch(obj, scratch, map, &success, ne, fail, mode);
3518  bind(&success);
3519 }
3520 
3521 
3522 void MacroAssembler::DispatchMap(Register obj,
3523  Register scratch,
3524  Handle<Map> map,
3525  Handle<Code> success,
3526  SmiCheckType smi_check_type) {
3527  Label fail;
3528  if (smi_check_type == DO_SMI_CHECK) {
3529  JumpIfSmi(obj, &fail);
3530  }
3531  lw(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
3532  Jump(success, RelocInfo::CODE_TARGET, eq, scratch, Operand(map));
3533  bind(&fail);
3534 }
3535 
3536 
3537 void MacroAssembler::CheckMap(Register obj,
3538  Register scratch,
3539  Heap::RootListIndex index,
3540  Label* fail,
3541  SmiCheckType smi_check_type) {
3542  if (smi_check_type == DO_SMI_CHECK) {
3543  JumpIfSmi(obj, fail);
3544  }
3545  lw(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
3546  LoadRoot(at, index);
3547  Branch(fail, ne, scratch, Operand(at));
3548 }
3549 
3550 
3551 void MacroAssembler::GetCFunctionDoubleResult(const DoubleRegister dst) {
3552  CpuFeatures::Scope scope(FPU);
3553  if (IsMipsSoftFloatABI) {
3554  Move(dst, v0, v1);
3555  } else {
3556  Move(dst, f0); // Reg f0 is o32 ABI FP return value.
3557  }
3558 }
3559 
3560 
3561 void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg) {
3562  CpuFeatures::Scope scope(FPU);
3563  if (!IsMipsSoftFloatABI) {
3564  Move(f12, dreg);
3565  } else {
3566  Move(a0, a1, dreg);
3567  }
3568 }
3569 
3570 
3571 void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg1,
3572  DoubleRegister dreg2) {
3573  CpuFeatures::Scope scope(FPU);
3574  if (!IsMipsSoftFloatABI) {
3575  if (dreg2.is(f12)) {
3576  ASSERT(!dreg1.is(f14));
3577  Move(f14, dreg2);
3578  Move(f12, dreg1);
3579  } else {
3580  Move(f12, dreg1);
3581  Move(f14, dreg2);
3582  }
3583  } else {
3584  Move(a0, a1, dreg1);
3585  Move(a2, a3, dreg2);
3586  }
3587 }
3588 
3589 
3590 void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg,
3591  Register reg) {
3592  CpuFeatures::Scope scope(FPU);
3593  if (!IsMipsSoftFloatABI) {
3594  Move(f12, dreg);
3595  Move(a2, reg);
3596  } else {
3597  Move(a2, reg);
3598  Move(a0, a1, dreg);
3599  }
3600 }
3601 
3602 
3603 void MacroAssembler::SetCallKind(Register dst, CallKind call_kind) {
3604  // This macro takes the dst register to make the code more readable
3605  // at the call sites. However, the dst register has to be t1 to
3606  // follow the calling convention which requires the call type to be
3607  // in t1.
3608  ASSERT(dst.is(t1));
3609  if (call_kind == CALL_AS_FUNCTION) {
3610  li(dst, Operand(Smi::FromInt(1)));
3611  } else {
3612  li(dst, Operand(Smi::FromInt(0)));
3613  }
3614 }
3615 
3616 
3617 // -----------------------------------------------------------------------------
3618 // JavaScript invokes.
3619 
3620 void MacroAssembler::InvokePrologue(const ParameterCount& expected,
3621  const ParameterCount& actual,
3622  Handle<Code> code_constant,
3623  Register code_reg,
3624  Label* done,
3625  bool* definitely_mismatches,
3626  InvokeFlag flag,
3627  const CallWrapper& call_wrapper,
3628  CallKind call_kind) {
3629  bool definitely_matches = false;
3630  *definitely_mismatches = false;
3631  Label regular_invoke;
3632 
3633  // Check whether the expected and actual argument counts match. If not,
3634  // set up registers according to the contract with ArgumentsAdaptorTrampoline:
3635  // a0: actual arguments count
3636  // a1: function (passed through to callee)
3637  // a2: expected arguments count
3638  // a3: callee code entry
3639 
3640  // The code below is made a lot easier because the calling code already sets
3641  // up actual and expected registers according to the contract if values are
3642  // passed in registers.
3643  ASSERT(actual.is_immediate() || actual.reg().is(a0));
3644  ASSERT(expected.is_immediate() || expected.reg().is(a2));
3645  ASSERT((!code_constant.is_null() && code_reg.is(no_reg)) || code_reg.is(a3));
3646 
3647  if (expected.is_immediate()) {
3648  ASSERT(actual.is_immediate());
3649  if (expected.immediate() == actual.immediate()) {
3650  definitely_matches = true;
3651  } else {
3652  li(a0, Operand(actual.immediate()));
3653  const int sentinel = SharedFunctionInfo::kDontAdaptArgumentsSentinel;
3654  if (expected.immediate() == sentinel) {
3655  // Don't worry about adapting arguments for builtins that
3656  // don't want that done. Skip adaptation code by making it look
3657  // like we have a match between expected and actual number of
3658  // arguments.
3659  definitely_matches = true;
3660  } else {
3661  *definitely_mismatches = true;
3662  li(a2, Operand(expected.immediate()));
3663  }
3664  }
3665  } else if (actual.is_immediate()) {
3666  Branch(&regular_invoke, eq, expected.reg(), Operand(actual.immediate()));
3667  li(a0, Operand(actual.immediate()));
3668  } else {
3669  Branch(&regular_invoke, eq, expected.reg(), Operand(actual.reg()));
3670  }
3671 
3672  if (!definitely_matches) {
3673  if (!code_constant.is_null()) {
3674  li(a3, Operand(code_constant));
3675  addiu(a3, a3, Code::kHeaderSize - kHeapObjectTag);
3676  }
3677 
3678  Handle<Code> adaptor =
3679  isolate()->builtins()->ArgumentsAdaptorTrampoline();
3680  if (flag == CALL_FUNCTION) {
3681  call_wrapper.BeforeCall(CallSize(adaptor));
3682  SetCallKind(t1, call_kind);
3683  Call(adaptor);
3684  call_wrapper.AfterCall();
3685  if (!*definitely_mismatches) {
3686  Branch(done);
3687  }
3688  } else {
3689  SetCallKind(t1, call_kind);
3690  Jump(adaptor, RelocInfo::CODE_TARGET);
3691  }
3692  bind(&regular_invoke);
3693  }
3694 }
3695 
3696 
3697 void MacroAssembler::InvokeCode(Register code,
3698  const ParameterCount& expected,
3699  const ParameterCount& actual,
3700  InvokeFlag flag,
3701  const CallWrapper& call_wrapper,
3702  CallKind call_kind) {
3703  // You can't call a function without a valid frame.
3704  ASSERT(flag == JUMP_FUNCTION || has_frame());
3705 
3706  Label done;
3707 
3708  bool definitely_mismatches = false;
3709  InvokePrologue(expected, actual, Handle<Code>::null(), code,
3710  &done, &definitely_mismatches, flag,
3711  call_wrapper, call_kind);
3712  if (!definitely_mismatches) {
3713  if (flag == CALL_FUNCTION) {
3714  call_wrapper.BeforeCall(CallSize(code));
3715  SetCallKind(t1, call_kind);
3716  Call(code);
3717  call_wrapper.AfterCall();
3718  } else {
3719  ASSERT(flag == JUMP_FUNCTION);
3720  SetCallKind(t1, call_kind);
3721  Jump(code);
3722  }
3723  // Continue here if InvokePrologue does handle the invocation due to
3724  // mismatched parameter counts.
3725  bind(&done);
3726  }
3727 }
3728 
3729 
3730 void MacroAssembler::InvokeCode(Handle<Code> code,
3731  const ParameterCount& expected,
3732  const ParameterCount& actual,
3733  RelocInfo::Mode rmode,
3734  InvokeFlag flag,
3735  CallKind call_kind) {
3736  // You can't call a function without a valid frame.
3737  ASSERT(flag == JUMP_FUNCTION || has_frame());
3738 
3739  Label done;
3740 
3741  bool definitely_mismatches = false;
3742  InvokePrologue(expected, actual, code, no_reg,
3743  &done, &definitely_mismatches, flag,
3744  NullCallWrapper(), call_kind);
3745  if (!definitely_mismatches) {
3746  if (flag == CALL_FUNCTION) {
3747  SetCallKind(t1, call_kind);
3748  Call(code, rmode);
3749  } else {
3750  SetCallKind(t1, call_kind);
3751  Jump(code, rmode);
3752  }
3753  // Continue here if InvokePrologue does handle the invocation due to
3754  // mismatched parameter counts.
3755  bind(&done);
3756  }
3757 }
3758 
3759 
3760 void MacroAssembler::InvokeFunction(Register function,
3761  const ParameterCount& actual,
3762  InvokeFlag flag,
3763  const CallWrapper& call_wrapper,
3764  CallKind call_kind) {
3765  // You can't call a function without a valid frame.
3766  ASSERT(flag == JUMP_FUNCTION || has_frame());
3767 
3768  // Contract with called JS functions requires that function is passed in a1.
3769  ASSERT(function.is(a1));
3770  Register expected_reg = a2;
3771  Register code_reg = a3;
3772 
3773  lw(code_reg, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
3774  lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
3775  lw(expected_reg,
3776  FieldMemOperand(code_reg,
3777  SharedFunctionInfo::kFormalParameterCountOffset));
3778  sra(expected_reg, expected_reg, kSmiTagSize);
3779  lw(code_reg, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
3780 
3781  ParameterCount expected(expected_reg);
3782  InvokeCode(code_reg, expected, actual, flag, call_wrapper, call_kind);
3783 }
3784 
3785 
3786 void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
3787  const ParameterCount& actual,
3788  InvokeFlag flag,
3789  const CallWrapper& call_wrapper,
3790  CallKind call_kind) {
3791  // You can't call a function without a valid frame.
3792  ASSERT(flag == JUMP_FUNCTION || has_frame());
3793 
3794  // Get the function and set up the context.
3795  LoadHeapObject(a1, function);
3796  lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
3797 
3798  ParameterCount expected(function->shared()->formal_parameter_count());
3799  // We call indirectly through the code field in the function to
3800  // allow recompilation to take effect without changing any of the
3801  // call sites.
3802  lw(a3, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
3803  InvokeCode(a3, expected, actual, flag, call_wrapper, call_kind);
3804 }
3805 
3806 
3807 void MacroAssembler::IsObjectJSObjectType(Register heap_object,
3808  Register map,
3809  Register scratch,
3810  Label* fail) {
3811  lw(map, FieldMemOperand(heap_object, HeapObject::kMapOffset));
3812  IsInstanceJSObjectType(map, scratch, fail);
3813 }
3814 
3815 
3816 void MacroAssembler::IsInstanceJSObjectType(Register map,
3817  Register scratch,
3818  Label* fail) {
3819  lbu(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
3820  Branch(fail, lt, scratch, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
3821  Branch(fail, gt, scratch, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
3822 }
3823 
3824 
3825 void MacroAssembler::IsObjectJSStringType(Register object,
3826  Register scratch,
3827  Label* fail) {
3828  ASSERT(kNotStringTag != 0);
3829 
3830  lw(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
3831  lbu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
3832  And(scratch, scratch, Operand(kIsNotStringMask));
3833  Branch(fail, ne, scratch, Operand(zero_reg));
3834 }
3835 
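// --- Editorial sketch (not part of the original source) ---
// IsObjectJSStringType relies on string instance types having the
// kIsNotStringMask bit clear. A C++ restatement of the same test, assuming
// <cstdint>; the value 0x80 mirrors the assertion on kIsNotStringMask used
// later in this file:
static bool IsStringInstanceType(uint8_t instance_type) {
  const uint8_t kIsNotStringMask = 0x80;           // assumed value of the mask
  return (instance_type & kIsNotStringMask) == 0;  // strings have the bit clear
}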
3836 
3837 // ---------------------------------------------------------------------------
3838 // Support functions.
3839 
3840 
3841 void MacroAssembler::TryGetFunctionPrototype(Register function,
3842  Register result,
3843  Register scratch,
3844  Label* miss,
3845  bool miss_on_bound_function) {
3846  // Check that the receiver isn't a smi.
3847  JumpIfSmi(function, miss);
3848 
3849  // Check that the function really is a function. Load map into result reg.
3850  GetObjectType(function, result, scratch);
3851  Branch(miss, ne, scratch, Operand(JS_FUNCTION_TYPE));
3852 
3853  if (miss_on_bound_function) {
3854  lw(scratch,
3855  FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
3856  lw(scratch,
3857  FieldMemOperand(scratch, SharedFunctionInfo::kCompilerHintsOffset));
3858  And(scratch, scratch,
3859  Operand(Smi::FromInt(1 << SharedFunctionInfo::kBoundFunction)));
3860  Branch(miss, ne, scratch, Operand(zero_reg));
3861  }
3862 
3863  // Make sure that the function has an instance prototype.
3864  Label non_instance;
3865  lbu(scratch, FieldMemOperand(result, Map::kBitFieldOffset));
3866  And(scratch, scratch, Operand(1 << Map::kHasNonInstancePrototype));
3867  Branch(&non_instance, ne, scratch, Operand(zero_reg));
3868 
3869  // Get the prototype or initial map from the function.
3870  lw(result,
3871  FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
3872 
3873  // If the prototype or initial map is the hole, don't return it and
3874  // simply miss the cache instead. This will allow us to allocate a
3875  // prototype object on-demand in the runtime system.
3876  LoadRoot(t8, Heap::kTheHoleValueRootIndex);
3877  Branch(miss, eq, result, Operand(t8));
3878 
3879  // If the function does not have an initial map, we're done.
3880  Label done;
3881  GetObjectType(result, scratch, scratch);
3882  Branch(&done, ne, scratch, Operand(MAP_TYPE));
3883 
3884  // Get the prototype from the initial map.
3885  lw(result, FieldMemOperand(result, Map::kPrototypeOffset));
3886  jmp(&done);
3887 
3888  // Non-instance prototype: Fetch prototype from constructor field
3889  // in initial map.
3890  bind(&non_instance);
3891  lw(result, FieldMemOperand(result, Map::kConstructorOffset));
3892 
3893  // All done.
3894  bind(&done);
3895 }
3896 
3897 
3898 void MacroAssembler::GetObjectType(Register object,
3899  Register map,
3900  Register type_reg) {
3901  lw(map, FieldMemOperand(object, HeapObject::kMapOffset));
3902  lbu(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
3903 }
3904 
3905 
3906 // -----------------------------------------------------------------------------
3907 // Runtime calls.
3908 
3909 void MacroAssembler::CallStub(CodeStub* stub,
3910  Condition cond,
3911  Register r1,
3912  const Operand& r2,
3913  BranchDelaySlot bd) {
3914  ASSERT(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs.
3915  Call(stub->GetCode(), RelocInfo::CODE_TARGET, TypeFeedbackId::None(),
3916  cond, r1, r2, bd);
3917 }
3918 
3919 
3920 void MacroAssembler::TailCallStub(CodeStub* stub) {
3921  ASSERT(allow_stub_calls_ || stub->CompilingCallsToThisStubIsGCSafe());
3922  Jump(stub->GetCode(), RelocInfo::CODE_TARGET);
3923 }
3924 
3925 
3926 static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
3927  return ref0.address() - ref1.address();
3928 }
3929 
3930 
3931 void MacroAssembler::CallApiFunctionAndReturn(ExternalReference function,
3932  int stack_space) {
3933  ExternalReference next_address =
3934  ExternalReference::handle_scope_next_address();
3935  const int kNextOffset = 0;
3936  const int kLimitOffset = AddressOffset(
3937  ExternalReference::handle_scope_limit_address(),
3938  next_address);
3939  const int kLevelOffset = AddressOffset(
3940  ExternalReference::handle_scope_level_address(),
3941  next_address);
3942 
3943  // Allocate HandleScope in callee-save registers.
3944  li(s3, Operand(next_address));
3945  lw(s0, MemOperand(s3, kNextOffset));
3946  lw(s1, MemOperand(s3, kLimitOffset));
3947  lw(s2, MemOperand(s3, kLevelOffset));
3948  Addu(s2, s2, Operand(1));
3949  sw(s2, MemOperand(s3, kLevelOffset));
3950 
3951  // The O32 ABI requires us to pass a pointer in a0 where the returned struct
3952  // (4 bytes) will be placed. This is also built into the Simulator.
3953  // Set up the pointer to the returned value (a0). It was allocated in
3954  // EnterExitFrame.
3955  addiu(a0, fp, ExitFrameConstants::kStackSpaceOffset);
3956 
3957  // Native call returns to the DirectCEntry stub which redirects to the
3958  // return address pushed on stack (could have moved after GC).
3959  // DirectCEntry stub itself is generated early and never moves.
3960  DirectCEntryStub stub;
3961  stub.GenerateCall(this, function);
3962 
3963  // As mentioned above, on MIPS a pointer is returned - we need to dereference
3964  // it to get the actual return value (which is also a pointer).
3965  lw(v0, MemOperand(v0));
3966 
3967  Label promote_scheduled_exception;
3968  Label delete_allocated_handles;
3969  Label leave_exit_frame;
3970 
3971  // If the result is non-zero, dereference it to get the result value;
3972  // otherwise set it to undefined.
3973  Label skip;
3974  LoadRoot(a0, Heap::kUndefinedValueRootIndex);
3975  Branch(&skip, eq, v0, Operand(zero_reg));
3976  lw(a0, MemOperand(v0));
3977  bind(&skip);
3978  mov(v0, a0);
3979 
3980  // No more valid handles (the result handle was the last one). Restore
3981  // previous handle scope.
3982  sw(s0, MemOperand(s3, kNextOffset));
3983  if (emit_debug_code()) {
3984  lw(a1, MemOperand(s3, kLevelOffset));
3985  Check(eq, "Unexpected level after return from api call", a1, Operand(s2));
3986  }
3987  Subu(s2, s2, Operand(1));
3988  sw(s2, MemOperand(s3, kLevelOffset));
3989  lw(at, MemOperand(s3, kLimitOffset));
3990  Branch(&delete_allocated_handles, ne, s1, Operand(at));
3991 
3992  // Check if the function scheduled an exception.
3993  bind(&leave_exit_frame);
3994  LoadRoot(t0, Heap::kTheHoleValueRootIndex);
3995  li(at, Operand(ExternalReference::scheduled_exception_address(isolate())));
3996  lw(t1, MemOperand(at));
3997  Branch(&promote_scheduled_exception, ne, t0, Operand(t1));
3998  li(s0, Operand(stack_space));
3999  LeaveExitFrame(false, s0, true);
4000 
4001  bind(&promote_scheduled_exception);
4002  TailCallExternalReference(
4003  ExternalReference(Runtime::kPromoteScheduledException, isolate()),
4004  0,
4005  1);
4006 
4007  // HandleScope limit has changed. Delete allocated extensions.
4008  bind(&delete_allocated_handles);
4009  sw(s1, MemOperand(s3, kLimitOffset));
4010  mov(s0, v0);
4011  mov(a0, v0);
4012  PrepareCallCFunction(1, s1);
4013  li(a0, Operand(ExternalReference::isolate_address()));
4014  CallCFunction(ExternalReference::delete_handle_scope_extensions(isolate()),
4015  1);
4016  mov(v0, s0);
4017  jmp(&leave_exit_frame);
4018 }
4019 
4020 
4021 bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
4022  if (!has_frame_ && stub->SometimesSetsUpAFrame()) return false;
4023  return allow_stub_calls_ || stub->CompilingCallsToThisStubIsGCSafe();
4024 }
4025 
4026 
4027 void MacroAssembler::IllegalOperation(int num_arguments) {
4028  if (num_arguments > 0) {
4029  addiu(sp, sp, num_arguments * kPointerSize);
4030  }
4031  LoadRoot(v0, Heap::kUndefinedValueRootIndex);
4032 }
4033 
4034 
4035 void MacroAssembler::IndexFromHash(Register hash,
4036  Register index) {
4037  // If the hash field contains an array index pick it out. The assert checks
4038  // that the constants for the maximum number of digits for an array index
4039  // cached in the hash field and the number of bits reserved for it do not
4040  // conflict.
4041  ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) <
4042  (1 << String::kArrayIndexValueBits));
4043  // We want the smi-tagged index in key. kArrayIndexValueMask has zeros in
4044  // the low kHashShift bits.
4045  STATIC_ASSERT(kSmiTag == 0);
4046  Ext(hash, hash, String::kHashShift, String::kArrayIndexValueBits);
4047  sll(index, hash, kSmiTagSize);
4048 }
4049 
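// --- Editorial sketch (not part of the original source) ---
// IndexFromHash extracts String::kArrayIndexValueBits bits starting at
// String::kHashShift and then smi-tags the result by shifting left one bit
// (kSmiTagSize == 1, kSmiTag == 0). A host-side sketch with hypothetical
// shift/width values standing in for the String constants, assuming <cstdint>:
static uint32_t SmiTaggedIndexFromHash(uint32_t hash_field) {
  const int kHashShift = 2;             // assumed stand-in for String::kHashShift
  const int kArrayIndexValueBits = 24;  // assumed stand-in for the width constant
  uint32_t index =
      (hash_field >> kHashShift) & ((1u << kArrayIndexValueBits) - 1);
  return index << 1;  // smi-tag: value * 2, low bit 0
}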
4050 
4051 void MacroAssembler::ObjectToDoubleFPURegister(Register object,
4052  FPURegister result,
4053  Register scratch1,
4054  Register scratch2,
4055  Register heap_number_map,
4056  Label* not_number,
4057  ObjectToDoubleFlags flags) {
4058  Label done;
4059  if ((flags & OBJECT_NOT_SMI) == 0) {
4060  Label not_smi;
4061  JumpIfNotSmi(object, &not_smi);
4062  // Remove smi tag and convert to double.
4063  sra(scratch1, object, kSmiTagSize);
4064  mtc1(scratch1, result);
4065  cvt_d_w(result, result);
4066  Branch(&done);
4067  bind(&not_smi);
4068  }
4069  // Check for heap number and load double value from it.
4070  lw(scratch1, FieldMemOperand(object, HeapObject::kMapOffset));
4071  Branch(not_number, ne, scratch1, Operand(heap_number_map));
4072 
4073  if ((flags & AVOID_NANS_AND_INFINITIES) != 0) {
4074  // If exponent is all ones the number is either a NaN or +/-Infinity.
4075  Register exponent = scratch1;
4076  Register mask_reg = scratch2;
4077  lw(exponent, FieldMemOperand(object, HeapNumber::kExponentOffset));
4078  li(mask_reg, HeapNumber::kExponentMask);
4079 
4080  And(exponent, exponent, mask_reg);
4081  Branch(not_number, eq, exponent, Operand(mask_reg));
4082  }
4083  ldc1(result, FieldMemOperand(object, HeapNumber::kValueOffset));
4084  bind(&done);
4085 }
4086 
4087 
4088 void MacroAssembler::SmiToDoubleFPURegister(Register smi,
4089  FPURegister value,
4090  Register scratch1) {
4091  sra(scratch1, smi, kSmiTagSize);
4092  mtc1(scratch1, value);
4093  cvt_d_w(value, value);
4094 }
4095 
4096 
4097 void MacroAssembler::AdduAndCheckForOverflow(Register dst,
4098  Register left,
4099  Register right,
4100  Register overflow_dst,
4101  Register scratch) {
4102  ASSERT(!dst.is(overflow_dst));
4103  ASSERT(!dst.is(scratch));
4104  ASSERT(!overflow_dst.is(scratch));
4105  ASSERT(!overflow_dst.is(left));
4106  ASSERT(!overflow_dst.is(right));
4107 
4108  if (left.is(right) && dst.is(left)) {
4109  ASSERT(!dst.is(t9));
4110  ASSERT(!scratch.is(t9));
4111  ASSERT(!left.is(t9));
4112  ASSERT(!right.is(t9));
4113  ASSERT(!overflow_dst.is(t9));
4114  mov(t9, right);
4115  right = t9;
4116  }
4117 
4118  if (dst.is(left)) {
4119  mov(scratch, left); // Preserve left.
4120  addu(dst, left, right); // Left is overwritten.
4121  xor_(scratch, dst, scratch); // Original left.
4122  xor_(overflow_dst, dst, right);
4123  and_(overflow_dst, overflow_dst, scratch);
4124  } else if (dst.is(right)) {
4125  mov(scratch, right); // Preserve right.
4126  addu(dst, left, right); // Right is overwritten.
4127  xor_(scratch, dst, scratch); // Original right.
4128  xor_(overflow_dst, dst, left);
4129  and_(overflow_dst, overflow_dst, scratch);
4130  } else {
4131  addu(dst, left, right);
4132  xor_(overflow_dst, dst, left);
4133  xor_(scratch, dst, right);
4134  and_(overflow_dst, scratch, overflow_dst);
4135  }
4136 }
4137 
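// --- Editorial sketch (not part of the original source) ---
// The xor_/and_ sequence above computes a word whose sign bit is set exactly
// when the signed addition overflowed: overflow happens iff both operands
// have the same sign and the result's sign differs, i.e.
// ((dst ^ left) & (dst ^ right)) < 0. A standalone C++ check (the addition is
// done in unsigned arithmetic to avoid undefined behaviour), assuming <cstdint>:
static bool SignedAddOverflows(int32_t left, int32_t right) {
  int32_t dst = static_cast<int32_t>(
      static_cast<uint32_t>(left) + static_cast<uint32_t>(right));  // wraps
  return (((dst ^ left) & (dst ^ right)) < 0);  // sign bit set => overflow
}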
4138 
4139 void MacroAssembler::SubuAndCheckForOverflow(Register dst,
4140  Register left,
4141  Register right,
4142  Register overflow_dst,
4143  Register scratch) {
4144  ASSERT(!dst.is(overflow_dst));
4145  ASSERT(!dst.is(scratch));
4146  ASSERT(!overflow_dst.is(scratch));
4147  ASSERT(!overflow_dst.is(left));
4148  ASSERT(!overflow_dst.is(right));
4149  ASSERT(!scratch.is(left));
4150  ASSERT(!scratch.is(right));
4151 
4152  // This happens with some crankshaft code. Since Subu works fine if
4153  // left == right, let's not make that restriction here.
4154  if (left.is(right)) {
4155  mov(dst, zero_reg);
4156  mov(overflow_dst, zero_reg);
4157  return;
4158  }
4159 
4160  if (dst.is(left)) {
4161  mov(scratch, left); // Preserve left.
4162  subu(dst, left, right); // Left is overwritten.
4163  xor_(overflow_dst, dst, scratch); // scratch is original left.
4164  xor_(scratch, scratch, right); // scratch is original left.
4165  and_(overflow_dst, scratch, overflow_dst);
4166  } else if (dst.is(right)) {
4167  mov(scratch, right); // Preserve right.
4168  subu(dst, left, right); // Right is overwritten.
4169  xor_(overflow_dst, dst, left);
4170  xor_(scratch, left, scratch); // Original right.
4171  and_(overflow_dst, scratch, overflow_dst);
4172  } else {
4173  subu(dst, left, right);
4174  xor_(overflow_dst, dst, left);
4175  xor_(scratch, left, right);
4176  and_(overflow_dst, scratch, overflow_dst);
4177  }
4178 }
4179 
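// --- Editorial sketch (not part of the original source) ---
// For subtraction the sign-bit test is ((dst ^ left) & (left ^ right)) < 0:
// overflow can only happen when the operands have different signs and the
// result's sign differs from left's. A standalone C++ equivalent, assuming
// <cstdint>:
static bool SignedSubOverflows(int32_t left, int32_t right) {
  int32_t dst = static_cast<int32_t>(
      static_cast<uint32_t>(left) - static_cast<uint32_t>(right));  // wraps
  return (((dst ^ left) & (left ^ right)) < 0);  // sign bit set => overflow
}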
4180 
4181 void MacroAssembler::CallRuntime(const Runtime::Function* f,
4182  int num_arguments) {
4183  // All parameters are on the stack. v0 has the return value after call.
4184 
4185  // If the expected number of arguments of the runtime function is
4186  // constant, we check that the actual number of arguments match the
4187  // expectation.
4188  if (f->nargs >= 0 && f->nargs != num_arguments) {
4189  IllegalOperation(num_arguments);
4190  return;
4191  }
4192 
4193  // TODO(1236192): Most runtime routines don't need the number of
4194  // arguments passed in because it is constant. At some point we
4195  // should remove this need and make the runtime routine entry code
4196  // smarter.
4197  PrepareCEntryArgs(num_arguments);
4198  PrepareCEntryFunction(ExternalReference(f, isolate()));
4199  CEntryStub stub(1);
4200  CallStub(&stub);
4201 }
4202 
4203 
4204 void MacroAssembler::CallRuntimeSaveDoubles(Runtime::FunctionId id) {
4205  const Runtime::Function* function = Runtime::FunctionForId(id);
4206  PrepareCEntryArgs(function->nargs);
4207  PrepareCEntryFunction(ExternalReference(function, isolate()));
4208  CEntryStub stub(1, kSaveFPRegs);
4209  CallStub(&stub);
4210 }
4211 
4212 
4213 void MacroAssembler::CallRuntime(Runtime::FunctionId fid, int num_arguments) {
4214  CallRuntime(Runtime::FunctionForId(fid), num_arguments);
4215 }
4216 
4217 
4218 void MacroAssembler::CallExternalReference(const ExternalReference& ext,
4219  int num_arguments,
4220  BranchDelaySlot bd) {
4221  PrepareCEntryArgs(num_arguments);
4222  PrepareCEntryFunction(ext);
4223 
4224  CEntryStub stub(1);
4225  CallStub(&stub, al, zero_reg, Operand(zero_reg), bd);
4226 }
4227 
4228 
4229 void MacroAssembler::TailCallExternalReference(const ExternalReference& ext,
4230  int num_arguments,
4231  int result_size) {
4232  // TODO(1236192): Most runtime routines don't need the number of
4233  // arguments passed in because it is constant. At some point we
4234  // should remove this need and make the runtime routine entry code
4235  // smarter.
4236  PrepareCEntryArgs(num_arguments);
4237  JumpToExternalReference(ext);
4238 }
4239 
4240 
4241 void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
4242  int num_arguments,
4243  int result_size) {
4244  TailCallExternalReference(ExternalReference(fid, isolate()),
4245  num_arguments,
4246  result_size);
4247 }
4248 
4249 
4250 void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
4251  BranchDelaySlot bd) {
4252  PrepareCEntryFunction(builtin);
4253  CEntryStub stub(1);
4254  Jump(stub.GetCode(),
4255  RelocInfo::CODE_TARGET,
4256  al,
4257  zero_reg,
4258  Operand(zero_reg),
4259  bd);
4260 }
4261 
4262 
4263 void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
4264  InvokeFlag flag,
4265  const CallWrapper& call_wrapper) {
4266  // You can't call a builtin without a valid frame.
4267  ASSERT(flag == JUMP_FUNCTION || has_frame());
4268 
4269  GetBuiltinEntry(t9, id);
4270  if (flag == CALL_FUNCTION) {
4271  call_wrapper.BeforeCall(CallSize(t9));
4272  SetCallKind(t1, CALL_AS_METHOD);
4273  Call(t9);
4274  call_wrapper.AfterCall();
4275  } else {
4276  ASSERT(flag == JUMP_FUNCTION);
4277  SetCallKind(t1, CALL_AS_METHOD);
4278  Jump(t9);
4279  }
4280 }
4281 
4282 
4283 void MacroAssembler::GetBuiltinFunction(Register target,
4284  Builtins::JavaScript id) {
4285  // Load the builtins object into target register.
4286  lw(target, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
4287  lw(target, FieldMemOperand(target, GlobalObject::kBuiltinsOffset));
4288  // Load the JavaScript builtin function from the builtins object.
4289  lw(target, FieldMemOperand(target,
4290  JSBuiltinsObject::OffsetOfFunctionWithId(id)));
4291 }
4292 
4293 
4294 void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
4295  ASSERT(!target.is(a1));
4296  GetBuiltinFunction(a1, id);
4297  // Load the code entry point from the builtins object.
4298  lw(target, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
4299 }
4300 
4301 
4302 void MacroAssembler::SetCounter(StatsCounter* counter, int value,
4303  Register scratch1, Register scratch2) {
4304  if (FLAG_native_code_counters && counter->Enabled()) {
4305  li(scratch1, Operand(value));
4306  li(scratch2, Operand(ExternalReference(counter)));
4307  sw(scratch1, MemOperand(scratch2));
4308  }
4309 }
4310 
4311 
4312 void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
4313  Register scratch1, Register scratch2) {
4314  ASSERT(value > 0);
4315  if (FLAG_native_code_counters && counter->Enabled()) {
4316  li(scratch2, Operand(ExternalReference(counter)));
4317  lw(scratch1, MemOperand(scratch2));
4318  Addu(scratch1, scratch1, Operand(value));
4319  sw(scratch1, MemOperand(scratch2));
4320  }
4321 }
4322 
4323 
4324 void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
4325  Register scratch1, Register scratch2) {
4326  ASSERT(value > 0);
4327  if (FLAG_native_code_counters && counter->Enabled()) {
4328  li(scratch2, Operand(ExternalReference(counter)));
4329  lw(scratch1, MemOperand(scratch2));
4330  Subu(scratch1, scratch1, Operand(value));
4331  sw(scratch1, MemOperand(scratch2));
4332  }
4333 }
4334 
4335 
4336 // -----------------------------------------------------------------------------
4337 // Debugging.
4338 
4339 void MacroAssembler::Assert(Condition cc, const char* msg,
4340  Register rs, Operand rt) {
4341  if (emit_debug_code())
4342  Check(cc, msg, rs, rt);
4343 }
4344 
4345 
4346 void MacroAssembler::AssertRegisterIsRoot(Register reg,
4347  Heap::RootListIndex index) {
4348  if (emit_debug_code()) {
4349  LoadRoot(at, index);
4350  Check(eq, "Register did not match expected root", reg, Operand(at));
4351  }
4352 }
4353 
4354 
4355 void MacroAssembler::AssertFastElements(Register elements) {
4356  if (emit_debug_code()) {
4357  ASSERT(!elements.is(at));
4358  Label ok;
4359  push(elements);
4360  lw(elements, FieldMemOperand(elements, HeapObject::kMapOffset));
4361  LoadRoot(at, Heap::kFixedArrayMapRootIndex);
4362  Branch(&ok, eq, elements, Operand(at));
4363  LoadRoot(at, Heap::kFixedDoubleArrayMapRootIndex);
4364  Branch(&ok, eq, elements, Operand(at));
4365  LoadRoot(at, Heap::kFixedCOWArrayMapRootIndex);
4366  Branch(&ok, eq, elements, Operand(at));
4367  Abort("JSObject with fast elements map has slow elements");
4368  bind(&ok);
4369  pop(elements);
4370  }
4371 }
4372 
4373 
4374 void MacroAssembler::Check(Condition cc, const char* msg,
4375  Register rs, Operand rt) {
4376  Label L;
4377  Branch(&L, cc, rs, rt);
4378  Abort(msg);
4379  // Will not return here.
4380  bind(&L);
4381 }
4382 
4383 
4384 void MacroAssembler::Abort(const char* msg) {
4385  Label abort_start;
4386  bind(&abort_start);
4387  // We want to pass the msg string like a smi to avoid GC
4388  // problems, however msg is not guaranteed to be aligned
4389  // properly. Instead, we pass an aligned pointer that is
4390  // a proper v8 smi, but also pass the alignment difference
4391  // from the real pointer as a smi.
4392  intptr_t p1 = reinterpret_cast<intptr_t>(msg);
4393  intptr_t p0 = (p1 & ~kSmiTagMask) + kSmiTag;
4394  ASSERT(reinterpret_cast<Object*>(p0)->IsSmi());
4395 #ifdef DEBUG
4396  if (msg != NULL) {
4397  RecordComment("Abort message: ");
4398  RecordComment(msg);
4399  }
4400 #endif
4401 
4402  li(a0, Operand(p0));
4403  push(a0);
4404  li(a0, Operand(Smi::FromInt(p1 - p0)));
4405  push(a0);
4406  // Disable stub call restrictions to always allow calls to abort.
4407  if (!has_frame_) {
4408  // We don't actually want to generate a pile of code for this, so just
4409  // claim there is a stack frame, without generating one.
4410  FrameScope scope(this, StackFrame::NONE);
4411  CallRuntime(Runtime::kAbort, 2);
4412  } else {
4413  CallRuntime(Runtime::kAbort, 2);
4414  }
4415  // Will not return here.
4416  if (is_trampoline_pool_blocked()) {
4417  // If the calling code cares about the exact number of
4418  // instructions generated, we insert padding here to keep the size
4419  // of the Abort macro constant.
4420  // Currently in debug mode with debug_code enabled the number of
4421  // generated instructions is 14, so we use this as a maximum value.
4422  static const int kExpectedAbortInstructions = 14;
4423  int abort_instructions = InstructionsGeneratedSince(&abort_start);
4424  ASSERT(abort_instructions <= kExpectedAbortInstructions);
4425  while (abort_instructions++ < kExpectedAbortInstructions) {
4426  nop();
4427  }
4428  }
4429 }
4430 
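// --- Editorial sketch (not part of the original source) ---
// Abort() cannot push the raw msg pointer because it may not look like a smi
// to the GC. Instead it pushes an aligned-down pointer p0 (a valid smi, since
// kSmiTag == 0) plus the small alignment delta encoded as a smi, so the
// runtime can reconstruct p1 = p0 + delta. A host-side sketch of the split,
// assuming <cstdint> and kSmiTagMask == 1:
static void SplitAbortMessagePointer(const char* msg,
                                     intptr_t* p0, intptr_t* delta) {
  const intptr_t kSmiTagMask = 1;           // low tag bit
  intptr_t p1 = reinterpret_cast<intptr_t>(msg);
  *p0 = p1 & ~kSmiTagMask;                  // aligned down: looks like a smi
  *delta = p1 - *p0;                        // 0 or 1, smi-encodable
}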
4431 
4432 void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
4433  if (context_chain_length > 0) {
4434  // Move up the chain of contexts to the context containing the slot.
4435  lw(dst, MemOperand(cp, Context::SlotOffset(Context::PREVIOUS_INDEX)));
4436  for (int i = 1; i < context_chain_length; i++) {
4437  lw(dst, MemOperand(dst, Context::SlotOffset(Context::PREVIOUS_INDEX)));
4438  }
4439  } else {
4440  // Slot is in the current function context. Move it into the
4441  // destination register in case we store into it (the write barrier
4442  // cannot be allowed to destroy the context in cp).
4443  Move(dst, cp);
4444  }
4445 }
4446 
4447 
4448 void MacroAssembler::LoadTransitionedArrayMapConditional(
4449  ElementsKind expected_kind,
4450  ElementsKind transitioned_kind,
4451  Register map_in_out,
4452  Register scratch,
4453  Label* no_map_match) {
4454  // Load the global or builtins object from the current context.
4455  lw(scratch,
4456  MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
4457  lw(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset));
4458 
4459  // Check that the function's map is the same as the expected cached map.
4460  lw(scratch,
4461  MemOperand(scratch,
4462  Context::SlotOffset(Context::JS_ARRAY_MAPS_INDEX)));
4463  size_t offset = expected_kind * kPointerSize +
4464  FixedArrayBase::kHeaderSize;
4465  lw(at, FieldMemOperand(scratch, offset));
4466  Branch(no_map_match, ne, map_in_out, Operand(at));
4467 
4468  // Use the transitioned cached map.
4469  offset = transitioned_kind * kPointerSize +
4470  FixedArrayBase::kHeaderSize;
4471  lw(map_in_out, FieldMemOperand(scratch, offset));
4472 }
4473 
4474 
4475 void MacroAssembler::LoadInitialArrayMap(
4476  Register function_in, Register scratch,
4477  Register map_out, bool can_have_holes) {
4478  ASSERT(!function_in.is(map_out));
4479  Label done;
4480  lw(map_out, FieldMemOperand(function_in,
4481  JSFunction::kPrototypeOrInitialMapOffset));
4482  if (!FLAG_smi_only_arrays) {
4483  ElementsKind kind = can_have_holes ? FAST_HOLEY_ELEMENTS : FAST_ELEMENTS;
4484  LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
4485  kind,
4486  map_out,
4487  scratch,
4488  &done);
4489  } else if (can_have_holes) {
4490  LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
4491  FAST_HOLEY_SMI_ELEMENTS,
4492  map_out,
4493  scratch,
4494  &done);
4495  }
4496  bind(&done);
4497 }
4498 
4499 
4500 void MacroAssembler::LoadGlobalFunction(int index, Register function) {
4501  // Load the global or builtins object from the current context.
4502  lw(function,
4503  MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
4504  // Load the native context from the global or builtins object.
4505  lw(function, FieldMemOperand(function,
4506  GlobalObject::kNativeContextOffset));
4507  // Load the function from the native context.
4508  lw(function, MemOperand(function, Context::SlotOffset(index)));
4509 }
4510 
4511 
4512 void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
4513  Register map,
4514  Register scratch) {
4515  // Load the initial map. The global functions all have initial maps.
4516  lw(map, FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
4517  if (emit_debug_code()) {
4518  Label ok, fail;
4519  CheckMap(map, scratch, Heap::kMetaMapRootIndex, &fail, DO_SMI_CHECK);
4520  Branch(&ok);
4521  bind(&fail);
4522  Abort("Global functions must have initial map");
4523  bind(&ok);
4524  }
4525 }
4526 
4527 
4528 void MacroAssembler::EnterFrame(StackFrame::Type type) {
4529  addiu(sp, sp, -5 * kPointerSize);
4530  li(t8, Operand(Smi::FromInt(type)));
4531  li(t9, Operand(CodeObject()), CONSTANT_SIZE);
4532  sw(ra, MemOperand(sp, 4 * kPointerSize));
4533  sw(fp, MemOperand(sp, 3 * kPointerSize));
4534  sw(cp, MemOperand(sp, 2 * kPointerSize));
4535  sw(t8, MemOperand(sp, 1 * kPointerSize));
4536  sw(t9, MemOperand(sp, 0 * kPointerSize));
4537  addiu(fp, sp, 3 * kPointerSize);
4538 }
4539 
4540 
4541 void MacroAssembler::LeaveFrame(StackFrame::Type type) {
4542  mov(sp, fp);
4543  lw(fp, MemOperand(sp, 0 * kPointerSize));
4544  lw(ra, MemOperand(sp, 1 * kPointerSize));
4545  addiu(sp, sp, 2 * kPointerSize);
4546 }
4547 
4548 
4549 void MacroAssembler::EnterExitFrame(bool save_doubles,
4550  int stack_space) {
4551  // Set up the frame structure on the stack.
4552  STATIC_ASSERT(2 * kPointerSize == ExitFrameConstants::kCallerSPDisplacement);
4553  STATIC_ASSERT(1 * kPointerSize == ExitFrameConstants::kCallerPCOffset);
4554  STATIC_ASSERT(0 * kPointerSize == ExitFrameConstants::kCallerFPOffset);
4555 
4556  // This is how the stack will look:
4557  // fp + 2 (==kCallerSPDisplacement) - old stack's end
4558  // [fp + 1 (==kCallerPCOffset)] - saved old ra
4559  // [fp + 0 (==kCallerFPOffset)] - saved old fp
4560  // [fp - 1 (==kSPOffset)] - sp of the called function
4561  // [fp - 2 (==kCodeOffset)] - CodeObject
4562  // fp - (2 + stack_space + alignment) == sp == [fp - kSPOffset] - top of the
4563  // new stack (will contain saved ra)
4564 
4565  // Save registers.
4566  addiu(sp, sp, -4 * kPointerSize);
4567  sw(ra, MemOperand(sp, 3 * kPointerSize));
4568  sw(fp, MemOperand(sp, 2 * kPointerSize));
4569  addiu(fp, sp, 2 * kPointerSize); // Set up new frame pointer.
4570 
4571  if (emit_debug_code()) {
4572  sw(zero_reg, MemOperand(fp, ExitFrameConstants::kSPOffset));
4573  }
4574 
4575  // Accessed from ExitFrame::code_slot.
4576  li(t8, Operand(CodeObject()), CONSTANT_SIZE);
4577  sw(t8, MemOperand(fp, ExitFrameConstants::kCodeOffset));
4578 
4579  // Save the frame pointer and the context in top.
4580  li(t8, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
4581  sw(fp, MemOperand(t8));
4582  li(t8, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
4583  sw(cp, MemOperand(t8));
4584 
4585  const int frame_alignment = MacroAssembler::ActivationFrameAlignment();
4586  if (save_doubles) {
4587  // The stack must be aligned to 0 modulo 8 for stores with sdc1.
4588  ASSERT(kDoubleSize == frame_alignment);
4589  if (frame_alignment > 0) {
4590  ASSERT(IsPowerOf2(frame_alignment));
4591  And(sp, sp, Operand(-frame_alignment)); // Align stack.
4592  }
4592  }
4593  int space = FPURegister::kNumRegisters * kDoubleSize;
4594  Subu(sp, sp, Operand(space));
4595  // Remember: we only need to save every 2nd double FPU value.
4596  for (int i = 0; i < FPURegister::kNumRegisters; i+=2) {
4597  FPURegister reg = FPURegister::from_code(i);
4598  sdc1(reg, MemOperand(sp, i * kDoubleSize));
4599  }
4600  }
4601 
4602  // Reserve space for the return address, stack space and an optional slot
4603  // (used by the DirectCEntryStub to hold the return value if a struct is
4604  // returned) and align the frame preparing for calling the runtime function.
4605  ASSERT(stack_space >= 0);
4606  Subu(sp, sp, Operand((stack_space + 2) * kPointerSize));
4607  if (frame_alignment > 0) {
4608  ASSERT(IsPowerOf2(frame_alignment));
4609  And(sp, sp, Operand(-frame_alignment)); // Align stack.
4610  }
4611 
4612  // Set the exit frame sp value to point just before the return address
4613  // location.
4614  addiu(at, sp, kPointerSize);
4615  sw(at, MemOperand(fp, ExitFrameConstants::kSPOffset));
4616 }
4617 
4618 
4619 void MacroAssembler::LeaveExitFrame(bool save_doubles,
4620  Register argument_count,
4621  bool do_return) {
4622  // Optionally restore all double registers.
4623  if (save_doubles) {
4624  // Remember: we only need to restore every 2nd double FPU value.
4625  lw(t8, MemOperand(fp, ExitFrameConstants::kSPOffset));
4626  for (int i = 0; i < FPURegister::kNumRegisters; i+=2) {
4627  FPURegister reg = FPURegister::from_code(i);
4628  ldc1(reg, MemOperand(t8, i * kDoubleSize + kPointerSize));
4629  }
4630  }
4631 
4632  // Clear top frame.
4633  li(t8, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
4634  sw(zero_reg, MemOperand(t8));
4635 
4636  // Restore current context from top and clear it in debug mode.
4637  li(t8, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
4638  lw(cp, MemOperand(t8));
4639 #ifdef DEBUG
4640  sw(a3, MemOperand(t8));
4641 #endif
4642 
4643  // Pop the arguments, restore registers, and return.
4644  mov(sp, fp); // Respect ABI stack constraint.
4645  lw(fp, MemOperand(sp, ExitFrameConstants::kCallerFPOffset));
4646  lw(ra, MemOperand(sp, ExitFrameConstants::kCallerPCOffset));
4647 
4648  if (argument_count.is_valid()) {
4649  sll(t8, argument_count, kPointerSizeLog2);
4650  addu(sp, sp, t8);
4651  }
4652 
4653  if (do_return) {
4654  Ret(USE_DELAY_SLOT);
4655  // If returning, the instruction in the delay slot will be the addiu below.
4656  }
4657  addiu(sp, sp, 8);
4658 }
4659 
4660 
4661 void MacroAssembler::InitializeNewString(Register string,
4662  Register length,
4663  Heap::RootListIndex map_index,
4664  Register scratch1,
4665  Register scratch2) {
4666  sll(scratch1, length, kSmiTagSize);
4667  LoadRoot(scratch2, map_index);
4668  sw(scratch1, FieldMemOperand(string, String::kLengthOffset));
4669  li(scratch1, Operand(String::kEmptyHashField));
4670  sw(scratch2, FieldMemOperand(string, HeapObject::kMapOffset));
4671  sw(scratch1, FieldMemOperand(string, String::kHashFieldOffset));
4672 }
4673 
4674 
4675 int MacroAssembler::ActivationFrameAlignment() {
4676 #if defined(V8_HOST_ARCH_MIPS)
4677  // Running on the real platform. Use the alignment as mandated by the local
4678  // environment.
4679  // Note: This will break if we ever start generating snapshots on one Mips
4680  // platform for another Mips platform with a different alignment.
4681  return OS::ActivationFrameAlignment();
4682 #else // defined(V8_HOST_ARCH_MIPS)
4683  // If we are using the simulator then we should always align to the expected
4684  // alignment. As the simulator is used to generate snapshots we do not know
4685  // if the target platform will need alignment, so this is controlled from a
4686  // flag.
4687  return FLAG_sim_stack_alignment;
4688 #endif // defined(V8_HOST_ARCH_MIPS)
4689 }
4690 
4691 
4692 void MacroAssembler::AssertStackIsAligned() {
4693  if (emit_debug_code()) {
4694  const int frame_alignment = ActivationFrameAlignment();
4695  const int frame_alignment_mask = frame_alignment - 1;
4696 
4697  if (frame_alignment > kPointerSize) {
4698  Label alignment_as_expected;
4699  ASSERT(IsPowerOf2(frame_alignment));
4700  andi(at, sp, frame_alignment_mask);
4701  Branch(&alignment_as_expected, eq, at, Operand(zero_reg));
4702  // Don't use Check here, as it will call Runtime_Abort re-entering here.
4703  stop("Unexpected stack alignment");
4704  bind(&alignment_as_expected);
4705  }
4706  }
4707 }
4708 
4709 
4710 void MacroAssembler::JumpIfNotPowerOfTwoOrZero(
4711  Register reg,
4712  Register scratch,
4713  Label* not_power_of_two_or_zero) {
4714  Subu(scratch, reg, Operand(1));
4715  Branch(USE_DELAY_SLOT, not_power_of_two_or_zero, lt,
4716  scratch, Operand(zero_reg));
4717  and_(at, scratch, reg); // In the delay slot.
4718  Branch(not_power_of_two_or_zero, ne, at, Operand(zero_reg));
4719 }
4720 
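// --- Editorial sketch (not part of the original source) ---
// The branch pair above relies on the classic bit trick: a positive value is
// a power of two exactly when (value & (value - 1)) == 0 (the macro also
// bails out early for non-positive inputs via the lt branch). The same
// predicate in plain C++, assuming <cstdint>:
static bool IsPowerOfTwoNonZero(uint32_t value) {
  return value != 0 && (value & (value - 1)) == 0;
}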
4721 
4722 void MacroAssembler::SmiTagCheckOverflow(Register reg, Register overflow) {
4723  ASSERT(!reg.is(overflow));
4724  mov(overflow, reg); // Save original value.
4725  SmiTag(reg);
4726  xor_(overflow, overflow, reg); // Overflow if (value ^ 2 * value) < 0.
4727 }
4728 
4729 
4730 void MacroAssembler::SmiTagCheckOverflow(Register dst,
4731  Register src,
4732  Register overflow) {
4733  if (dst.is(src)) {
4734  // Fall back to slower case.
4735  SmiTagCheckOverflow(dst, overflow);
4736  } else {
4737  ASSERT(!dst.is(src));
4738  ASSERT(!dst.is(overflow));
4739  ASSERT(!src.is(overflow));
4740  SmiTag(dst, src);
4741  xor_(overflow, dst, src); // Overflow if (value ^ 2 * value) < 0.
4742  }
4743 }
4744 
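// --- Editorial sketch (not part of the original source) ---
// Smi-tagging multiplies the value by two (shift left by kSmiTagSize == 1).
// That only fits when the value is representable in 31 bits, and the
// (value ^ 2 * value) trick flags exactly that case: the xor has its sign bit
// set iff the shift changed the sign bit. A host-side restatement, assuming
// <cstdint>:
static bool SmiTagOverflows(int32_t value) {
  int32_t tagged = static_cast<int32_t>(static_cast<uint32_t>(value) << 1);
  return ((value ^ tagged) < 0);  // sign bit of (value ^ 2 * value)
}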
4745 
4746 void MacroAssembler::UntagAndJumpIfSmi(Register dst,
4747  Register src,
4748  Label* smi_case) {
4749  JumpIfSmi(src, smi_case, at, USE_DELAY_SLOT);
4750  SmiUntag(dst, src);
4751 }
4752 
4753 
4754 void MacroAssembler::UntagAndJumpIfNotSmi(Register dst,
4755  Register src,
4756  Label* non_smi_case) {
4757  JumpIfNotSmi(src, non_smi_case, at, USE_DELAY_SLOT);
4758  SmiUntag(dst, src);
4759 }
4760 
4761 void MacroAssembler::JumpIfSmi(Register value,
4762  Label* smi_label,
4763  Register scratch,
4764  BranchDelaySlot bd) {
4765  ASSERT_EQ(0, kSmiTag);
4766  andi(scratch, value, kSmiTagMask);
4767  Branch(bd, smi_label, eq, scratch, Operand(zero_reg));
4768 }
4769 
4770 void MacroAssembler::JumpIfNotSmi(Register value,
4771  Label* not_smi_label,
4772  Register scratch,
4773  BranchDelaySlot bd) {
4774  ASSERT_EQ(0, kSmiTag);
4775  andi(scratch, value, kSmiTagMask);
4776  Branch(bd, not_smi_label, ne, scratch, Operand(zero_reg));
4777 }
4778 
4779 
4780 void MacroAssembler::JumpIfNotBothSmi(Register reg1,
4781  Register reg2,
4782  Label* on_not_both_smi) {
4783  STATIC_ASSERT(kSmiTag == 0);
4784  ASSERT_EQ(1, kSmiTagMask);
4785  or_(at, reg1, reg2);
4786  JumpIfNotSmi(at, on_not_both_smi);
4787 }
4788 
4789 
4790 void MacroAssembler::JumpIfEitherSmi(Register reg1,
4791  Register reg2,
4792  Label* on_either_smi) {
4793  STATIC_ASSERT(kSmiTag == 0);
4794  ASSERT_EQ(1, kSmiTagMask);
4795  // Both Smi tags must be 1 (not Smi).
4796  and_(at, reg1, reg2);
4797  JumpIfSmi(at, on_either_smi);
4798 }
4799 
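// --- Editorial sketch (not part of the original source) ---
// With kSmiTag == 0 and kSmiTagMask == 1, a value is a smi iff its low bit is
// clear. OR-ing two values therefore yields a smi-looking word only if both
// are smis, while AND-ing them yields one if at least one is a smi, which is
// what the two helpers above exploit. In plain C++, assuming <cstdint>:
static bool BothAreSmis(intptr_t a, intptr_t b)  { return ((a | b) & 1) == 0; }
static bool EitherIsSmi(intptr_t a, intptr_t b)  { return ((a & b) & 1) == 0; }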
4800 
4801 void MacroAssembler::AssertNotSmi(Register object) {
4802  if (emit_debug_code()) {
4803  STATIC_ASSERT(kSmiTag == 0);
4804  andi(at, object, kSmiTagMask);
4805  Check(ne, "Operand is a smi", at, Operand(zero_reg));
4806  }
4807 }
4808 
4809 
4810 void MacroAssembler::AssertSmi(Register object) {
4811  if (emit_debug_code()) {
4812  STATIC_ASSERT(kSmiTag == 0);
4813  andi(at, object, kSmiTagMask);
4814  Check(eq, "Operand is not a smi", at, Operand(zero_reg));
4815  }
4816 }
4817 
4818 
4819 void MacroAssembler::AssertString(Register object) {
4820  if (emit_debug_code()) {
4821  STATIC_ASSERT(kSmiTag == 0);
4822  And(t0, object, Operand(kSmiTagMask));
4823  Check(ne, "Operand is a smi and not a string", t0, Operand(zero_reg));
4824  push(object);
4825  lw(object, FieldMemOperand(object, HeapObject::kMapOffset));
4826  lbu(object, FieldMemOperand(object, Map::kInstanceTypeOffset));
4827  Check(lo, "Operand is not a string", object, Operand(FIRST_NONSTRING_TYPE));
4828  pop(object);
4829  }
4830 }
4831 
4832 
4833 void MacroAssembler::AssertRootValue(Register src,
4834  Heap::RootListIndex root_value_index,
4835  const char* message) {
4836  if (emit_debug_code()) {
4837  ASSERT(!src.is(at));
4838  LoadRoot(at, root_value_index);
4839  Check(eq, message, src, Operand(at));
4840  }
4841 }
4842 
4843 
4844 void MacroAssembler::JumpIfNotHeapNumber(Register object,
4845  Register heap_number_map,
4846  Register scratch,
4847  Label* on_not_heap_number) {
4848  lw(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
4849  AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
4850  Branch(on_not_heap_number, ne, scratch, Operand(heap_number_map));
4851 }
4852 
4853 
4854 void MacroAssembler::JumpIfNonSmisNotBothSequentialAsciiStrings(
4855  Register first,
4856  Register second,
4857  Register scratch1,
4858  Register scratch2,
4859  Label* failure) {
4860  // Test that both first and second are sequential ASCII strings.
4861  // Assume that they are non-smis.
4862  lw(scratch1, FieldMemOperand(first, HeapObject::kMapOffset));
4863  lw(scratch2, FieldMemOperand(second, HeapObject::kMapOffset));
4864  lbu(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
4865  lbu(scratch2, FieldMemOperand(scratch2, Map::kInstanceTypeOffset));
4866 
4867  JumpIfBothInstanceTypesAreNotSequentialAscii(scratch1,
4868  scratch2,
4869  scratch1,
4870  scratch2,
4871  failure);
4872 }
4873 
4874 
4875 void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(Register first,
4876  Register second,
4877  Register scratch1,
4878  Register scratch2,
4879  Label* failure) {
4880  // Check that neither is a smi.
4881  STATIC_ASSERT(kSmiTag == 0);
4882  And(scratch1, first, Operand(second));
4883  JumpIfSmi(scratch1, failure);
4884  JumpIfNonSmisNotBothSequentialAsciiStrings(first,
4885  second,
4886  scratch1,
4887  scratch2,
4888  failure);
4889 }
4890 
4891 
4892 void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialAscii(
4893  Register first,
4894  Register second,
4895  Register scratch1,
4896  Register scratch2,
4897  Label* failure) {
4898  int kFlatAsciiStringMask =
4899  kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
4900  int kFlatAsciiStringTag = ASCII_STRING_TYPE;
4901  ASSERT(kFlatAsciiStringTag <= 0xffff); // Ensure this fits 16-bit immed.
4902  andi(scratch1, first, kFlatAsciiStringMask);
4903  Branch(failure, ne, scratch1, Operand(kFlatAsciiStringTag));
4904  andi(scratch2, second, kFlatAsciiStringMask);
4905  Branch(failure, ne, scratch2, Operand(kFlatAsciiStringTag));
4906 }
4907 
4908 
4909 void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii(Register type,
4910  Register scratch,
4911  Label* failure) {
4912  int kFlatAsciiStringMask =
4913  kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
4914  int kFlatAsciiStringTag = ASCII_STRING_TYPE;
4915  And(scratch, type, Operand(kFlatAsciiStringMask));
4916  Branch(failure, ne, scratch, Operand(kFlatAsciiStringTag));
4917 }
4918 
4919 
4920 static const int kRegisterPassedArguments = 4;
4921 
4922 int MacroAssembler::CalculateStackPassedWords(int num_reg_arguments,
4923  int num_double_arguments) {
4924  int stack_passed_words = 0;
4925  num_reg_arguments += 2 * num_double_arguments;
4926 
4927  // Up to four simple arguments are passed in registers a0..a3.
4928  if (num_reg_arguments > kRegisterPassedArguments) {
4929  stack_passed_words += num_reg_arguments - kRegisterPassedArguments;
4930  }
4931  stack_passed_words += kCArgSlotCount;
4932  return stack_passed_words;
4933 }
4934 
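// --- Editorial sketch (not part of the original source) ---
// Under the MIPS O32 convention modelled here, a double consumes two
// register-sized slots, the first four slots travel in a0..a3, anything
// beyond that goes on the stack, and kCArgSlotCount home slots are always
// reserved. A worked example using the same arithmetic as above, with
// kCArgSlotCount assumed to be 4:
static int ExampleStackPassedWords() {
  const int kRegisterPassed = 4;
  const int kCArgSlots = 4;                 // assumed value of kCArgSlotCount
  int reg_args = 3 + 2 * 2;                 // 3 ints + 2 doubles = 7 slots
  int stack_words =
      (reg_args > kRegisterPassed) ? reg_args - kRegisterPassed : 0;
  return stack_words + kCArgSlots;          // 3 + 4 = 7 words
}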
4935 
4936 void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
4937  int num_double_arguments,
4938  Register scratch) {
4939  int frame_alignment = ActivationFrameAlignment();
4940 
4941  // Up to four simple arguments are passed in registers a0..a3.
4942  // Those four arguments must have reserved argument slots on the stack for
4943  // mips, even though those argument slots are not normally used.
4944  // Remaining arguments are pushed on the stack, above (higher address than)
4945  // the argument slots.
4946  int stack_passed_arguments = CalculateStackPassedWords(
4947  num_reg_arguments, num_double_arguments);
4948  if (frame_alignment > kPointerSize) {
4949  // Make stack end at alignment and make room for num_arguments - 4 words
4950  // and the original value of sp.
4951  mov(scratch, sp);
4952  Subu(sp, sp, Operand((stack_passed_arguments + 1) * kPointerSize));
4953  ASSERT(IsPowerOf2(frame_alignment));
4954  And(sp, sp, Operand(-frame_alignment));
4955  sw(scratch, MemOperand(sp, stack_passed_arguments * kPointerSize));
4956  } else {
4957  Subu(sp, sp, Operand(stack_passed_arguments * kPointerSize));
4958  }
4959 }
4960 
4961 
4962 void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
4963  Register scratch) {
4964  PrepareCallCFunction(num_reg_arguments, 0, scratch);
4965 }
4966 
4967 
4968 void MacroAssembler::CallCFunction(ExternalReference function,
4969  int num_reg_arguments,
4970  int num_double_arguments) {
4971  li(t8, Operand(function));
4972  CallCFunctionHelper(t8, num_reg_arguments, num_double_arguments);
4973 }
4974 
4975 
4976 void MacroAssembler::CallCFunction(Register function,
4977  int num_reg_arguments,
4978  int num_double_arguments) {
4979  CallCFunctionHelper(function, num_reg_arguments, num_double_arguments);
4980 }
4981 
4982 
4983 void MacroAssembler::CallCFunction(ExternalReference function,
4984  int num_arguments) {
4985  CallCFunction(function, num_arguments, 0);
4986 }
4987 
4988 
4989 void MacroAssembler::CallCFunction(Register function,
4990  int num_arguments) {
4991  CallCFunction(function, num_arguments, 0);
4992 }
4993 
4994 
4995 void MacroAssembler::CallCFunctionHelper(Register function,
4996  int num_reg_arguments,
4997  int num_double_arguments) {
4998  ASSERT(has_frame());
4999  // Make sure that the stack is aligned before calling a C function unless
5000  // running in the simulator. The simulator has its own alignment check which
5001  // provides more information.
5002  // The argument slots are presumed to have been set up by
5003  // PrepareCallCFunction. The C function must be called via t9, per the MIPS ABI.
5004 
5005 #if defined(V8_HOST_ARCH_MIPS)
5006  if (emit_debug_code()) {
5007  int frame_alignment = OS::ActivationFrameAlignment();
5008  int frame_alignment_mask = frame_alignment - 1;
5009  if (frame_alignment > kPointerSize) {
5010  ASSERT(IsPowerOf2(frame_alignment));
5011  Label alignment_as_expected;
5012  And(at, sp, Operand(frame_alignment_mask));
5013  Branch(&alignment_as_expected, eq, at, Operand(zero_reg));
5014  // Don't use Check here, as it will call Runtime_Abort possibly
5015  // re-entering here.
5016  stop("Unexpected alignment in CallCFunction");
5017  bind(&alignment_as_expected);
5018  }
5019  }
5020 #endif // V8_HOST_ARCH_MIPS
5021 
5022  // Just call directly. The function called cannot cause a GC, or
5023  // allow preemption, so the return address in the link register
5024  // stays correct.
5025 
5026  if (!function.is(t9)) {
5027  mov(t9, function);
5028  function = t9;
5029  }
5030 
5031  Call(function);
5032 
5033  int stack_passed_arguments = CalculateStackPassedWords(
5034  num_reg_arguments, num_double_arguments);
5035 
5036  if (OS::ActivationFrameAlignment() > kPointerSize) {
5037  lw(sp, MemOperand(sp, stack_passed_arguments * kPointerSize));
5038  } else {
5039  Addu(sp, sp, Operand(stack_passed_arguments * sizeof(kPointerSize)));
5040  }
5041 }
5042 
5043 
5044 #undef BRANCH_ARGS_CHECK
5045 
5046 
5047 void MacroAssembler::PatchRelocatedValue(Register li_location,
5048  Register scratch,
5049  Register new_value) {
5050  lw(scratch, MemOperand(li_location));
5051  // At this point scratch is a lui(at, ...) instruction.
5052  if (emit_debug_code()) {
5053  And(scratch, scratch, kOpcodeMask);
5054  Check(eq, "The instruction to patch should be a lui.",
5055  scratch, Operand(LUI));
5056  lw(scratch, MemOperand(li_location));
5057  }
5058  srl(t9, new_value, kImm16Bits);
5059  Ins(scratch, t9, 0, kImm16Bits);
5060  sw(scratch, MemOperand(li_location));
5061 
5062  lw(scratch, MemOperand(li_location, kInstrSize));
5063  // scratch is now ori(at, ...).
5064  if (emit_debug_code()) {
5065  And(scratch, scratch, kOpcodeMask);
5066  Check(eq, "The instruction to patch should be an ori.",
5067  scratch, Operand(ORI));
5068  lw(scratch, MemOperand(li_location, kInstrSize));
5069  }
5070  Ins(scratch, new_value, 0, kImm16Bits);
5071  sw(scratch, MemOperand(li_location, kInstrSize));
5072 
5073  // Update the I-cache so the new lui and ori can be executed.
5074  FlushICache(li_location, 2);
5075 }
5076 
5077 void MacroAssembler::GetRelocatedValue(Register li_location,
5078  Register value,
5079  Register scratch) {
5080  lw(value, MemOperand(li_location));
5081  if (emit_debug_code()) {
5082  And(value, value, kOpcodeMask);
5083  Check(eq, "The instruction should be a lui.",
5084  value, Operand(LUI));
5085  lw(value, MemOperand(li_location));
5086  }
5087 
5088  // value now holds a lui instruction. Extract the immediate.
5089  sll(value, value, kImm16Bits);
5090 
5091  lw(scratch, MemOperand(li_location, kInstrSize));
5092  if (emit_debug_code()) {
5093  And(scratch, scratch, kOpcodeMask);
5094  Check(eq, "The instruction should be an ori.",
5095  scratch, Operand(ORI));
5096  lw(scratch, MemOperand(li_location, kInstrSize));
5097  }
5098  // "scratch" now holds an ori instruction. Extract the immediate.
5099  andi(scratch, scratch, kImm16Mask);
5100 
5101  // Merge the results.
5102  or_(value, value, scratch);
5103 }
5104 
5105 
5106 void MacroAssembler::CheckPageFlag(
5107  Register object,
5108  Register scratch,
5109  int mask,
5110  Condition cc,
5111  Label* condition_met) {
5112  And(scratch, object, Operand(~Page::kPageAlignmentMask));
5113  lw(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
5114  And(scratch, scratch, Operand(mask));
5115  Branch(condition_met, cc, scratch, Operand(zero_reg));
5116 }
5117 
5118 
5119 void MacroAssembler::JumpIfBlack(Register object,
5120  Register scratch0,
5121  Register scratch1,
5122  Label* on_black) {
5123  HasColor(object, scratch0, scratch1, on_black, 1, 0); // kBlackBitPattern.
5124  ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
5125 }
5126 
5127 
5128 void MacroAssembler::HasColor(Register object,
5129  Register bitmap_scratch,
5130  Register mask_scratch,
5131  Label* has_color,
5132  int first_bit,
5133  int second_bit) {
5134  ASSERT(!AreAliased(object, bitmap_scratch, mask_scratch, t8));
5135  ASSERT(!AreAliased(object, bitmap_scratch, mask_scratch, t9));
5136 
5137  GetMarkBits(object, bitmap_scratch, mask_scratch);
5138 
5139  Label other_color, word_boundary;
5140  lw(t9, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
5141  And(t8, t9, Operand(mask_scratch));
5142  Branch(&other_color, first_bit == 1 ? eq : ne, t8, Operand(zero_reg));
5143  // Shift left 1 by adding.
5144  Addu(mask_scratch, mask_scratch, Operand(mask_scratch));
5145  Branch(&word_boundary, eq, mask_scratch, Operand(zero_reg));
5146  And(t8, t9, Operand(mask_scratch));
5147  Branch(has_color, second_bit == 1 ? ne : eq, t8, Operand(zero_reg));
5148  jmp(&other_color);
5149 
5150  bind(&word_boundary);
5151  lw(t9, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize + kPointerSize));
5152  And(t9, t9, Operand(1));
5153  Branch(has_color, second_bit == 1 ? ne : eq, t9, Operand(zero_reg));
5154  bind(&other_color);
5155 }
5156 
5157 
5158 // Detect some, but not all, common pointer-free objects. This is used by the
5159 // incremental write barrier which doesn't care about oddballs (they are always
5160 // marked black immediately so this code is not hit).
5161 void MacroAssembler::JumpIfDataObject(Register value,
5162  Register scratch,
5163  Label* not_data_object) {
5164  ASSERT(!AreAliased(value, scratch, t8, no_reg));
5165  Label is_data_object;
5166  lw(scratch, FieldMemOperand(value, HeapObject::kMapOffset));
5167  LoadRoot(t8, Heap::kHeapNumberMapRootIndex);
5168  Branch(&is_data_object, eq, t8, Operand(scratch));
5170  ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
5171  // If it's a string and it's not a cons string then it's an object containing
5172  // no GC pointers.
5173  lbu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
5174  And(t8, scratch, Operand(kIsIndirectStringMask | kIsNotStringMask));
5175  Branch(not_data_object, ne, t8, Operand(zero_reg));
5176  bind(&is_data_object);
5177 }
5178 
5179 
5180 void MacroAssembler::GetMarkBits(Register addr_reg,
5181  Register bitmap_reg,
5182  Register mask_reg) {
5183  ASSERT(!AreAliased(addr_reg, bitmap_reg, mask_reg, no_reg));
5184  And(bitmap_reg, addr_reg, Operand(~Page::kPageAlignmentMask));
5185  Ext(mask_reg, addr_reg, kPointerSizeLog2, Bitmap::kBitsPerCellLog2);
5186  const int kLowBits = kPointerSizeLog2 + Bitmap::kBitsPerCellLog2;
5187  Ext(t8, addr_reg, kLowBits, kPageSizeBits - kLowBits);
5188  sll(t8, t8, kPointerSizeLog2);
5189  Addu(bitmap_reg, bitmap_reg, t8);
5190  li(t8, Operand(1));
5191  sllv(mask_reg, t8, mask_reg);
5192 }
5193 
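// --- Editorial sketch (not part of the original source) ---
// GetMarkBits splits an address into (page, bitmap cell, bit): the page base
// comes from masking with ~kPageAlignmentMask, the bit index within a cell is
// bits [kPointerSizeLog2, kPointerSizeLog2 + kBitsPerCellLog2) of the
// address, and the remaining low page bits select the cell. A sketch of the
// same decomposition with assumed 32-bit constants (4-byte pointers, 32-bit
// bitmap cells, 1 MB pages), assuming <cstdint>:
static void MarkBitPosition(uint32_t addr,
                            uint32_t* cell_index, uint32_t* mask) {
  const int kPointerSizeLog2 = 2;
  const int kBitsPerCellLog2 = 5;   // 32 bits per bitmap cell (assumed)
  const int kPageSizeBits = 20;     // assumed 1 MB pages
  uint32_t bit = (addr >> kPointerSizeLog2) & ((1u << kBitsPerCellLog2) - 1);
  *cell_index = (addr & ((1u << kPageSizeBits) - 1)) >>
                (kPointerSizeLog2 + kBitsPerCellLog2);
  *mask = 1u << bit;
}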
5194 
5195 void MacroAssembler::EnsureNotWhite(
5196  Register value,
5197  Register bitmap_scratch,
5198  Register mask_scratch,
5199  Register load_scratch,
5200  Label* value_is_white_and_not_data) {
5201  ASSERT(!AreAliased(value, bitmap_scratch, mask_scratch, t8));
5202  GetMarkBits(value, bitmap_scratch, mask_scratch);
5203 
5204  // If the value is black or grey we don't need to do anything.
5205  ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0);
5206  ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
5207  ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0);
5208  ASSERT(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
5209 
5210  Label done;
5211 
5212  // Since both black and grey have a 1 in the first position and white does
5213  // not have a 1 there we only need to check one bit.
5214  lw(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
5215  And(t8, mask_scratch, load_scratch);
5216  Branch(&done, ne, t8, Operand(zero_reg));
5217 
5218  if (emit_debug_code()) {
5219  // Check for impossible bit pattern.
5220  Label ok;
5221  // sll may overflow, making the check conservative.
5222  sll(t8, mask_scratch, 1);
5223  And(t8, load_scratch, t8);
5224  Branch(&ok, eq, t8, Operand(zero_reg));
5225  stop("Impossible marking bit pattern");
5226  bind(&ok);
5227  }
5228 
5229  // Value is white. We check whether it is data that doesn't need scanning.
5230  // Currently only checks for HeapNumber and non-cons strings.
5231  Register map = load_scratch; // Holds map while checking type.
5232  Register length = load_scratch; // Holds length of object after testing type.
5233  Label is_data_object;
5234 
5235  // Check for heap-number
5236  lw(map, FieldMemOperand(value, HeapObject::kMapOffset));
5237  LoadRoot(t8, Heap::kHeapNumberMapRootIndex);
5238  {
5239  Label skip;
5240  Branch(&skip, ne, t8, Operand(map));
5241  li(length, HeapNumber::kSize);
5242  Branch(&is_data_object);
5243  bind(&skip);
5244  }
5245 
5246  // Check for strings.
5248  ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
5249  // If it's a string and it's not a cons string then it's an object containing
5250  // no GC pointers.
5251  Register instance_type = load_scratch;
5252  lbu(instance_type, FieldMemOperand(map, Map::kInstanceTypeOffset));
5253  And(t8, instance_type, Operand(kIsIndirectStringMask | kIsNotStringMask));
5254  Branch(value_is_white_and_not_data, ne, t8, Operand(zero_reg));
5255  // It's a non-indirect (non-cons and non-slice) string.
5256  // If it's external, the length is just ExternalString::kSize.
5257  // Otherwise it's String::kHeaderSize + string->length() * (1 or 2).
5258  // External strings are the only ones with the kExternalStringTag bit
5259  // set.
5262  And(t8, instance_type, Operand(kExternalStringTag));
5263  {
5264  Label skip;
5265  Branch(&skip, eq, t8, Operand(zero_reg));
5266  li(length, ExternalString::kSize);
5267  Branch(&is_data_object);
5268  bind(&skip);
5269  }
5270 
5271  // Sequential string, either ASCII or UC16.
5272  // For ASCII (char-size of 1) we shift the smi tag away to get the length.
5273  // For UC16 (char-size of 2) we just leave the smi tag in place, thereby
5274  // getting the length multiplied by 2.
5276  ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
5277  lw(t9, FieldMemOperand(value, String::kLengthOffset));
5278  And(t8, instance_type, Operand(kStringEncodingMask));
5279  {
5280  Label skip;
5281  Branch(&skip, eq, t8, Operand(zero_reg));
5282  srl(t9, t9, 1);
5283  bind(&skip);
5284  }
5285  Addu(length, t9, Operand(SeqString::kHeaderSize + kObjectAlignmentMask));
5286  And(length, length, Operand(~kObjectAlignmentMask));
5287 
5288  bind(&is_data_object);
5289  // Value is a data object, and it is white. Mark it black. Since we know
5290  // that the object is white we can make it black by flipping one bit.
5291  lw(t8, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
5292  Or(t8, t8, Operand(mask_scratch));
5293  sw(t8, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
5294 
5295  And(bitmap_scratch, bitmap_scratch, Operand(~Page::kPageAlignmentMask));
5296  lw(t8, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
5297  Addu(t8, t8, Operand(length));
5298  sw(t8, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
5299 
5300  bind(&done);
5301 }
5302 
5303 
5304 void MacroAssembler::LoadInstanceDescriptors(Register map,
5305  Register descriptors) {
5306  lw(descriptors, FieldMemOperand(map, Map::kDescriptorsOffset));
5307 }
5308 
5309 
5310 void MacroAssembler::NumberOfOwnDescriptors(Register dst, Register map) {
5311  lw(dst, FieldMemOperand(map, Map::kBitField3Offset));
5312  DecodeField<Map::NumberOfOwnDescriptorsBits>(dst);
5313 }
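// --- Editor's illustrative sketch (not part of macro-assembler-mips.cc) ---
// DecodeField<Map::NumberOfOwnDescriptorsBits> above extracts an unshifted bit
// field from the loaded word; conceptually the operation is
// (word >> shift) & ((1 << size) - 1). A stand-alone C++ model with the shift
// and size passed in as hypothetical parameters:
#include <cstdint>
static inline uint32_t DecodeFieldSketch(uint32_t word, int shift, int size) {
  return (word >> shift) & ((1u << size) - 1u);  // Shift the field down, then mask it.
}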
5314 
5315 
5316 void MacroAssembler::EnumLength(Register dst, Register map) {
5317  STATIC_ASSERT(Map::EnumLengthBits::kShift == 0);
5318  lw(dst, FieldMemOperand(map, Map::kBitField3Offset));
5319  And(dst, dst, Operand(Smi::FromInt(Map::EnumLengthBits::kMask)));
5320 }
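// --- Editor's illustrative sketch (not part of macro-assembler-mips.cc) ---
// Because EnumLengthBits::kShift is 0, masking the loaded bit_field3 word with
// Smi::FromInt(EnumLengthBits::kMask) leaves the enum length in smi form,
// ready for the Smi::FromInt comparisons in CheckEnumCache below. A
// stand-alone C++ model, assuming a 1-bit smi tag in the low bit and a
// hypothetical field mask width:
#include <cstdint>
static inline uint32_t EnumLengthAsSmiSketch(uint32_t bit_field3_smi) {
  const uint32_t kEnumLengthMaskSketch = (1u << 11) - 1u;      // Hypothetical EnumLengthBits::kMask.
  const uint32_t kSmiTaggedMask = kEnumLengthMaskSketch << 1;  // Smi::FromInt(mask), 1-bit tag.
  return bit_field3_smi & kSmiTaggedMask;                      // Enum length, still smi-tagged.
}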
5321 
5322 
5323 void MacroAssembler::CheckEnumCache(Register null_value, Label* call_runtime) {
5324  Register empty_fixed_array_value = t2;
5325  LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
5326  Label next, start;
5327  mov(a2, a0);
5328 
5329  // Check if the enum length field is properly initialized, indicating that
5330  // there is an enum cache.
5331  lw(a1, FieldMemOperand(a2, HeapObject::kMapOffset));
5332 
5333  EnumLength(a3, a1);
5334  Branch(call_runtime, eq, a3, Operand(Smi::FromInt(Map::kInvalidEnumCache)));
5335 
5336  jmp(&start);
5337 
5338  bind(&next);
5339  lw(a1, FieldMemOperand(a2, HeapObject::kMapOffset));
5340 
5341  // For all objects but the receiver, check that the cache is empty.
5342  EnumLength(a3, a1);
5343  Branch(call_runtime, ne, a3, Operand(Smi::FromInt(0)));
5344 
5345  bind(&start);
5346 
5347  // Check that there are no elements. Register a2 contains the current JS
5348  // object we've reached through the prototype chain.
5349  lw(a2, FieldMemOperand(a2, JSObject::kElementsOffset));
5350  Branch(call_runtime, ne, a2, Operand(empty_fixed_array_value));
5351 
5352  lw(a2, FieldMemOperand(a1, Map::kPrototypeOffset));
5353  Branch(&next, ne, a2, Operand(null_value));
5354 }
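// --- Editor's illustrative sketch (not part of macro-assembler-mips.cc) ---
// CheckEnumCache above walks the prototype chain of the receiver (expected in
// a0): the receiver's map must have a valid enum-cache length, every other
// map's cache must be empty, and no object on the chain may have elements.
// A stand-alone C++ model with hypothetical types (not V8's object layout):
#include <cstddef>
struct MapSketch { int enum_length; };  // kInvalidEnumCache modeled as a negative value.
struct ObjectSketch {
  MapSketch* map;
  bool has_elements;        // Stands in for elements != empty_fixed_array.
  ObjectSketch* prototype;  // NULL stands in for the null value sentinel.
};
static inline bool CanUseEnumCacheSketch(const ObjectSketch* receiver) {
  if (receiver->map->enum_length < 0) return false;    // Invalid cache: call runtime.
  const ObjectSketch* current = receiver;
  for (;;) {
    if (current->has_elements) return false;           // Elements present: call runtime.
    const ObjectSketch* proto = current->prototype;
    if (proto == NULL) return true;                     // Reached null: cache is usable.
    if (proto->map->enum_length != 0) return false;     // Non-receiver cache must be empty.
    current = proto;
  }
}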
5355 
5356 
5357 void MacroAssembler::ClampUint8(Register output_reg, Register input_reg) {
5358  ASSERT(!output_reg.is(input_reg));
5359  Label done;
5360  li(output_reg, Operand(255));
5361  // Normal branch: nop in delay slot.
5362  Branch(&done, gt, input_reg, Operand(output_reg));
5363  // Use delay slot in this branch.
5364  Branch(USE_DELAY_SLOT, &done, lt, input_reg, Operand(zero_reg));
5365  mov(output_reg, zero_reg); // In delay slot.
5366  mov(output_reg, input_reg); // Value is in range 0..255.
5367  bind(&done);
5368 }
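// --- Editor's illustrative sketch (not part of macro-assembler-mips.cc) ---
// ClampUint8 above saturates a signed word into the 0..255 range with two
// branches; the delay slot of the second branch zeroes output_reg, so the
// negative case costs no extra instruction. A stand-alone C++ equivalent:
static inline int ClampUint8Sketch(int value) {
  if (value > 255) return 255;  // First branch: output_reg already holds 255.
  if (value < 0) return 0;      // Second branch: delay slot wrote zero to output_reg.
  return value;                 // Fall-through: mov(output_reg, input_reg).
}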
5369 
5370 
5371 void MacroAssembler::ClampDoubleToUint8(Register result_reg,
5372  DoubleRegister input_reg,
5373  DoubleRegister temp_double_reg) {
5374  Label above_zero;
5375  Label done;
5376  Label in_bounds;
5377 
5378  Move(temp_double_reg, 0.0);
5379  BranchF(&above_zero, NULL, gt, input_reg, temp_double_reg);
5380 
5381  // Double value is <= 0 (including -Infinity) or NaN; return 0.
5382  mov(result_reg, zero_reg);
5383  Branch(&done);
5384 
5385  // Double value is greater than zero; return 255 if it exceeds 255.
5386  bind(&above_zero);
5387  Move(temp_double_reg, 255.0);
5388  BranchF(&in_bounds, NULL, le, input_reg, temp_double_reg);
5389  li(result_reg, Operand(255));
5390  Branch(&done);
5391 
5392  // In 0-255 range, round and truncate.
5393  bind(&in_bounds);
5394  cvt_w_d(temp_double_reg, input_reg);
5395  mfc1(result_reg, temp_double_reg);
5396  bind(&done);
5397 }
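// --- Editor's illustrative sketch (not part of macro-assembler-mips.cc) ---
// ClampDoubleToUint8 above yields 0 for NaN and for values <= 0, yields 255
// for values above 255, and otherwise converts with cvt_w_d, which rounds
// according to the current FCSR rounding mode. A C++ model of the control
// flow; std::lrint only approximates the FPU's rounding behaviour here:
#include <cmath>
static inline int ClampDoubleToUint8Sketch(double value) {
  if (!(value > 0.0)) return 0;       // <= 0 or NaN: the first BranchF is not taken.
  if (!(value <= 255.0)) return 255;  // > 255: the second BranchF is not taken.
  return static_cast<int>(std::lrint(value));  // In bounds: round and truncate.
}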
5398 
5399 
5400 bool AreAliased(Register r1, Register r2, Register r3, Register r4) {
5401  if (r1.is(r2)) return true;
5402  if (r1.is(r3)) return true;
5403  if (r1.is(r4)) return true;
5404  if (r2.is(r3)) return true;
5405  if (r2.is(r4)) return true;
5406  if (r3.is(r4)) return true;
5407  return false;
5408 }
5409 
5410 
5411 CodePatcher::CodePatcher(byte* address, int instructions)
5412  : address_(address),
5413  instructions_(instructions),
5414  size_(instructions * Assembler::kInstrSize),
5415  masm_(NULL, address, size_ + Assembler::kGap) {
5416  // Create a new macro assembler pointing to the address of the code to patch.
5417  // The size is adjusted with kGap in order for the assembler to generate size
5418  // bytes of instructions without failing with buffer size constraints.
5419  ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
5420 }
5421 
5422 
5423 CodePatcher::~CodePatcher() {
5424  // Indicate that code has changed.
5425  CPU::FlushICache(address_, size_);
5426 
5427  // Check that the code was patched as expected.
5428  ASSERT(masm_.pc_ == address_ + size_);
5429  ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
5430 }
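// --- Editor's illustrative usage note (not part of macro-assembler-mips.cc) ---
// A CodePatcher is typically used as a scoped object: construct it over the
// address to patch, emit exactly 'instructions' replacement instructions
// through masm() or Emit(), and let the destructor flush the instruction cache
// and assert that exactly size_ bytes were written. Hedged sketch (the address
// variable is a placeholder):
//   CodePatcher patcher(patch_address, 1);
//   patcher.masm()->nop();  // Overwrite a single instruction in place.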
5431 
5432 
5433 void CodePatcher::Emit(Instr instr) {
5434  masm()->emit(instr);
5435 }
5436 
5437 
5438 void CodePatcher::Emit(Address addr) {
5439  masm()->emit(reinterpret_cast<Instr>(addr));
5440 }
5441 
5442 
5443 void CodePatcher::ChangeBranchCondition(Condition cond) {
5444  Instr instr = Assembler::instr_at(masm_.pc_);
5445  ASSERT(Assembler::IsBranch(instr));
5446  uint32_t opcode = Assembler::GetOpcodeField(instr);
5447  // Currently only the 'eq' and 'ne' cond values are supported and the simple
5448  // branch instructions (with opcode being the branch type).
5449  // There are some special cases (see Assembler::IsBranch()) so extending this
5450  // would be tricky.
5451  ASSERT(opcode == BEQ ||
5452  opcode == BNE ||
5453  opcode == BLEZ ||
5454  opcode == BGTZ ||
5455  opcode == BEQL ||
5456  opcode == BNEL ||
5457  opcode == BLEZL ||
5458  opcode == BGTZL);
5459  opcode = (cond == eq) ? BEQ : BNE;
5460  instr = (instr & ~kOpcodeMask) | opcode;
5461  masm_.emit(instr);
5462 }
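// --- Editor's illustrative sketch (not part of macro-assembler-mips.cc) ---
// ChangeBranchCondition rewrites only the 6-bit primary opcode field of the
// patched branch, leaving rs, rt and the 16-bit offset untouched. A
// stand-alone C++ model, assuming kOpcodeMask covers bits 31..26 and using the
// architectural BEQ/BNE primary opcodes (0b000100 / 0b000101):
#include <cstdint>
static inline uint32_t PatchBranchOpcodeSketch(uint32_t instr, bool make_beq) {
  const uint32_t kOpcodeMaskSketch = 0x3Fu << 26;  // Bits 31..26.
  const uint32_t kBeqSketch = 0x04u << 26;         // BEQ primary opcode.
  const uint32_t kBneSketch = 0x05u << 26;         // BNE primary opcode.
  return (instr & ~kOpcodeMaskSketch) | (make_beq ? kBeqSketch : kBneSketch);
}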
5463 
5464 
5465 } } // namespace v8::internal
5466 
5467 #endif // V8_TARGET_ARCH_MIPS