v8 3.11.10 (node 0.8.26)
V8 is Google's open source JavaScript engine
macro-assembler-mips.cc
1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
4 // met:
5 //
6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided
11 // with the distribution.
12 // * Neither the name of Google Inc. nor the names of its
13 // contributors may be used to endorse or promote products derived
14 // from this software without specific prior written permission.
15 //
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 
28 #include <limits.h> // For LONG_MIN, LONG_MAX.
29 
30 #include "v8.h"
31 
32 #if defined(V8_TARGET_ARCH_MIPS)
33 
34 #include "bootstrapper.h"
35 #include "codegen.h"
36 #include "debug.h"
37 #include "runtime.h"
38 
39 namespace v8 {
40 namespace internal {
41 
42 MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
43  : Assembler(arg_isolate, buffer, size),
44  generating_stub_(false),
45  allow_stub_calls_(true),
46  has_frame_(false) {
47  if (isolate() != NULL) {
48  code_object_ = Handle<Object>(isolate()->heap()->undefined_value(),
49  isolate());
50  }
51 }
52 
53 
54 void MacroAssembler::LoadRoot(Register destination,
55  Heap::RootListIndex index) {
56  lw(destination, MemOperand(s6, index << kPointerSizeLog2));
57 }
58 
59 
60 void MacroAssembler::LoadRoot(Register destination,
61  Heap::RootListIndex index,
62  Condition cond,
63  Register src1, const Operand& src2) {
64  Branch(2, NegateCondition(cond), src1, src2);
65  lw(destination, MemOperand(s6, index << kPointerSizeLog2));
66 }
67 
68 
69 void MacroAssembler::StoreRoot(Register source,
70  Heap::RootListIndex index) {
71  sw(source, MemOperand(s6, index << kPointerSizeLog2));
72 }
73 
74 
75 void MacroAssembler::StoreRoot(Register source,
76  Heap::RootListIndex index,
77  Condition cond,
78  Register src1, const Operand& src2) {
79  Branch(2, NegateCondition(cond), src1, src2);
80  sw(source, MemOperand(s6, index << kPointerSizeLog2));
81 }
82 
83 
84 void MacroAssembler::LoadHeapObject(Register result,
85  Handle<HeapObject> object) {
86  if (isolate()->heap()->InNewSpace(*object)) {
87  Handle<JSGlobalPropertyCell> cell =
88  isolate()->factory()->NewJSGlobalPropertyCell(object);
89  li(result, Operand(cell));
90  lw(result, FieldMemOperand(result, JSGlobalPropertyCell::kValueOffset));
91  } else {
92  li(result, Operand(object));
93  }
94 }
95 
96 
97 // Push and pop all registers that can hold pointers.
98 void MacroAssembler::PushSafepointRegisters() {
99  // Safepoints expect a block of kNumSafepointRegisters values on the
100  // stack, so adjust the stack for unsaved registers.
101  const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
102  ASSERT(num_unsaved >= 0);
103  if (num_unsaved > 0) {
104  Subu(sp, sp, Operand(num_unsaved * kPointerSize));
105  }
106  MultiPush(kSafepointSavedRegisters);
107 }
108 
109 
110 void MacroAssembler::PopSafepointRegisters() {
111  const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
112  MultiPop(kSafepointSavedRegisters);
113  if (num_unsaved > 0) {
114  Addu(sp, sp, Operand(num_unsaved * kPointerSize));
115  }
116 }
117 
118 
119 void MacroAssembler::PushSafepointRegistersAndDoubles() {
120  PushSafepointRegisters();
121  Subu(sp, sp, Operand(FPURegister::kNumAllocatableRegisters * kDoubleSize));
122  for (int i = 0; i < FPURegister::kNumAllocatableRegisters; i+=2) {
123  FPURegister reg = FPURegister::FromAllocationIndex(i);
124  sdc1(reg, MemOperand(sp, i * kDoubleSize));
125  }
126 }
127 
128 
129 void MacroAssembler::PopSafepointRegistersAndDoubles() {
130  for (int i = 0; i < FPURegister::kNumAllocatableRegisters; i+=2) {
131  FPURegister reg = FPURegister::FromAllocationIndex(i);
132  ldc1(reg, MemOperand(sp, i * kDoubleSize));
133  }
134  Addu(sp, sp, Operand(FPURegister::kNumAllocatableRegisters * kDoubleSize));
135  PopSafepointRegisters();
136 }
137 
138 
139 void MacroAssembler::StoreToSafepointRegistersAndDoublesSlot(Register src,
140  Register dst) {
141  sw(src, SafepointRegistersAndDoublesSlot(dst));
142 }
143 
144 
145 void MacroAssembler::StoreToSafepointRegisterSlot(Register src, Register dst) {
146  sw(src, SafepointRegisterSlot(dst));
147 }
148 
149 
150 void MacroAssembler::LoadFromSafepointRegisterSlot(Register dst, Register src) {
151  lw(dst, SafepointRegisterSlot(src));
152 }
153 
154 
155 int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
156  // The registers are pushed starting with the highest encoding,
157  // which means that lowest encodings are closest to the stack pointer.
158  return kSafepointRegisterStackIndexMap[reg_code];
159 }
160 
161 
162 MemOperand MacroAssembler::SafepointRegisterSlot(Register reg) {
163  return MemOperand(sp, SafepointRegisterStackIndex(reg.code()) * kPointerSize);
164 }
165 
166 
167 MemOperand MacroAssembler::SafepointRegistersAndDoublesSlot(Register reg) {
169  // General purpose registers are pushed last on the stack.
170  int doubles_size = FPURegister::kNumAllocatableRegisters * kDoubleSize;
171  int register_offset = SafepointRegisterStackIndex(reg.code()) * kPointerSize;
172  return MemOperand(sp, doubles_size + register_offset);
173 }
174 
175 
176 void MacroAssembler::InNewSpace(Register object,
177  Register scratch,
178  Condition cc,
179  Label* branch) {
180  ASSERT(cc == eq || cc == ne);
181  And(scratch, object, Operand(ExternalReference::new_space_mask(isolate())));
182  Branch(branch, cc, scratch,
183  Operand(ExternalReference::new_space_start(isolate())));
184 }
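// This works because new space is assumed to be a single aligned region:
// masking a pointer with new_space_mask yields new_space_start exactly when
// the pointer lies inside new space.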
185 
186 
187 void MacroAssembler::RecordWriteField(
188  Register object,
189  int offset,
190  Register value,
191  Register dst,
192  RAStatus ra_status,
193  SaveFPRegsMode save_fp,
194  RememberedSetAction remembered_set_action,
195  SmiCheck smi_check) {
196  ASSERT(!AreAliased(value, dst, t8, object));
197  // First, check if a write barrier is even needed. The tests below
198  // catch stores of Smis.
199  Label done;
200 
201  // Skip barrier if writing a smi.
202  if (smi_check == INLINE_SMI_CHECK) {
203  JumpIfSmi(value, &done);
204  }
205 
206  // Although the object register is tagged, the offset is relative to the start
207 // of the object, so offset must be a multiple of kPointerSize.
208  ASSERT(IsAligned(offset, kPointerSize));
209 
210  Addu(dst, object, Operand(offset - kHeapObjectTag));
211  if (emit_debug_code()) {
212  Label ok;
213  And(t8, dst, Operand((1 << kPointerSizeLog2) - 1));
214  Branch(&ok, eq, t8, Operand(zero_reg));
215  stop("Unaligned cell in write barrier");
216  bind(&ok);
217  }
218 
219  RecordWrite(object,
220  dst,
221  value,
222  ra_status,
223  save_fp,
224  remembered_set_action,
225  OMIT_SMI_CHECK);
226 
227  bind(&done);
228 
229  // Clobber clobbered input registers when running with the debug-code flag
230  // turned on to provoke errors.
231  if (emit_debug_code()) {
232  li(value, Operand(BitCast<int32_t>(kZapValue + 4)));
233  li(dst, Operand(BitCast<int32_t>(kZapValue + 8)));
234  }
235 }
236 
237 
238 // Will clobber 4 registers: object, address, scratch, ip. The
239 // register 'object' contains a heap object pointer. The heap object
240 // tag is shifted away.
241 void MacroAssembler::RecordWrite(Register object,
242  Register address,
243  Register value,
244  RAStatus ra_status,
245  SaveFPRegsMode fp_mode,
246  RememberedSetAction remembered_set_action,
247  SmiCheck smi_check) {
248  ASSERT(!AreAliased(object, address, value, t8));
249  ASSERT(!AreAliased(object, address, value, t9));
250  // The compiled code assumes that record write doesn't change the
251  // context register, so we check that none of the clobbered
252  // registers are cp.
253  ASSERT(!address.is(cp) && !value.is(cp));
254 
255  if (emit_debug_code()) {
256  lw(at, MemOperand(address));
257  Assert(
258  eq, "Wrong address or value passed to RecordWrite", at, Operand(value));
259  }
260 
261  Label done;
262 
263  if (smi_check == INLINE_SMI_CHECK) {
264  ASSERT_EQ(0, kSmiTag);
265  JumpIfSmi(value, &done);
266  }
267 
268  CheckPageFlag(value,
269  value, // Used as scratch.
270  MemoryChunk::kPointersToHereAreInterestingMask,
271  eq,
272  &done);
273  CheckPageFlag(object,
274  value, // Used as scratch.
275  MemoryChunk::kPointersFromHereAreInterestingMask,
276  eq,
277  &done);
278 
279  // Record the actual write.
280  if (ra_status == kRAHasNotBeenSaved) {
281  push(ra);
282  }
283  RecordWriteStub stub(object, value, address, remembered_set_action, fp_mode);
284  CallStub(&stub);
285  if (ra_status == kRAHasNotBeenSaved) {
286  pop(ra);
287  }
288 
289  bind(&done);
290 
291  // Clobber clobbered registers when running with the debug-code flag
292  // turned on to provoke errors.
293  if (emit_debug_code()) {
294  li(address, Operand(BitCast<int32_t>(kZapValue + 12)));
295  li(value, Operand(BitCast<int32_t>(kZapValue + 16)));
296  }
297 }
298 
299 
300 void MacroAssembler::RememberedSetHelper(Register object, // For debug tests.
301  Register address,
302  Register scratch,
303  SaveFPRegsMode fp_mode,
304  RememberedSetFinalAction and_then) {
305  Label done;
306  if (emit_debug_code()) {
307  Label ok;
308  JumpIfNotInNewSpace(object, scratch, &ok);
309  stop("Remembered set pointer is in new space");
310  bind(&ok);
311  }
312  // Load store buffer top.
313  ExternalReference store_buffer =
314  ExternalReference::store_buffer_top(isolate());
315  li(t8, Operand(store_buffer));
316  lw(scratch, MemOperand(t8));
317  // Store pointer to buffer and increment buffer top.
318  sw(address, MemOperand(scratch));
319  Addu(scratch, scratch, kPointerSize);
320  // Write back new top of buffer.
321  sw(scratch, MemOperand(t8));
322  // Call stub on end of buffer.
323  // Check for end of buffer.
324  And(t8, scratch, Operand(StoreBuffer::kStoreBufferOverflowBit));
325  if (and_then == kFallThroughAtEnd) {
326  Branch(&done, eq, t8, Operand(zero_reg));
327  } else {
328  ASSERT(and_then == kReturnAtEnd);
329  Ret(eq, t8, Operand(zero_reg));
330  }
331  push(ra);
332  StoreBufferOverflowStub store_buffer_overflow =
333  StoreBufferOverflowStub(fp_mode);
334  CallStub(&store_buffer_overflow);
335  pop(ra);
336  bind(&done);
337  if (and_then == kReturnAtEnd) {
338  Ret();
339  }
340 }
341 
342 
343 // -----------------------------------------------------------------------------
344 // Allocation support.
345 
346 
347 void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
348  Register scratch,
349  Label* miss) {
350  Label same_contexts;
351 
352  ASSERT(!holder_reg.is(scratch));
353  ASSERT(!holder_reg.is(at));
354  ASSERT(!scratch.is(at));
355 
356  // Load current lexical context from the stack frame.
357  lw(scratch, MemOperand(fp, StandardFrameConstants::kContextOffset));
358  // In debug mode, make sure the lexical context is set.
359 #ifdef DEBUG
360  Check(ne, "we should not have an empty lexical context",
361  scratch, Operand(zero_reg));
362 #endif
363 
364  // Load the global context of the current context.
365  int offset = Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
366  lw(scratch, FieldMemOperand(scratch, offset));
367  lw(scratch, FieldMemOperand(scratch, GlobalObject::kGlobalContextOffset));
368 
369  // Check the context is a global context.
370  if (emit_debug_code()) {
371  // TODO(119): Avoid push(holder_reg)/pop(holder_reg).
372  push(holder_reg); // Temporarily save holder on the stack.
373  // Read the first word and compare to the global_context_map.
374  lw(holder_reg, FieldMemOperand(scratch, HeapObject::kMapOffset));
375  LoadRoot(at, Heap::kGlobalContextMapRootIndex);
376  Check(eq, "JSGlobalObject::global_context should be a global context.",
377  holder_reg, Operand(at));
378  pop(holder_reg); // Restore holder.
379  }
380 
381  // Check if both contexts are the same.
382  lw(at, FieldMemOperand(holder_reg, JSGlobalProxy::kContextOffset));
383  Branch(&same_contexts, eq, scratch, Operand(at));
384 
385  // Check the context is a global context.
386  if (emit_debug_code()) {
387  // TODO(119): Avoid push(holder_reg)/pop(holder_reg).
388  push(holder_reg); // Temporarily save holder on the stack.
389  mov(holder_reg, at); // Move at to its holding place.
390  LoadRoot(at, Heap::kNullValueRootIndex);
391  Check(ne, "JSGlobalProxy::context() should not be null.",
392  holder_reg, Operand(at));
393 
394  lw(holder_reg, FieldMemOperand(holder_reg, HeapObject::kMapOffset));
395  LoadRoot(at, Heap::kGlobalContextMapRootIndex);
396  Check(eq, "JSGlobalObject::global_context should be a global context.",
397  holder_reg, Operand(at));
398  // Restoring at is not needed; at is reloaded below.
399  pop(holder_reg); // Restore holder.
400  // Restore at to holder's context.
401  lw(at, FieldMemOperand(holder_reg, JSGlobalProxy::kContextOffset));
402  }
403 
404  // Check that the security token in the calling global object is
405  // compatible with the security token in the receiving global
406  // object.
407  int token_offset = Context::kHeaderSize +
408  Context::SECURITY_TOKEN_INDEX * kPointerSize;
409 
410  lw(scratch, FieldMemOperand(scratch, token_offset));
411  lw(at, FieldMemOperand(at, token_offset));
412  Branch(miss, ne, scratch, Operand(at));
413 
414  bind(&same_contexts);
415 }
416 
417 
418 void MacroAssembler::GetNumberHash(Register reg0, Register scratch) {
419  // First of all we assign the hash seed to scratch.
420  LoadRoot(scratch, Heap::kHashSeedRootIndex);
421  SmiUntag(scratch);
422 
423  // Xor original key with a seed.
424  xor_(reg0, reg0, scratch);
425 
426  // Compute the hash code from the untagged key. This must be kept in sync
427  // with ComputeIntegerHash in utils.h.
428  //
429  // hash = ~hash + (hash << 15);
430  nor(scratch, reg0, zero_reg);
431  sll(at, reg0, 15);
432  addu(reg0, scratch, at);
433 
434  // hash = hash ^ (hash >> 12);
435  srl(at, reg0, 12);
436  xor_(reg0, reg0, at);
437 
438  // hash = hash + (hash << 2);
439  sll(at, reg0, 2);
440  addu(reg0, reg0, at);
441 
442  // hash = hash ^ (hash >> 4);
443  srl(at, reg0, 4);
444  xor_(reg0, reg0, at);
445 
446  // hash = hash * 2057;
447  sll(scratch, reg0, 11);
448  sll(at, reg0, 3);
449  addu(reg0, reg0, at);
450  addu(reg0, reg0, scratch);
451 
452  // hash = hash ^ (hash >> 16);
453  srl(at, reg0, 16);
454  xor_(reg0, reg0, at);
455 }
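// For cross-checking: the sequence above mirrors ComputeIntegerHash in
// utils.h. A plain C++ sketch of the same computation (illustrative only):
//
//   uint32_t hash = key ^ seed;
//   hash = ~hash + (hash << 15);
//   hash = hash ^ (hash >> 12);
//   hash = hash + (hash << 2);
//   hash = hash ^ (hash >> 4);
//   hash = hash * 2057;   // equals hash + (hash << 3) + (hash << 11)
//   hash = hash ^ (hash >> 16);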
456 
457 
458 void MacroAssembler::LoadFromNumberDictionary(Label* miss,
459  Register elements,
460  Register key,
461  Register result,
462  Register reg0,
463  Register reg1,
464  Register reg2) {
465  // Register use:
466  //
467  // elements - holds the slow-case elements of the receiver on entry.
468  // Unchanged unless 'result' is the same register.
469  //
470  // key - holds the smi key on entry.
471  // Unchanged unless 'result' is the same register.
472  //
473  //
474  // result - holds the result on exit if the load succeeded.
475  // Allowed to be the same as 'key' or 'elements'.
476  // Unchanged on bailout so 'key' or 'elements' can be used
477  // in further computation.
478  //
479  // Scratch registers:
480  //
481  // reg0 - holds the untagged key on entry and holds the hash once computed.
482  //
483  // reg1 - Used to hold the capacity mask of the dictionary.
484  //
485  // reg2 - Used for the index into the dictionary.
486  // at - Temporary (avoid MacroAssembler instructions also using 'at').
487  Label done;
488 
489  GetNumberHash(reg0, reg1);
490 
491  // Compute the capacity mask.
492  lw(reg1, FieldMemOperand(elements, SeededNumberDictionary::kCapacityOffset));
493  sra(reg1, reg1, kSmiTagSize);
494  Subu(reg1, reg1, Operand(1));
495 
496  // Generate an unrolled loop that performs a few probes before giving up.
497  static const int kProbes = 4;
498  for (int i = 0; i < kProbes; i++) {
499  // Use reg2 for index calculations and keep the hash intact in reg0.
500  mov(reg2, reg0);
501  // Compute the masked index: (hash + i + i * i) & mask.
502  if (i > 0) {
503  Addu(reg2, reg2, Operand(SeededNumberDictionary::GetProbeOffset(i)));
504  }
505  and_(reg2, reg2, reg1);
506 
507  // Scale the index by multiplying by the element size.
508  ASSERT(SeededNumberDictionary::kEntrySize == 3);
509  sll(at, reg2, 1); // 2x.
510  addu(reg2, reg2, at); // reg2 = reg2 * 3.
511 
512  // Check if the key is identical to the name.
513  sll(at, reg2, kPointerSizeLog2);
514  addu(reg2, elements, at);
515 
516  lw(at, FieldMemOperand(reg2, SeededNumberDictionary::kElementsStartOffset));
517  if (i != kProbes - 1) {
518  Branch(&done, eq, key, Operand(at));
519  } else {
520  Branch(miss, ne, key, Operand(at));
521  }
522  }
523 
524  bind(&done);
525  // Check that the value is a normal property.
526  // reg2: elements + (index * kPointerSize).
527  const int kDetailsOffset =
528  SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize;
529  lw(reg1, FieldMemOperand(reg2, kDetailsOffset));
530  And(at, reg1, Operand(Smi::FromInt(PropertyDetails::TypeField::kMask)));
531  Branch(miss, ne, at, Operand(zero_reg));
532 
533  // Get the value at the masked, scaled index and return.
534  const int kValueOffset =
535  SeededNumberDictionary::kElementsStartOffset + kPointerSize;
536  lw(result, FieldMemOperand(reg2, kValueOffset));
537 }
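// The unrolled probes above implement the usual open-addressing lookup of
// SeededNumberDictionary. In loop form (illustrative pseudo-C++; helper names
// are not real API):
//
//   for (int i = 0; i < kProbes; i++) {
//     uint32_t index =
//         (hash + SeededNumberDictionary::GetProbeOffset(i)) & capacity_mask;
//     // Each entry is kEntrySize (== 3) pointers: key, value, details.
//     if (entry_key(elements, index) == key) break;  // last probe jumps to miss
//   }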
538 
539 
540 // ---------------------------------------------------------------------------
541 // Instruction macros.
542 
543 void MacroAssembler::Addu(Register rd, Register rs, const Operand& rt) {
544  if (rt.is_reg()) {
545  addu(rd, rs, rt.rm());
546  } else {
547  if (is_int16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
548  addiu(rd, rs, rt.imm32_);
549  } else {
550  // li handles the relocation.
551  ASSERT(!rs.is(at));
552  li(at, rt);
553  addu(rd, rs, at);
554  }
555  }
556 }
557 
558 
559 void MacroAssembler::Subu(Register rd, Register rs, const Operand& rt) {
560  if (rt.is_reg()) {
561  subu(rd, rs, rt.rm());
562  } else {
563  if (is_int16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
564  addiu(rd, rs, -rt.imm32_); // No subiu instr, use addiu(x, y, -imm).
565  } else {
566  // li handles the relocation.
567  ASSERT(!rs.is(at));
568  li(at, rt);
569  subu(rd, rs, at);
570  }
571  }
572 }
573 
574 
575 void MacroAssembler::Mul(Register rd, Register rs, const Operand& rt) {
576  if (rt.is_reg()) {
577  if (kArchVariant == kLoongson) {
578  mult(rs, rt.rm());
579  mflo(rd);
580  } else {
581  mul(rd, rs, rt.rm());
582  }
583  } else {
584  // li handles the relocation.
585  ASSERT(!rs.is(at));
586  li(at, rt);
587  if (kArchVariant == kLoongson) {
588  mult(rs, at);
589  mflo(rd);
590  } else {
591  mul(rd, rs, at);
592  }
593  }
594 }
595 
596 
597 void MacroAssembler::Mult(Register rs, const Operand& rt) {
598  if (rt.is_reg()) {
599  mult(rs, rt.rm());
600  } else {
601  // li handles the relocation.
602  ASSERT(!rs.is(at));
603  li(at, rt);
604  mult(rs, at);
605  }
606 }
607 
608 
609 void MacroAssembler::Multu(Register rs, const Operand& rt) {
610  if (rt.is_reg()) {
611  multu(rs, rt.rm());
612  } else {
613  // li handles the relocation.
614  ASSERT(!rs.is(at));
615  li(at, rt);
616  multu(rs, at);
617  }
618 }
619 
620 
621 void MacroAssembler::Div(Register rs, const Operand& rt) {
622  if (rt.is_reg()) {
623  div(rs, rt.rm());
624  } else {
625  // li handles the relocation.
626  ASSERT(!rs.is(at));
627  li(at, rt);
628  div(rs, at);
629  }
630 }
631 
632 
633 void MacroAssembler::Divu(Register rs, const Operand& rt) {
634  if (rt.is_reg()) {
635  divu(rs, rt.rm());
636  } else {
637  // li handles the relocation.
638  ASSERT(!rs.is(at));
639  li(at, rt);
640  divu(rs, at);
641  }
642 }
643 
644 
645 void MacroAssembler::And(Register rd, Register rs, const Operand& rt) {
646  if (rt.is_reg()) {
647  and_(rd, rs, rt.rm());
648  } else {
649  if (is_uint16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
650  andi(rd, rs, rt.imm32_);
651  } else {
652  // li handles the relocation.
653  ASSERT(!rs.is(at));
654  li(at, rt);
655  and_(rd, rs, at);
656  }
657  }
658 }
659 
660 
661 void MacroAssembler::Or(Register rd, Register rs, const Operand& rt) {
662  if (rt.is_reg()) {
663  or_(rd, rs, rt.rm());
664  } else {
665  if (is_uint16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
666  ori(rd, rs, rt.imm32_);
667  } else {
668  // li handles the relocation.
669  ASSERT(!rs.is(at));
670  li(at, rt);
671  or_(rd, rs, at);
672  }
673  }
674 }
675 
676 
677 void MacroAssembler::Xor(Register rd, Register rs, const Operand& rt) {
678  if (rt.is_reg()) {
679  xor_(rd, rs, rt.rm());
680  } else {
681  if (is_uint16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
682  xori(rd, rs, rt.imm32_);
683  } else {
684  // li handles the relocation.
685  ASSERT(!rs.is(at));
686  li(at, rt);
687  xor_(rd, rs, at);
688  }
689  }
690 }
691 
692 
693 void MacroAssembler::Nor(Register rd, Register rs, const Operand& rt) {
694  if (rt.is_reg()) {
695  nor(rd, rs, rt.rm());
696  } else {
697  // li handles the relocation.
698  ASSERT(!rs.is(at));
699  li(at, rt);
700  nor(rd, rs, at);
701  }
702 }
703 
704 
705 void MacroAssembler::Neg(Register rs, const Operand& rt) {
706  ASSERT(rt.is_reg());
707  ASSERT(!at.is(rs));
708  ASSERT(!at.is(rt.rm()));
709  li(at, -1);
710  xor_(rs, rt.rm(), at);
711 }
712 
713 
714 void MacroAssembler::Slt(Register rd, Register rs, const Operand& rt) {
715  if (rt.is_reg()) {
716  slt(rd, rs, rt.rm());
717  } else {
718  if (is_int16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
719  slti(rd, rs, rt.imm32_);
720  } else {
721  // li handles the relocation.
722  ASSERT(!rs.is(at));
723  li(at, rt);
724  slt(rd, rs, at);
725  }
726  }
727 }
728 
729 
730 void MacroAssembler::Sltu(Register rd, Register rs, const Operand& rt) {
731  if (rt.is_reg()) {
732  sltu(rd, rs, rt.rm());
733  } else {
734  if (is_uint16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
735  sltiu(rd, rs, rt.imm32_);
736  } else {
737  // li handles the relocation.
738  ASSERT(!rs.is(at));
739  li(at, rt);
740  sltu(rd, rs, at);
741  }
742  }
743 }
744 
745 
746 void MacroAssembler::Ror(Register rd, Register rs, const Operand& rt) {
747  if (kArchVariant == kMips32r2) {
748  if (rt.is_reg()) {
749  rotrv(rd, rs, rt.rm());
750  } else {
751  rotr(rd, rs, rt.imm32_);
752  }
753  } else {
754  if (rt.is_reg()) {
755  subu(at, zero_reg, rt.rm());
756  sllv(at, rs, at);
757  srlv(rd, rs, rt.rm());
758  or_(rd, rd, at);
759  } else {
760  if (rt.imm32_ == 0) {
761  srl(rd, rs, 0);
762  } else {
763  srl(at, rs, rt.imm32_);
764  sll(rd, rs, (0x20 - rt.imm32_) & 0x1f);
765  or_(rd, rd, at);
766  }
767  }
768  }
769 }
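// Example of the non-r2 path: Ror(v0, a0, Operand(8)) emits
//   srl  at, a0, 8
//   sll  v0, a0, 24
//   or   v0, v0, at
// i.e. v0 = (a0 >> 8) | (a0 << 24), a 32-bit rotate right by 8.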
770 
771 //------------Pseudo-instructions-------------
772 
773 void MacroAssembler::li(Register rd, Operand j, LiFlags mode) {
774  ASSERT(!j.is_reg());
775  BlockTrampolinePoolScope block_trampoline_pool(this);
776  if (!MustUseReg(j.rmode_) && mode == OPTIMIZE_SIZE) {
777  // Normal load of an immediate value which does not need Relocation Info.
778  if (is_int16(j.imm32_)) {
779  addiu(rd, zero_reg, j.imm32_);
780  } else if (!(j.imm32_ & kHiMask)) {
781  ori(rd, zero_reg, j.imm32_);
782  } else if (!(j.imm32_ & kImm16Mask)) {
783  lui(rd, (j.imm32_ >> kLuiShift) & kImm16Mask);
784  } else {
785  lui(rd, (j.imm32_ >> kLuiShift) & kImm16Mask);
786  ori(rd, rd, (j.imm32_ & kImm16Mask));
787  }
788  } else {
789  if (MustUseReg(j.rmode_)) {
790  RecordRelocInfo(j.rmode_, j.imm32_);
791  }
792  // We always need the same number of instructions as we may need to patch
793  // this code to load another value which may need 2 instructions to load.
794  lui(rd, (j.imm32_ >> kLuiShift) & kImm16Mask);
795  ori(rd, rd, (j.imm32_ & kImm16Mask));
796  }
797 }
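// Example: with no relocation info and OPTIMIZE_SIZE, li(t0, Operand(0x12345678))
// emits the two-instruction sequence
//   lui  t0, 0x1234
//   ori  t0, t0, 0x5678
// while immediates that fit in 16 bits (or whose upper or lower half is zero)
// collapse to a single addiu, ori or lui. With relocation info the
// two-instruction form is always used so the constant can be patched later.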
798 
799 
800 void MacroAssembler::MultiPush(RegList regs) {
801  int16_t num_to_push = NumberOfBitsSet(regs);
802  int16_t stack_offset = num_to_push * kPointerSize;
803 
804  Subu(sp, sp, Operand(stack_offset));
805  for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
806  if ((regs & (1 << i)) != 0) {
807  stack_offset -= kPointerSize;
808  sw(ToRegister(i), MemOperand(sp, stack_offset));
809  }
810  }
811 }
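// Registers are stored from the highest code downwards, so after
// MultiPush(a0.bit() | a1.bit() | a2.bit()) the layout is
//   sp + 0: a0,  sp + 4: a1,  sp + 8: a2
// i.e. the lowest register code ends up closest to the stack pointer, which is
// what SafepointRegisterStackIndex above relies on.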
812 
813 
814 void MacroAssembler::MultiPushReversed(RegList regs) {
815  int16_t num_to_push = NumberOfBitsSet(regs);
816  int16_t stack_offset = num_to_push * kPointerSize;
817 
818  Subu(sp, sp, Operand(stack_offset));
819  for (int16_t i = 0; i < kNumRegisters; i++) {
820  if ((regs & (1 << i)) != 0) {
821  stack_offset -= kPointerSize;
822  sw(ToRegister(i), MemOperand(sp, stack_offset));
823  }
824  }
825 }
826 
827 
828 void MacroAssembler::MultiPop(RegList regs) {
829  int16_t stack_offset = 0;
830 
831  for (int16_t i = 0; i < kNumRegisters; i++) {
832  if ((regs & (1 << i)) != 0) {
833  lw(ToRegister(i), MemOperand(sp, stack_offset));
834  stack_offset += kPointerSize;
835  }
836  }
837  addiu(sp, sp, stack_offset);
838 }
839 
840 
841 void MacroAssembler::MultiPopReversed(RegList regs) {
842  int16_t stack_offset = 0;
843 
844  for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
845  if ((regs & (1 << i)) != 0) {
846  lw(ToRegister(i), MemOperand(sp, stack_offset));
847  stack_offset += kPointerSize;
848  }
849  }
850  addiu(sp, sp, stack_offset);
851 }
852 
853 
854 void MacroAssembler::MultiPushFPU(RegList regs) {
855  CpuFeatures::Scope scope(FPU);
856  int16_t num_to_push = NumberOfBitsSet(regs);
857  int16_t stack_offset = num_to_push * kDoubleSize;
858 
859  Subu(sp, sp, Operand(stack_offset));
860  for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
861  if ((regs & (1 << i)) != 0) {
862  stack_offset -= kDoubleSize;
863  sdc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
864  }
865  }
866 }
867 
868 
869 void MacroAssembler::MultiPushReversedFPU(RegList regs) {
870  CpuFeatures::Scope scope(FPU);
871  int16_t num_to_push = NumberOfBitsSet(regs);
872  int16_t stack_offset = num_to_push * kDoubleSize;
873 
874  Subu(sp, sp, Operand(stack_offset));
875  for (int16_t i = 0; i < kNumRegisters; i++) {
876  if ((regs & (1 << i)) != 0) {
877  stack_offset -= kDoubleSize;
878  sdc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
879  }
880  }
881 }
882 
883 
884 void MacroAssembler::MultiPopFPU(RegList regs) {
885  CpuFeatures::Scope scope(FPU);
886  int16_t stack_offset = 0;
887 
888  for (int16_t i = 0; i < kNumRegisters; i++) {
889  if ((regs & (1 << i)) != 0) {
890  ldc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
891  stack_offset += kDoubleSize;
892  }
893  }
894  addiu(sp, sp, stack_offset);
895 }
896 
897 
898 void MacroAssembler::MultiPopReversedFPU(RegList regs) {
899  CpuFeatures::Scope scope(FPU);
900  int16_t stack_offset = 0;
901 
902  for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
903  if ((regs & (1 << i)) != 0) {
904  ldc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
905  stack_offset += kDoubleSize;
906  }
907  }
908  addiu(sp, sp, stack_offset);
909 }
910 
911 
912 void MacroAssembler::FlushICache(Register address, unsigned instructions) {
913  RegList saved_regs = kJSCallerSaved | ra.bit();
914  MultiPush(saved_regs);
915  AllowExternalCallThatCantCauseGC scope(this);
916 
917  // Save to a0 in case address == t0.
918  Move(a0, address);
919  PrepareCallCFunction(2, t0);
920 
921  li(a1, instructions * kInstrSize);
922  CallCFunction(ExternalReference::flush_icache_function(isolate()), 2);
923  MultiPop(saved_regs);
924 }
925 
926 
927 void MacroAssembler::Ext(Register rt,
928  Register rs,
929  uint16_t pos,
930  uint16_t size) {
931  ASSERT(pos < 32);
932  ASSERT(pos + size < 33);
933 
934  if (kArchVariant == kMips32r2) {
935  ext_(rt, rs, pos, size);
936  } else {
937  // Move rs to rt and shift it left then right to get the
938  // desired bitfield on the right side and zeroes on the left.
939  int shift_left = 32 - (pos + size);
940  sll(rt, rs, shift_left); // Acts as a move if shift_left == 0.
941 
942  int shift_right = 32 - size;
943  if (shift_right > 0) {
944  srl(rt, rt, shift_right);
945  }
946  }
947 }
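// Example of the non-r2 path: Ext(v0, a0, 4, 8) emits
//   sll  v0, a0, 20   // 32 - (pos + size)
//   srl  v0, v0, 24   // 32 - size
// leaving bits [11:4] of a0 right-justified in v0.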
948 
949 
950 void MacroAssembler::Ins(Register rt,
951  Register rs,
952  uint16_t pos,
953  uint16_t size) {
954  ASSERT(pos < 32);
955  ASSERT(pos + size <= 32);
956  ASSERT(size != 0);
957 
958  if (kArchVariant == kMips32r2) {
959  ins_(rt, rs, pos, size);
960  } else {
961  ASSERT(!rt.is(t8) && !rs.is(t8));
962  Subu(at, zero_reg, Operand(1));
963  srl(at, at, 32 - size);
964  and_(t8, rs, at);
965  sll(t8, t8, pos);
966  sll(at, at, pos);
967  nor(at, at, zero_reg);
968  and_(at, rt, at);
969  or_(rt, t8, at);
970  }
971 }
972 
973 
974 void MacroAssembler::Cvt_d_uw(FPURegister fd,
975  FPURegister fs,
976  FPURegister scratch) {
977  // Move the data from fs to t8.
978  mfc1(t8, fs);
979  Cvt_d_uw(fd, t8, scratch);
980 }
981 
982 
983 void MacroAssembler::Cvt_d_uw(FPURegister fd,
984  Register rs,
985  FPURegister scratch) {
986  // Convert rs to an FP value in fd (and fd + 1).
987  // We do this by converting rs minus the MSB to avoid sign conversion,
988  // then adding 2^31 to the result (if needed).
989 
990  ASSERT(!fd.is(scratch));
991  ASSERT(!rs.is(t9));
992  ASSERT(!rs.is(at));
993 
994  // Save rs's MSB to t9.
995  Ext(t9, rs, 31, 1);
996  // Remove rs's MSB.
997  Ext(at, rs, 0, 31);
998  // Move the result to fd.
999  mtc1(at, fd);
1000 
1001  // Convert fd to a real FP value.
1002  cvt_d_w(fd, fd);
1003 
1004  Label conversion_done;
1005 
1006  // If rs's MSB was 0, it's done.
1007  // Otherwise we need to add that to the FP register.
1008  Branch(&conversion_done, eq, t9, Operand(zero_reg));
1009 
1010  // Load 2^31 into scratch as its double representation.
1011  li(at, 0x41E00000);
1012  mtc1(at, FPURegister::from_code(scratch.code() + 1));
1013  mtc1(zero_reg, scratch);
1014  // Add it to fd.
1015  add_d(fd, fd, scratch);
1016 
1017  bind(&conversion_done);
1018 }
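// In C++ terms the conversion above is roughly (illustrative sketch):
//
//   double d = static_cast<double>(static_cast<int32_t>(u & 0x7FFFFFFF));
//   if (u & 0x80000000u) d += 2147483648.0;  // add 2^31 back if the MSB was set
//
// 0x41E00000 in the high word of a double is the IEEE-754 encoding of 2^31.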
1019 
1020 
1021 void MacroAssembler::Trunc_uw_d(FPURegister fd,
1022  FPURegister fs,
1023  FPURegister scratch) {
1024  Trunc_uw_d(fs, t8, scratch);
1025  mtc1(t8, fd);
1026 }
1027 
1028 void MacroAssembler::Trunc_w_d(FPURegister fd, FPURegister fs) {
1029  if (kArchVariant == kLoongson && fd.is(fs)) {
1030  mfc1(t8, FPURegister::from_code(fs.code() + 1));
1031  trunc_w_d(fd, fs);
1032  mtc1(t8, FPURegister::from_code(fs.code() + 1));
1033  } else {
1034  trunc_w_d(fd, fs);
1035  }
1036 }
1037 
1038 void MacroAssembler::Round_w_d(FPURegister fd, FPURegister fs) {
1039  if (kArchVariant == kLoongson && fd.is(fs)) {
1040  mfc1(t8, FPURegister::from_code(fs.code() + 1));
1041  round_w_d(fd, fs);
1042  mtc1(t8, FPURegister::from_code(fs.code() + 1));
1043  } else {
1044  round_w_d(fd, fs);
1045  }
1046 }
1047 
1048 
1049 void MacroAssembler::Floor_w_d(FPURegister fd, FPURegister fs) {
1050  if (kArchVariant == kLoongson && fd.is(fs)) {
1051  mfc1(t8, FPURegister::from_code(fs.code() + 1));
1052  floor_w_d(fd, fs);
1053  mtc1(t8, FPURegister::from_code(fs.code() + 1));
1054  } else {
1055  floor_w_d(fd, fs);
1056  }
1057 }
1058 
1059 
1060 void MacroAssembler::Ceil_w_d(FPURegister fd, FPURegister fs) {
1061  if (kArchVariant == kLoongson && fd.is(fs)) {
1062  mfc1(t8, FPURegister::from_code(fs.code() + 1));
1063  ceil_w_d(fd, fs);
1064  mtc1(t8, FPURegister::from_code(fs.code() + 1));
1065  } else {
1066  ceil_w_d(fd, fs);
1067  }
1068 }
1069 
1070 
1071 void MacroAssembler::Trunc_uw_d(FPURegister fd,
1072  Register rs,
1073  FPURegister scratch) {
1074  ASSERT(!fd.is(scratch));
1075  ASSERT(!rs.is(at));
1076 
1077  // Load 2^31 into scratch as its double representation.
1078  li(at, 0x41E00000);
1079  mtc1(at, FPURegister::from_code(scratch.code() + 1));
1080  mtc1(zero_reg, scratch);
1081  // Test if scratch > fd.
1082  // If fd < 2^31 we can convert it normally.
1083  Label simple_convert;
1084  BranchF(&simple_convert, NULL, lt, fd, scratch);
1085 
1086  // First we subtract 2^31 from fd, then trunc it to rs
1087  // and add 2^31 to rs.
1088  sub_d(scratch, fd, scratch);
1089  trunc_w_d(scratch, scratch);
1090  mfc1(rs, scratch);
1091  Or(rs, rs, 1 << 31);
1092 
1093  Label done;
1094  Branch(&done);
1095  // Simple conversion.
1096  bind(&simple_convert);
1097  trunc_w_d(scratch, fd);
1098  mfc1(rs, scratch);
1099 
1100  bind(&done);
1101 }
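// In C++ terms the truncation above is roughly (illustrative sketch):
//
//   if (d < 2147483648.0) {
//     result = static_cast<uint32_t>(static_cast<int32_t>(d));   // simple path
//   } else {
//     result = static_cast<uint32_t>(static_cast<int32_t>(d - 2147483648.0))
//              | 0x80000000u;                                    // restore bit 31
//   }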
1102 
1103 
1104 void MacroAssembler::BranchF(Label* target,
1105  Label* nan,
1106  Condition cc,
1107  FPURegister cmp1,
1108  FPURegister cmp2,
1109  BranchDelaySlot bd) {
1110  if (cc == al) {
1111  Branch(bd, target);
1112  return;
1113  }
1114 
1115  ASSERT(nan || target);
1116  // Check for unordered (NaN) cases.
1117  if (nan) {
1118  c(UN, D, cmp1, cmp2);
1119  bc1t(nan);
1120  }
1121 
1122  if (target) {
1123  // Here NaN cases were either handled by this function or are assumed to
1124  // have been handled by the caller.
1125  // Unsigned conditions are treated as their signed counterpart.
1126  switch (cc) {
1127  case Uless:
1128  case less:
1129  c(OLT, D, cmp1, cmp2);
1130  bc1t(target);
1131  break;
1132  case Ugreater:
1133  case greater:
1134  c(ULE, D, cmp1, cmp2);
1135  bc1f(target);
1136  break;
1137  case Ugreater_equal:
1138  case greater_equal:
1139  c(ULT, D, cmp1, cmp2);
1140  bc1f(target);
1141  break;
1142  case Uless_equal:
1143  case less_equal:
1144  c(OLE, D, cmp1, cmp2);
1145  bc1t(target);
1146  break;
1147  case eq:
1148  c(EQ, D, cmp1, cmp2);
1149  bc1t(target);
1150  break;
1151  case ne:
1152  c(EQ, D, cmp1, cmp2);
1153  bc1f(target);
1154  break;
1155  default:
1156  CHECK(0);
1157  };
1158  }
1159 
1160  if (bd == PROTECT) {
1161  nop();
1162  }
1163 }
1164 
1165 
1166 void MacroAssembler::Move(FPURegister dst, double imm) {
1167  ASSERT(CpuFeatures::IsEnabled(FPU));
1168  static const DoubleRepresentation minus_zero(-0.0);
1169  static const DoubleRepresentation zero(0.0);
1170  DoubleRepresentation value(imm);
1171  // Handle special values first.
1172  bool force_load = dst.is(kDoubleRegZero);
1173  if (value.bits == zero.bits && !force_load) {
1174  mov_d(dst, kDoubleRegZero);
1175  } else if (value.bits == minus_zero.bits && !force_load) {
1176  neg_d(dst, kDoubleRegZero);
1177  } else {
1178  uint32_t lo, hi;
1179  DoubleAsTwoUInt32(imm, &lo, &hi);
1180  // Move the low part of the double into the lower of the corresponding FPU
1181  // register of FPU register pair.
1182  if (lo != 0) {
1183  li(at, Operand(lo));
1184  mtc1(at, dst);
1185  } else {
1186  mtc1(zero_reg, dst);
1187  }
1188  // Move the high part of the double into the higher of the corresponding FPU
1189  // register of FPU register pair.
1190  if (hi != 0) {
1191  li(at, Operand(hi));
1192  mtc1(at, dst.high());
1193  } else {
1194  mtc1(zero_reg, dst.high());
1195  }
1196  }
1197 }
1198 
1199 
1200 void MacroAssembler::Movz(Register rd, Register rs, Register rt) {
1201  if (kArchVariant == kLoongson) {
1202  Label done;
1203  Branch(&done, ne, rt, Operand(zero_reg));
1204  mov(rd, rs);
1205  bind(&done);
1206  } else {
1207  movz(rd, rs, rt);
1208  }
1209 }
1210 
1211 
1212 void MacroAssembler::Movn(Register rd, Register rs, Register rt) {
1213  if (kArchVariant == kLoongson) {
1214  Label done;
1215  Branch(&done, eq, rt, Operand(zero_reg));
1216  mov(rd, rs);
1217  bind(&done);
1218  } else {
1219  movn(rd, rs, rt);
1220  }
1221 }
1222 
1223 
1224 void MacroAssembler::Movt(Register rd, Register rs, uint16_t cc) {
1225  if (kArchVariant == kLoongson) {
1226  // Tests an FP condition code and then conditionally moves rs to rd.
1227  // We do not currently use any FPU cc bit other than bit 0.
1228  ASSERT(cc == 0);
1229  ASSERT(!(rs.is(t8) || rd.is(t8)));
1230  Label done;
1231  Register scratch = t8;
1232  // For testing purposes we need to fetch the contents of the FCSR register
1233  // and then test its cc (floating point condition code) bit (for cc = 0,
1234  // this is bit 23 of the FCSR).
1235  cfc1(scratch, FCSR);
1236  // For the MIPS I, II and III architectures, the contents of scratch are
1237  // UNPREDICTABLE for the instruction immediately following CFC1.
1238  nop();
1239  srl(scratch, scratch, 16);
1240  andi(scratch, scratch, 0x0080);
1241  Branch(&done, eq, scratch, Operand(zero_reg));
1242  mov(rd, rs);
1243  bind(&done);
1244  } else {
1245  movt(rd, rs, cc);
1246  }
1247 }
1248 
1249 
1250 void MacroAssembler::Movf(Register rd, Register rs, uint16_t cc) {
1251  if (kArchVariant == kLoongson) {
1252  // Tests an FP condition code and then conditionally moves rs to rd.
1253  // We do not currently use any FPU cc bit other than bit 0.
1254  ASSERT(cc == 0);
1255  ASSERT(!(rs.is(t8) || rd.is(t8)));
1256  Label done;
1257  Register scratch = t8;
1258  // For testing purposes we need to fetch the contents of the FCSR register
1259  // and then test its cc (floating point condition code) bit (for cc = 0,
1260  // this is bit 23 of the FCSR).
1261  cfc1(scratch, FCSR);
1262  // For the MIPS I, II and III architectures, the contents of scratch are
1263  // UNPREDICTABLE for the instruction immediately following CFC1.
1264  nop();
1265  srl(scratch, scratch, 16);
1266  andi(scratch, scratch, 0x0080);
1267  Branch(&done, ne, scratch, Operand(zero_reg));
1268  mov(rd, rs);
1269  bind(&done);
1270  } else {
1271  movf(rd, rs, cc);
1272  }
1273 }
1274 
1275 
1276 void MacroAssembler::Clz(Register rd, Register rs) {
1277  if (kArchVariant == kLoongson) {
1278  ASSERT(!(rd.is(t8) || rd.is(t9)) && !(rs.is(t8) || rs.is(t9)));
1279  Register mask = t8;
1280  Register scratch = t9;
1281  Label loop, end;
1282  mov(at, rs);
1283  mov(rd, zero_reg);
1284  lui(mask, 0x8000);
1285  bind(&loop);
1286  and_(scratch, at, mask);
1287  Branch(&end, ne, scratch, Operand(zero_reg));
1288  addiu(rd, rd, 1);
1289  Branch(&loop, ne, mask, Operand(zero_reg), USE_DELAY_SLOT);
1290  srl(mask, mask, 1);
1291  bind(&end);
1292  } else {
1293  clz(rd, rs);
1294  }
1295 }
1296 
1297 
1298 // Tries to get a signed int32 out of a double precision floating point heap
1299 // number. Rounds towards 0. Branch to 'not_int32' if the double is out of the
1300 // 32bits signed integer range.
1301 // This method implementation differs from the ARM version for performance
1302 // reasons.
1303 void MacroAssembler::ConvertToInt32(Register source,
1304  Register dest,
1305  Register scratch,
1306  Register scratch2,
1307  FPURegister double_scratch,
1308  Label *not_int32) {
1309  Label right_exponent, done;
1310  // Get exponent word (ENDIAN issues).
1311  lw(scratch, FieldMemOperand(source, HeapNumber::kExponentOffset));
1312  // Get exponent alone in scratch2.
1313  And(scratch2, scratch, Operand(HeapNumber::kExponentMask));
1314  // Load dest with zero. We use this either for the final shift or
1315  // for the answer.
1316  mov(dest, zero_reg);
1317  // Check whether the exponent matches a 32 bit signed int that is not a Smi.
1318  // A non-Smi integer is 1.xxx * 2^30 so the exponent is 30 (biased). This is
1319  // the exponent that we are fastest at and also the highest exponent we can
1320  // handle here.
1321  const uint32_t non_smi_exponent =
1322  (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift;
1323  // If we have a match of the int32-but-not-Smi exponent then skip some logic.
1324  Branch(&right_exponent, eq, scratch2, Operand(non_smi_exponent));
1325  // If the exponent is higher than that then go to not_int32 case. This
1326  // catches numbers that don't fit in a signed int32, infinities and NaNs.
1327  Branch(not_int32, gt, scratch2, Operand(non_smi_exponent));
1328 
1329  // We know the exponent is smaller than 30 (biased). If it is less than
1330  // 0 (biased) then the number is smaller in magnitude than 1.0 * 2^0, i.e.
1331  // it rounds to zero.
1332  const uint32_t zero_exponent =
1333  (HeapNumber::kExponentBias + 0) << HeapNumber::kExponentShift;
1334  Subu(scratch2, scratch2, Operand(zero_exponent));
1335  // Dest already has a Smi zero.
1336  Branch(&done, lt, scratch2, Operand(zero_reg));
1337  if (!CpuFeatures::IsSupported(FPU)) {
1338  // We have a shifted exponent between 0 and 30 in scratch2.
1339  srl(dest, scratch2, HeapNumber::kExponentShift);
1340  // We now have the exponent in dest. Subtract from 30 to get
1341  // how much to shift down.
1342  li(at, Operand(30));
1343  subu(dest, at, dest);
1344  }
1345  bind(&right_exponent);
1346  if (CpuFeatures::IsSupported(FPU)) {
1347  CpuFeatures::Scope scope(FPU);
1348  // MIPS FPU instructions implementing double precision to integer
1349  // conversion using round to zero. Since the FP value was qualified
1350  // above, the resulting integer should be a legal int32.
1351  // The original 'Exponent' word is still in scratch.
1352  lwc1(double_scratch, FieldMemOperand(source, HeapNumber::kMantissaOffset));
1353  mtc1(scratch, FPURegister::from_code(double_scratch.code() + 1));
1354  trunc_w_d(double_scratch, double_scratch);
1355  mfc1(dest, double_scratch);
1356  } else {
1357  // On entry, dest has final downshift, scratch has original sign/exp/mant.
1358  // Save sign bit in top bit of dest.
1359  And(scratch2, scratch, Operand(0x80000000));
1360  Or(dest, dest, Operand(scratch2));
1361  // Put back the implicit 1, just above mantissa field.
1362  Or(scratch, scratch, Operand(1 << HeapNumber::kExponentShift));
1363 
1364  // Shift up the mantissa bits to take up the space the exponent used to
1365  // take. We just orred in the implicit bit so that took care of one and
1366  // we want to leave the sign bit 0 so we subtract 2 bits from the shift
1367  // distance. But we want to clear the sign-bit so shift one more bit
1368  // left, then shift right one bit.
1369  const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
1370  sll(scratch, scratch, shift_distance + 1);
1371  srl(scratch, scratch, 1);
1372 
1373  // Get the second half of the double. For some exponents we don't
1374  // actually need this because the bits get shifted out again, but
1375  // it's probably slower to test than just to do it.
1376  lw(scratch2, FieldMemOperand(source, HeapNumber::kMantissaOffset));
1377  // Extract the top 10 bits of the low word and insert them into the bottom
1378  // 10 bits of scratch. The width of the field is the same as the shift above.
1379  const int field_width = shift_distance;
1380  Ext(scratch2, scratch2, 32-shift_distance, field_width);
1381  Ins(scratch, scratch2, 0, field_width);
1382  // Move down according to the exponent.
1383  srlv(scratch, scratch, dest);
1384  // Prepare the negative version of our integer.
1385  subu(scratch2, zero_reg, scratch);
1386  // Trick to check sign bit (msb) held in dest, count leading zero.
1387  // 0 indicates negative, save negative version with conditional move.
1388  Clz(dest, dest);
1389  Movz(scratch, scratch2, dest);
1390  mov(dest, scratch);
1391  }
1392  bind(&done);
1393 }
1394 
1395 
1396 void MacroAssembler::EmitFPUTruncate(FPURoundingMode rounding_mode,
1397  FPURegister result,
1398  DoubleRegister double_input,
1399  Register scratch1,
1400  Register except_flag,
1401  CheckForInexactConversion check_inexact) {
1402  ASSERT(CpuFeatures::IsSupported(FPU));
1403  CpuFeatures::Scope scope(FPU);
1404 
1405  int32_t except_mask = kFCSRFlagMask; // Assume interested in all exceptions.
1406 
1407  if (check_inexact == kDontCheckForInexactConversion) {
1408  // Ignore inexact exceptions.
1409  except_mask &= ~kFCSRInexactFlagMask;
1410  }
1411 
1412  // Save FCSR.
1413  cfc1(scratch1, FCSR);
1414  // Disable FPU exceptions.
1415  ctc1(zero_reg, FCSR);
1416 
1417  // Do operation based on rounding mode.
1418  switch (rounding_mode) {
1419  case kRoundToNearest:
1420  Round_w_d(result, double_input);
1421  break;
1422  case kRoundToZero:
1423  Trunc_w_d(result, double_input);
1424  break;
1425  case kRoundToPlusInf:
1426  Ceil_w_d(result, double_input);
1427  break;
1428  case kRoundToMinusInf:
1429  Floor_w_d(result, double_input);
1430  break;
1431  } // End of switch-statement.
1432 
1433  // Retrieve FCSR.
1434  cfc1(except_flag, FCSR);
1435  // Restore FCSR.
1436  ctc1(scratch1, FCSR);
1437 
1438  // Check for fpu exceptions.
1439  And(except_flag, except_flag, Operand(except_mask));
1440 }
1441 
1442 
1443 void MacroAssembler::EmitOutOfInt32RangeTruncate(Register result,
1444  Register input_high,
1445  Register input_low,
1446  Register scratch) {
1447  Label done, normal_exponent, restore_sign;
1448  // Extract the biased exponent in result.
1449  Ext(result,
1450  input_high,
1451  HeapNumber::kExponentShift,
1452  HeapNumber::kExponentBits);
1453 
1454  // Check for Infinity and NaNs, which should return 0.
1455  Subu(scratch, result, HeapNumber::kExponentMask);
1456  Movz(result, zero_reg, scratch);
1457  Branch(&done, eq, scratch, Operand(zero_reg));
1458 
1459  // Express exponent as delta to (number of mantissa bits + 31).
1460  Subu(result,
1461  result,
1462  Operand(HeapNumber::kExponentBias + HeapNumber::kMantissaBits + 31));
1463 
1464  // If the delta is strictly positive, all bits would be shifted away,
1465  // which means that we can return 0.
1466  Branch(&normal_exponent, le, result, Operand(zero_reg));
1467  mov(result, zero_reg);
1468  Branch(&done);
1469 
1470  bind(&normal_exponent);
1471  const int kShiftBase = HeapNumber::kNonMantissaBitsInTopWord - 1;
1472  // Calculate shift.
1473  Addu(scratch, result, Operand(kShiftBase + HeapNumber::kMantissaBits));
1474 
1475  // Save the sign.
1476  Register sign = result;
1477  result = no_reg;
1478  And(sign, input_high, Operand(HeapNumber::kSignMask));
1479 
1480  // On ARM shifts > 31 bits are valid and will result in zero. On MIPS we need
1481  // to check for this specific case.
1482  Label high_shift_needed, high_shift_done;
1483  Branch(&high_shift_needed, lt, scratch, Operand(32));
1484  mov(input_high, zero_reg);
1485  Branch(&high_shift_done);
1486  bind(&high_shift_needed);
1487 
1488  // Set the implicit 1 before the mantissa part in input_high.
1489  Or(input_high,
1490  input_high,
1491  Operand(1 << HeapNumber::kMantissaBitsInTopWord));
1492  // Shift the mantissa bits to the correct position.
1493  // We don't need to clear non-mantissa bits as they will be shifted away.
1494  // If they weren't, it would mean that the answer is in the 32bit range.
1495  sllv(input_high, input_high, scratch);
1496 
1497  bind(&high_shift_done);
1498 
1499  // Replace the shifted bits with bits from the lower mantissa word.
1500  Label pos_shift, shift_done;
1501  li(at, 32);
1502  subu(scratch, at, scratch);
1503  Branch(&pos_shift, ge, scratch, Operand(zero_reg));
1504 
1505  // Negate scratch.
1506  Subu(scratch, zero_reg, scratch);
1507  sllv(input_low, input_low, scratch);
1508  Branch(&shift_done);
1509 
1510  bind(&pos_shift);
1511  srlv(input_low, input_low, scratch);
1512 
1513  bind(&shift_done);
1514  Or(input_high, input_high, Operand(input_low));
1515  // Restore sign if necessary.
1516  mov(scratch, sign);
1517  result = sign;
1518  sign = no_reg;
1519  Subu(result, zero_reg, input_high);
1520  Movz(result, input_high, scratch);
1521  bind(&done);
1522 }
1523 
1524 
1525 void MacroAssembler::EmitECMATruncate(Register result,
1526  FPURegister double_input,
1527  FPURegister single_scratch,
1528  Register scratch,
1529  Register scratch2,
1530  Register scratch3) {
1531  CpuFeatures::Scope scope(FPU);
1532  ASSERT(!scratch2.is(result));
1533  ASSERT(!scratch3.is(result));
1534  ASSERT(!scratch3.is(scratch2));
1535  ASSERT(!scratch.is(result) &&
1536  !scratch.is(scratch2) &&
1537  !scratch.is(scratch3));
1538  ASSERT(!single_scratch.is(double_input));
1539 
1540  Label done;
1541  Label manual;
1542 
1543  // Clear cumulative exception flags and save the FCSR.
1544  cfc1(scratch2, FCSR);
1545  ctc1(zero_reg, FCSR);
1546  // Try a conversion to a signed integer.
1547  trunc_w_d(single_scratch, double_input);
1548  mfc1(result, single_scratch);
1549  // Retrieve and restore the FCSR.
1550  cfc1(scratch, FCSR);
1551  ctc1(scratch2, FCSR);
1552  // Check for overflow and NaNs.
1553  And(scratch,
1554  scratch,
1555  kFCSROverflowFlagMask | kFCSRUnderflowFlagMask | kFCSRInvalidOpFlagMask);
1556  // If we had no exceptions we are done.
1557  Branch(&done, eq, scratch, Operand(zero_reg));
1558 
1559  // Load the double value and perform a manual truncation.
1560  Register input_high = scratch2;
1561  Register input_low = scratch3;
1562  Move(input_low, input_high, double_input);
1563  EmitOutOfInt32RangeTruncate(result,
1564  input_high,
1565  input_low,
1566  scratch);
1567  bind(&done);
1568 }
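// The overall effect is ECMA-262 ToInt32: truncate towards zero and keep the
// low 32 bits, with NaN and the infinities mapping to 0. A plain C++ sketch
// (illustrative only):
//
//   if (!isfinite(d)) return 0;
//   double t = trunc(d);
//   double m = fmod(t, 4294967296.0);            // modulo 2^32, sign of t
//   if (m < 0) m += 4294967296.0;
//   return (m >= 2147483648.0) ? static_cast<int32_t>(m - 4294967296.0)
//                              : static_cast<int32_t>(m);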
1569 
1570 
1571 void MacroAssembler::GetLeastBitsFromSmi(Register dst,
1572  Register src,
1573  int num_least_bits) {
1574  Ext(dst, src, kSmiTagSize, num_least_bits);
1575 }
1576 
1577 
1578 void MacroAssembler::GetLeastBitsFromInt32(Register dst,
1579  Register src,
1580  int num_least_bits) {
1581  And(dst, src, Operand((1 << num_least_bits) - 1));
1582 }
1583 
1584 
1585 // Emulated conditional branches do not emit a nop in the branch delay slot.
1586 //
1587 // BRANCH_ARGS_CHECK checks that conditional jump arguments are correct.
1588 #define BRANCH_ARGS_CHECK(cond, rs, rt) ASSERT( \
1589  (cond == cc_always && rs.is(zero_reg) && rt.rm().is(zero_reg)) || \
1590  (cond != cc_always && (!rs.is(zero_reg) || !rt.rm().is(zero_reg))))
1591 
1592 
1593 void MacroAssembler::Branch(int16_t offset, BranchDelaySlot bdslot) {
1594  BranchShort(offset, bdslot);
1595 }
1596 
1597 
1598 void MacroAssembler::Branch(int16_t offset, Condition cond, Register rs,
1599  const Operand& rt,
1600  BranchDelaySlot bdslot) {
1601  BranchShort(offset, cond, rs, rt, bdslot);
1602 }
1603 
1604 
1605 void MacroAssembler::Branch(Label* L, BranchDelaySlot bdslot) {
1606  if (L->is_bound()) {
1607  if (is_near(L)) {
1608  BranchShort(L, bdslot);
1609  } else {
1610  Jr(L, bdslot);
1611  }
1612  } else {
1613  if (is_trampoline_emitted()) {
1614  Jr(L, bdslot);
1615  } else {
1616  BranchShort(L, bdslot);
1617  }
1618  }
1619 }
1620 
1621 
1622 void MacroAssembler::Branch(Label* L, Condition cond, Register rs,
1623  const Operand& rt,
1624  BranchDelaySlot bdslot) {
1625  if (L->is_bound()) {
1626  if (is_near(L)) {
1627  BranchShort(L, cond, rs, rt, bdslot);
1628  } else {
1629  Label skip;
1630  Condition neg_cond = NegateCondition(cond);
1631  BranchShort(&skip, neg_cond, rs, rt);
1632  Jr(L, bdslot);
1633  bind(&skip);
1634  }
1635  } else {
1636  if (is_trampoline_emitted()) {
1637  Label skip;
1638  Condition neg_cond = NegateCondition(cond);
1639  BranchShort(&skip, neg_cond, rs, rt);
1640  Jr(L, bdslot);
1641  bind(&skip);
1642  } else {
1643  BranchShort(L, cond, rs, rt, bdslot);
1644  }
1645  }
1646 }
1647 
1648 
1649 void MacroAssembler::Branch(Label* L,
1650  Condition cond,
1651  Register rs,
1652  Heap::RootListIndex index,
1653  BranchDelaySlot bdslot) {
1654  LoadRoot(at, index);
1655  Branch(L, cond, rs, Operand(at), bdslot);
1656 }
1657 
1658 
1659 void MacroAssembler::BranchShort(int16_t offset, BranchDelaySlot bdslot) {
1660  b(offset);
1661 
1662  // Emit a nop in the branch delay slot if required.
1663  if (bdslot == PROTECT)
1664  nop();
1665 }
1666 
1667 
1668 void MacroAssembler::BranchShort(int16_t offset, Condition cond, Register rs,
1669  const Operand& rt,
1670  BranchDelaySlot bdslot) {
1671  BRANCH_ARGS_CHECK(cond, rs, rt);
1672  ASSERT(!rs.is(zero_reg));
1673  Register r2 = no_reg;
1674  Register scratch = at;
1675 
1676  if (rt.is_reg()) {
1677  // NOTE: 'at' can be clobbered by Branch but it is legal to use it as rs or
1678  // rt.
1679  r2 = rt.rm_;
1680  switch (cond) {
1681  case cc_always:
1682  b(offset);
1683  break;
1684  case eq:
1685  beq(rs, r2, offset);
1686  break;
1687  case ne:
1688  bne(rs, r2, offset);
1689  break;
1690  // Signed comparison.
1691  case greater:
1692  if (r2.is(zero_reg)) {
1693  bgtz(rs, offset);
1694  } else {
1695  slt(scratch, r2, rs);
1696  bne(scratch, zero_reg, offset);
1697  }
1698  break;
1699  case greater_equal:
1700  if (r2.is(zero_reg)) {
1701  bgez(rs, offset);
1702  } else {
1703  slt(scratch, rs, r2);
1704  beq(scratch, zero_reg, offset);
1705  }
1706  break;
1707  case less:
1708  if (r2.is(zero_reg)) {
1709  bltz(rs, offset);
1710  } else {
1711  slt(scratch, rs, r2);
1712  bne(scratch, zero_reg, offset);
1713  }
1714  break;
1715  case less_equal:
1716  if (r2.is(zero_reg)) {
1717  blez(rs, offset);
1718  } else {
1719  slt(scratch, r2, rs);
1720  beq(scratch, zero_reg, offset);
1721  }
1722  break;
1723  // Unsigned comparison.
1724  case Ugreater:
1725  if (r2.is(zero_reg)) {
1726  bgtz(rs, offset);
1727  } else {
1728  sltu(scratch, r2, rs);
1729  bne(scratch, zero_reg, offset);
1730  }
1731  break;
1732  case Ugreater_equal:
1733  if (r2.is(zero_reg)) {
1734  bgez(rs, offset);
1735  } else {
1736  sltu(scratch, rs, r2);
1737  beq(scratch, zero_reg, offset);
1738  }
1739  break;
1740  case Uless:
1741  if (r2.is(zero_reg)) {
1742  // No code needs to be emitted.
1743  return;
1744  } else {
1745  sltu(scratch, rs, r2);
1746  bne(scratch, zero_reg, offset);
1747  }
1748  break;
1749  case Uless_equal:
1750  if (r2.is(zero_reg)) {
1751  b(offset);
1752  } else {
1753  sltu(scratch, r2, rs);
1754  beq(scratch, zero_reg, offset);
1755  }
1756  break;
1757  default:
1758  UNREACHABLE();
1759  }
1760  } else {
1761  // Be careful to always use shifted_branch_offset only just before the
1762  // branch instruction, as the location will be remembered for patching the
1763  // target.
1764  switch (cond) {
1765  case cc_always:
1766  b(offset);
1767  break;
1768  case eq:
1769  // We don't want any other register but scratch clobbered.
1770  ASSERT(!scratch.is(rs));
1771  r2 = scratch;
1772  li(r2, rt);
1773  beq(rs, r2, offset);
1774  break;
1775  case ne:
1776  // We don't want any other register but scratch clobbered.
1777  ASSERT(!scratch.is(rs));
1778  r2 = scratch;
1779  li(r2, rt);
1780  bne(rs, r2, offset);
1781  break;
1782  // Signed comparison.
1783  case greater:
1784  if (rt.imm32_ == 0) {
1785  bgtz(rs, offset);
1786  } else {
1787  r2 = scratch;
1788  li(r2, rt);
1789  slt(scratch, r2, rs);
1790  bne(scratch, zero_reg, offset);
1791  }
1792  break;
1793  case greater_equal:
1794  if (rt.imm32_ == 0) {
1795  bgez(rs, offset);
1796  } else if (is_int16(rt.imm32_)) {
1797  slti(scratch, rs, rt.imm32_);
1798  beq(scratch, zero_reg, offset);
1799  } else {
1800  r2 = scratch;
1801  li(r2, rt);
1802  slt(scratch, rs, r2);
1803  beq(scratch, zero_reg, offset);
1804  }
1805  break;
1806  case less:
1807  if (rt.imm32_ == 0) {
1808  bltz(rs, offset);
1809  } else if (is_int16(rt.imm32_)) {
1810  slti(scratch, rs, rt.imm32_);
1811  bne(scratch, zero_reg, offset);
1812  } else {
1813  r2 = scratch;
1814  li(r2, rt);
1815  slt(scratch, rs, r2);
1816  bne(scratch, zero_reg, offset);
1817  }
1818  break;
1819  case less_equal:
1820  if (rt.imm32_ == 0) {
1821  blez(rs, offset);
1822  } else {
1823  r2 = scratch;
1824  li(r2, rt);
1825  slt(scratch, r2, rs);
1826  beq(scratch, zero_reg, offset);
1827  }
1828  break;
1829  // Unsigned comparison.
1830  case Ugreater:
1831  if (rt.imm32_ == 0) {
1832  bgtz(rs, offset);
1833  } else {
1834  r2 = scratch;
1835  li(r2, rt);
1836  sltu(scratch, r2, rs);
1837  bne(scratch, zero_reg, offset);
1838  }
1839  break;
1840  case Ugreater_equal:
1841  if (rt.imm32_ == 0) {
1842  bgez(rs, offset);
1843  } else if (is_int16(rt.imm32_)) {
1844  sltiu(scratch, rs, rt.imm32_);
1845  beq(scratch, zero_reg, offset);
1846  } else {
1847  r2 = scratch;
1848  li(r2, rt);
1849  sltu(scratch, rs, r2);
1850  beq(scratch, zero_reg, offset);
1851  }
1852  break;
1853  case Uless:
1854  if (rt.imm32_ == 0) {
1855  // No code needs to be emitted.
1856  return;
1857  } else if (is_int16(rt.imm32_)) {
1858  sltiu(scratch, rs, rt.imm32_);
1859  bne(scratch, zero_reg, offset);
1860  } else {
1861  r2 = scratch;
1862  li(r2, rt);
1863  sltu(scratch, rs, r2);
1864  bne(scratch, zero_reg, offset);
1865  }
1866  break;
1867  case Uless_equal:
1868  if (rt.imm32_ == 0) {
1869  b(offset);
1870  } else {
1871  r2 = scratch;
1872  li(r2, rt);
1873  sltu(scratch, r2, rs);
1874  beq(scratch, zero_reg, offset);
1875  }
1876  break;
1877  default:
1878  UNREACHABLE();
1879  }
1880  }
1881  // Emit a nop in the branch delay slot if required.
1882  if (bdslot == PROTECT)
1883  nop();
1884 }
1885 
1886 
1887 void MacroAssembler::BranchShort(Label* L, BranchDelaySlot bdslot) {
1888  // We use shifted_branch_offset as the branch argument so that it is
1889  // evaluated just before the branch instruction is generated, as needed.
1890 
1891  b(shifted_branch_offset(L, false));
1892 
1893  // Emit a nop in the branch delay slot if required.
1894  if (bdslot == PROTECT)
1895  nop();
1896 }
1897 
1898 
1899 void MacroAssembler::BranchShort(Label* L, Condition cond, Register rs,
1900  const Operand& rt,
1901  BranchDelaySlot bdslot) {
1902  BRANCH_ARGS_CHECK(cond, rs, rt);
1903 
1904  int32_t offset;
1905  Register r2 = no_reg;
1906  Register scratch = at;
1907  if (rt.is_reg()) {
1908  r2 = rt.rm_;
1909  // Be careful to always use shifted_branch_offset only just before the
1910  // branch instruction, as the location will be remembered for patching the
1911  // target.
1912  switch (cond) {
1913  case cc_always:
1914  offset = shifted_branch_offset(L, false);
1915  b(offset);
1916  break;
1917  case eq:
1918  offset = shifted_branch_offset(L, false);
1919  beq(rs, r2, offset);
1920  break;
1921  case ne:
1922  offset = shifted_branch_offset(L, false);
1923  bne(rs, r2, offset);
1924  break;
1925  // Signed comparison.
1926  case greater:
1927  if (r2.is(zero_reg)) {
1928  offset = shifted_branch_offset(L, false);
1929  bgtz(rs, offset);
1930  } else {
1931  slt(scratch, r2, rs);
1932  offset = shifted_branch_offset(L, false);
1933  bne(scratch, zero_reg, offset);
1934  }
1935  break;
1936  case greater_equal:
1937  if (r2.is(zero_reg)) {
1938  offset = shifted_branch_offset(L, false);
1939  bgez(rs, offset);
1940  } else {
1941  slt(scratch, rs, r2);
1942  offset = shifted_branch_offset(L, false);
1943  beq(scratch, zero_reg, offset);
1944  }
1945  break;
1946  case less:
1947  if (r2.is(zero_reg)) {
1948  offset = shifted_branch_offset(L, false);
1949  bltz(rs, offset);
1950  } else {
1951  slt(scratch, rs, r2);
1952  offset = shifted_branch_offset(L, false);
1953  bne(scratch, zero_reg, offset);
1954  }
1955  break;
1956  case less_equal:
1957  if (r2.is(zero_reg)) {
1958  offset = shifted_branch_offset(L, false);
1959  blez(rs, offset);
1960  } else {
1961  slt(scratch, r2, rs);
1962  offset = shifted_branch_offset(L, false);
1963  beq(scratch, zero_reg, offset);
1964  }
1965  break;
1966  // Unsigned comparison.
1967  case Ugreater:
1968  if (r2.is(zero_reg)) {
1969  offset = shifted_branch_offset(L, false);
1970  bgtz(rs, offset);
1971  } else {
1972  sltu(scratch, r2, rs);
1973  offset = shifted_branch_offset(L, false);
1974  bne(scratch, zero_reg, offset);
1975  }
1976  break;
1977  case Ugreater_equal:
1978  if (r2.is(zero_reg)) {
1979  offset = shifted_branch_offset(L, false);
1980  bgez(rs, offset);
1981  } else {
1982  sltu(scratch, rs, r2);
1983  offset = shifted_branch_offset(L, false);
1984  beq(scratch, zero_reg, offset);
1985  }
1986  break;
1987  case Uless:
1988  if (r2.is(zero_reg)) {
1989  // No code needs to be emitted.
1990  return;
1991  } else {
1992  sltu(scratch, rs, r2);
1993  offset = shifted_branch_offset(L, false);
1994  bne(scratch, zero_reg, offset);
1995  }
1996  break;
1997  case Uless_equal:
1998  if (r2.is(zero_reg)) {
1999  offset = shifted_branch_offset(L, false);
2000  b(offset);
2001  } else {
2002  sltu(scratch, r2, rs);
2003  offset = shifted_branch_offset(L, false);
2004  beq(scratch, zero_reg, offset);
2005  }
2006  break;
2007  default:
2008  UNREACHABLE();
2009  }
2010  } else {
2011  // Be careful to always use shifted_branch_offset only just before the
2012  // branch instruction, as the location will be remembered for patching the
2013  // target.
2014  switch (cond) {
2015  case cc_always:
2016  offset = shifted_branch_offset(L, false);
2017  b(offset);
2018  break;
2019  case eq:
2020  ASSERT(!scratch.is(rs));
2021  r2 = scratch;
2022  li(r2, rt);
2023  offset = shifted_branch_offset(L, false);
2024  beq(rs, r2, offset);
2025  break;
2026  case ne:
2027  ASSERT(!scratch.is(rs));
2028  r2 = scratch;
2029  li(r2, rt);
2030  offset = shifted_branch_offset(L, false);
2031  bne(rs, r2, offset);
2032  break;
2033  // Signed comparison.
2034  case greater:
2035  if (rt.imm32_ == 0) {
2036  offset = shifted_branch_offset(L, false);
2037  bgtz(rs, offset);
2038  } else {
2039  ASSERT(!scratch.is(rs));
2040  r2 = scratch;
2041  li(r2, rt);
2042  slt(scratch, r2, rs);
2043  offset = shifted_branch_offset(L, false);
2044  bne(scratch, zero_reg, offset);
2045  }
2046  break;
2047  case greater_equal:
2048  if (rt.imm32_ == 0) {
2049  offset = shifted_branch_offset(L, false);
2050  bgez(rs, offset);
2051  } else if (is_int16(rt.imm32_)) {
2052  slti(scratch, rs, rt.imm32_);
2053  offset = shifted_branch_offset(L, false);
2054  beq(scratch, zero_reg, offset);
2055  } else {
2056  ASSERT(!scratch.is(rs));
2057  r2 = scratch;
2058  li(r2, rt);
2059  slt(scratch, rs, r2);
2060  offset = shifted_branch_offset(L, false);
2061  beq(scratch, zero_reg, offset);
2062  }
2063  break;
2064  case less:
2065  if (rt.imm32_ == 0) {
2066  offset = shifted_branch_offset(L, false);
2067  bltz(rs, offset);
2068  } else if (is_int16(rt.imm32_)) {
2069  slti(scratch, rs, rt.imm32_);
2070  offset = shifted_branch_offset(L, false);
2071  bne(scratch, zero_reg, offset);
2072  } else {
2073  ASSERT(!scratch.is(rs));
2074  r2 = scratch;
2075  li(r2, rt);
2076  slt(scratch, rs, r2);
2077  offset = shifted_branch_offset(L, false);
2078  bne(scratch, zero_reg, offset);
2079  }
2080  break;
2081  case less_equal:
2082  if (rt.imm32_ == 0) {
2083  offset = shifted_branch_offset(L, false);
2084  blez(rs, offset);
2085  } else {
2086  ASSERT(!scratch.is(rs));
2087  r2 = scratch;
2088  li(r2, rt);
2089  slt(scratch, r2, rs);
2090  offset = shifted_branch_offset(L, false);
2091  beq(scratch, zero_reg, offset);
2092  }
2093  break;
2094  // Unsigned comparison.
2095  case Ugreater:
2096  if (rt.imm32_ == 0) {
2097  offset = shifted_branch_offset(L, false);
2098  bgtz(rs, offset);
2099  } else {
2100  ASSERT(!scratch.is(rs));
2101  r2 = scratch;
2102  li(r2, rt);
2103  sltu(scratch, r2, rs);
2104  offset = shifted_branch_offset(L, false);
2105  bne(scratch, zero_reg, offset);
2106  }
2107  break;
2108  case Ugreater_equal:
2109  if (rt.imm32_ == 0) {
2110  offset = shifted_branch_offset(L, false);
2111  bgez(rs, offset);
2112  } else if (is_int16(rt.imm32_)) {
2113  sltiu(scratch, rs, rt.imm32_);
2114  offset = shifted_branch_offset(L, false);
2115  beq(scratch, zero_reg, offset);
2116  } else {
2117  ASSERT(!scratch.is(rs));
2118  r2 = scratch;
2119  li(r2, rt);
2120  sltu(scratch, rs, r2);
2121  offset = shifted_branch_offset(L, false);
2122  beq(scratch, zero_reg, offset);
2123  }
2124  break;
2125  case Uless:
2126  if (rt.imm32_ == 0) {
2127  // No code needs to be emitted.
2128  return;
2129  } else if (is_int16(rt.imm32_)) {
2130  sltiu(scratch, rs, rt.imm32_);
2131  offset = shifted_branch_offset(L, false);
2132  bne(scratch, zero_reg, offset);
2133  } else {
2134  ASSERT(!scratch.is(rs));
2135  r2 = scratch;
2136  li(r2, rt);
2137  sltu(scratch, rs, r2);
2138  offset = shifted_branch_offset(L, false);
2139  bne(scratch, zero_reg, offset);
2140  }
2141  break;
2142  case Uless_equal:
2143  if (rt.imm32_ == 0) {
2144  offset = shifted_branch_offset(L, false);
2145  b(offset);
2146  } else {
2147  ASSERT(!scratch.is(rs));
2148  r2 = scratch;
2149  li(r2, rt);
2150  sltu(scratch, r2, rs);
2151  offset = shifted_branch_offset(L, false);
2152  beq(scratch, zero_reg, offset);
2153  }
2154  break;
2155  default:
2156  UNREACHABLE();
2157  }
2158  }
2159  // Check that the offset actually fits in an int16_t.
2160  ASSERT(is_int16(offset));
2161  // Emit a nop in the branch delay slot if required.
2162  if (bdslot == PROTECT)
2163  nop();
2164 }
2165 
2166 
2167 void MacroAssembler::BranchAndLink(int16_t offset, BranchDelaySlot bdslot) {
2168  BranchAndLinkShort(offset, bdslot);
2169 }
2170 
2171 
2172 void MacroAssembler::BranchAndLink(int16_t offset, Condition cond, Register rs,
2173  const Operand& rt,
2174  BranchDelaySlot bdslot) {
2175  BranchAndLinkShort(offset, cond, rs, rt, bdslot);
2176 }
2177 
2178 
2179 void MacroAssembler::BranchAndLink(Label* L, BranchDelaySlot bdslot) {
2180  if (L->is_bound()) {
2181  if (is_near(L)) {
2182  BranchAndLinkShort(L, bdslot);
2183  } else {
2184  Jalr(L, bdslot);
2185  }
2186  } else {
2187  if (is_trampoline_emitted()) {
2188  Jalr(L, bdslot);
2189  } else {
2190  BranchAndLinkShort(L, bdslot);
2191  }
2192  }
2193 }
2194 
2195 
2196 void MacroAssembler::BranchAndLink(Label* L, Condition cond, Register rs,
2197  const Operand& rt,
2198  BranchDelaySlot bdslot) {
2199  if (L->is_bound()) {
2200  if (is_near(L)) {
2201  BranchAndLinkShort(L, cond, rs, rt, bdslot);
2202  } else {
2203  Label skip;
2204  Condition neg_cond = NegateCondition(cond);
2205  BranchShort(&skip, neg_cond, rs, rt);
2206  Jalr(L, bdslot);
2207  bind(&skip);
2208  }
2209  } else {
2210  if (is_trampoline_emitted()) {
2211  Label skip;
2212  Condition neg_cond = NegateCondition(cond);
2213  BranchShort(&skip, neg_cond, rs, rt);
2214  Jalr(L, bdslot);
2215  bind(&skip);
2216  } else {
2217  BranchAndLinkShort(L, cond, rs, rt, bdslot);
2218  }
2219  }
2220 }
2221 
2222 
2223 // We need to use a bgezal or bltzal, but they can't be used directly with the
2224 // slt instructions. We could use sub or add instead but we would miss overflow
2225 // cases, so we keep slt and add an intermediate third instruction.
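// For instance, the 'greater' case below works as follows:
//   slt(scratch, r2, rs)         // scratch = (rs > r2) ? 1 : 0
//   addiu(scratch, scratch, -1)  // maps {1, 0} to {0, -1}
//   bgezal(scratch, offset)      // branches and links only when scratch == 0,
//                                // i.e. exactly when rs > r2.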
2226 void MacroAssembler::BranchAndLinkShort(int16_t offset,
2227  BranchDelaySlot bdslot) {
2228  bal(offset);
2229 
2230  // Emit a nop in the branch delay slot if required.
2231  if (bdslot == PROTECT)
2232  nop();
2233 }
2234 
2235 
2236 void MacroAssembler::BranchAndLinkShort(int16_t offset, Condition cond,
2237  Register rs, const Operand& rt,
2238  BranchDelaySlot bdslot) {
2239  BRANCH_ARGS_CHECK(cond, rs, rt);
2240  Register r2 = no_reg;
2241  Register scratch = at;
2242 
2243  if (rt.is_reg()) {
2244  r2 = rt.rm_;
2245  } else if (cond != cc_always) {
2246  r2 = scratch;
2247  li(r2, rt);
2248  }
2249 
2250  switch (cond) {
2251  case cc_always:
2252  bal(offset);
2253  break;
2254  case eq:
2255  bne(rs, r2, 2);
2256  nop();
2257  bal(offset);
2258  break;
2259  case ne:
2260  beq(rs, r2, 2);
2261  nop();
2262  bal(offset);
2263  break;
2264 
2265  // Signed comparison.
2266  case greater:
2267  slt(scratch, r2, rs);
2268  addiu(scratch, scratch, -1);
2269  bgezal(scratch, offset);
2270  break;
2271  case greater_equal:
2272  slt(scratch, rs, r2);
2273  addiu(scratch, scratch, -1);
2274  bltzal(scratch, offset);
2275  break;
2276  case less:
2277  slt(scratch, rs, r2);
2278  addiu(scratch, scratch, -1);
2279  bgezal(scratch, offset);
2280  break;
2281  case less_equal:
2282  slt(scratch, r2, rs);
2283  addiu(scratch, scratch, -1);
2284  bltzal(scratch, offset);
2285  break;
2286 
2287  // Unsigned comparison.
2288  case Ugreater:
2289  sltu(scratch, r2, rs);
2290  addiu(scratch, scratch, -1);
2291  bgezal(scratch, offset);
2292  break;
2293  case Ugreater_equal:
2294  sltu(scratch, rs, r2);
2295  addiu(scratch, scratch, -1);
2296  bltzal(scratch, offset);
2297  break;
2298  case Uless:
2299  sltu(scratch, rs, r2);
2300  addiu(scratch, scratch, -1);
2301  bgezal(scratch, offset);
2302  break;
2303  case Uless_equal:
2304  sltu(scratch, r2, rs);
2305  addiu(scratch, scratch, -1);
2306  bltzal(scratch, offset);
2307  break;
2308 
2309  default:
2310  UNREACHABLE();
2311  }
2312  // Emit a nop in the branch delay slot if required.
2313  if (bdslot == PROTECT)
2314  nop();
2315 }
2316 
2317 
2318 void MacroAssembler::BranchAndLinkShort(Label* L, BranchDelaySlot bdslot) {
2319  bal(shifted_branch_offset(L, false));
2320 
2321  // Emit a nop in the branch delay slot if required.
2322  if (bdslot == PROTECT)
2323  nop();
2324 }
2325 
2326 
2327 void MacroAssembler::BranchAndLinkShort(Label* L, Condition cond, Register rs,
2328  const Operand& rt,
2329  BranchDelaySlot bdslot) {
2330  BRANCH_ARGS_CHECK(cond, rs, rt);
2331 
2332  int32_t offset;
2333  Register r2 = no_reg;
2334  Register scratch = at;
2335  if (rt.is_reg()) {
2336  r2 = rt.rm_;
2337  } else if (cond != cc_always) {
2338  r2 = scratch;
2339  li(r2, rt);
2340  }
2341 
2342  switch (cond) {
2343  case cc_always:
2344  offset = shifted_branch_offset(L, false);
2345  bal(offset);
2346  break;
2347  case eq:
2348  bne(rs, r2, 2);
2349  nop();
2350  offset = shifted_branch_offset(L, false);
2351  bal(offset);
2352  break;
2353  case ne:
2354  beq(rs, r2, 2);
2355  nop();
2356  offset = shifted_branch_offset(L, false);
2357  bal(offset);
2358  break;
2359 
2360  // Signed comparison.
2361  case greater:
2362  slt(scratch, r2, rs);
2363  addiu(scratch, scratch, -1);
2364  offset = shifted_branch_offset(L, false);
2365  bgezal(scratch, offset);
2366  break;
2367  case greater_equal:
2368  slt(scratch, rs, r2);
2369  addiu(scratch, scratch, -1);
2370  offset = shifted_branch_offset(L, false);
2371  bltzal(scratch, offset);
2372  break;
2373  case less:
2374  slt(scratch, rs, r2);
2375  addiu(scratch, scratch, -1);
2376  offset = shifted_branch_offset(L, false);
2377  bgezal(scratch, offset);
2378  break;
2379  case less_equal:
2380  slt(scratch, r2, rs);
2381  addiu(scratch, scratch, -1);
2382  offset = shifted_branch_offset(L, false);
2383  bltzal(scratch, offset);
2384  break;
2385 
2386  // Unsigned comparison.
2387  case Ugreater:
2388  sltu(scratch, r2, rs);
2389  addiu(scratch, scratch, -1);
2390  offset = shifted_branch_offset(L, false);
2391  bgezal(scratch, offset);
2392  break;
2393  case Ugreater_equal:
2394  sltu(scratch, rs, r2);
2395  addiu(scratch, scratch, -1);
2396  offset = shifted_branch_offset(L, false);
2397  bltzal(scratch, offset);
2398  break;
2399  case Uless:
2400  sltu(scratch, rs, r2);
2401  addiu(scratch, scratch, -1);
2402  offset = shifted_branch_offset(L, false);
2403  bgezal(scratch, offset);
2404  break;
2405  case Uless_equal:
2406  sltu(scratch, r2, rs);
2407  addiu(scratch, scratch, -1);
2408  offset = shifted_branch_offset(L, false);
2409  bltzal(scratch, offset);
2410  break;
2411 
2412  default:
2413  UNREACHABLE();
2414  }
2415 
2416  // Check that offset could actually hold on an int16_t.
2417  ASSERT(is_int16(offset));
2418 
2419  // Emit a nop in the branch delay slot if required.
2420  if (bdslot == PROTECT)
2421  nop();
2422 }
2423 
2424 
2425 void MacroAssembler::Jump(Register target,
2426  Condition cond,
2427  Register rs,
2428  const Operand& rt,
2429  BranchDelaySlot bd) {
2430  BlockTrampolinePoolScope block_trampoline_pool(this);
2431  if (cond == cc_always) {
2432  jr(target);
2433  } else {
2434  BRANCH_ARGS_CHECK(cond, rs, rt);
2435  Branch(2, NegateCondition(cond), rs, rt);
2436  jr(target);
2437  }
2438  // Emit a nop in the branch delay slot if required.
2439  if (bd == PROTECT)
2440  nop();
2441 }
2442 
2443 
2444 void MacroAssembler::Jump(intptr_t target,
2445  RelocInfo::Mode rmode,
2446  Condition cond,
2447  Register rs,
2448  const Operand& rt,
2449  BranchDelaySlot bd) {
2450  Label skip;
2451  if (cond != cc_always) {
2452  Branch(USE_DELAY_SLOT, &skip, NegateCondition(cond), rs, rt);
2453  }
2454  // The first instruction of 'li' may be placed in the delay slot.
2455  // This is not an issue; t9 is expected to be clobbered anyway.
2456  li(t9, Operand(target, rmode));
2457  Jump(t9, al, zero_reg, Operand(zero_reg), bd);
2458  bind(&skip);
2459 }
2460 
2461 
2462 void MacroAssembler::Jump(Address target,
2463  RelocInfo::Mode rmode,
2464  Condition cond,
2465  Register rs,
2466  const Operand& rt,
2467  BranchDelaySlot bd) {
2468  ASSERT(!RelocInfo::IsCodeTarget(rmode));
2469  Jump(reinterpret_cast<intptr_t>(target), rmode, cond, rs, rt, bd);
2470 }
2471 
2472 
2473 void MacroAssembler::Jump(Handle<Code> code,
2474  RelocInfo::Mode rmode,
2475  Condition cond,
2476  Register rs,
2477  const Operand& rt,
2478  BranchDelaySlot bd) {
2479  ASSERT(RelocInfo::IsCodeTarget(rmode));
2480  Jump(reinterpret_cast<intptr_t>(code.location()), rmode, cond, rs, rt, bd);
2481 }
2482 
2483 
2484 int MacroAssembler::CallSize(Register target,
2485  Condition cond,
2486  Register rs,
2487  const Operand& rt,
2488  BranchDelaySlot bd) {
2489  int size = 0;
2490 
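  // An unconditional call is a single jalr; a conditional call is the branch,
  // its delay-slot nop, and then the jalr (three instructions).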
2491  if (cond == cc_always) {
2492  size += 1;
2493  } else {
2494  size += 3;
2495  }
2496 
2497  if (bd == PROTECT)
2498  size += 1;
2499 
2500  return size * kInstrSize;
2501 }
2502 
2503 
2504 // Note: To call gcc-compiled C code on mips, you must call thru t9.
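// (gcc-generated position-independent code conventionally rebuilds $gp from
// t9 in the callee prologue, which is why the callee address must be in t9.)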
2505 void MacroAssembler::Call(Register target,
2506  Condition cond,
2507  Register rs,
2508  const Operand& rt,
2509  BranchDelaySlot bd) {
2510  BlockTrampolinePoolScope block_trampoline_pool(this);
2511  Label start;
2512  bind(&start);
2513  if (cond == cc_always) {
2514  jalr(target);
2515  } else {
2516  BRANCH_ARGS_CHECK(cond, rs, rt);
2517  Branch(2, NegateCondition(cond), rs, rt);
2518  jalr(target);
2519  }
2520  // Emit a nop in the branch delay slot if required.
2521  if (bd == PROTECT)
2522  nop();
2523 
2524  ASSERT_EQ(CallSize(target, cond, rs, rt, bd),
2525  SizeOfCodeGeneratedSince(&start));
2526 }
2527 
2528 
2529 int MacroAssembler::CallSize(Address target,
2530  RelocInfo::Mode rmode,
2531  Condition cond,
2532  Register rs,
2533  const Operand& rt,
2534  BranchDelaySlot bd) {
2535  int size = CallSize(t9, cond, rs, rt, bd);
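  // The call through t9 is preceded by a fixed-size li (lui/ori, forced by
  // CONSTANT_SIZE), hence the extra 2 * kInstrSize.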
2536  return size + 2 * kInstrSize;
2537 }
2538 
2539 
2540 void MacroAssembler::Call(Address target,
2541  RelocInfo::Mode rmode,
2542  Condition cond,
2543  Register rs,
2544  const Operand& rt,
2545  BranchDelaySlot bd) {
2546  BlockTrampolinePoolScope block_trampoline_pool(this);
2547  Label start;
2548  bind(&start);
2549  int32_t target_int = reinterpret_cast<int32_t>(target);
2550  // Must record previous source positions before the
2551  // li() generates a new code target.
2552  positions_recorder()->WriteRecordedPositions();
2553  li(t9, Operand(target_int, rmode), CONSTANT_SIZE);
2554  Call(t9, cond, rs, rt, bd);
2555  ASSERT_EQ(CallSize(target, rmode, cond, rs, rt, bd),
2556  SizeOfCodeGeneratedSince(&start));
2557 }
2558 
2559 
2560 int MacroAssembler::CallSize(Handle<Code> code,
2561  RelocInfo::Mode rmode,
2562  unsigned ast_id,
2563  Condition cond,
2564  Register rs,
2565  const Operand& rt,
2566  BranchDelaySlot bd) {
2567  return CallSize(reinterpret_cast<Address>(code.location()),
2568  rmode, cond, rs, rt, bd);
2569 }
2570 
2571 
2572 void MacroAssembler::Call(Handle<Code> code,
2573  RelocInfo::Mode rmode,
2574  unsigned ast_id,
2575  Condition cond,
2576  Register rs,
2577  const Operand& rt,
2578  BranchDelaySlot bd) {
2579  BlockTrampolinePoolScope block_trampoline_pool(this);
2580  Label start;
2581  bind(&start);
2582  ASSERT(RelocInfo::IsCodeTarget(rmode));
2583  if (rmode == RelocInfo::CODE_TARGET && ast_id != kNoASTId) {
2584  SetRecordedAstId(ast_id);
2585  rmode = RelocInfo::CODE_TARGET_WITH_ID;
2586  }
2587  Call(reinterpret_cast<Address>(code.location()), rmode, cond, rs, rt, bd);
2588  ASSERT_EQ(CallSize(code, rmode, ast_id, cond, rs, rt, bd),
2589  SizeOfCodeGeneratedSince(&start));
2590 }
2591 
2592 
2593 void MacroAssembler::Ret(Condition cond,
2594  Register rs,
2595  const Operand& rt,
2596  BranchDelaySlot bd) {
2597  Jump(ra, cond, rs, rt, bd);
2598 }
2599 
2600 
2601 void MacroAssembler::J(Label* L, BranchDelaySlot bdslot) {
2602  BlockTrampolinePoolScope block_trampoline_pool(this);
2603 
2604  uint32_t imm28;
2605  imm28 = jump_address(L);
2606  imm28 &= kImm28Mask;
2607  { BlockGrowBufferScope block_buf_growth(this);
2608  // Buffer growth (and relocation) must be blocked for internal references
2609  // until associated instructions are emitted and available to be patched.
2610  RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
2611  j(imm28);
2612  }
2613  // Emit a nop in the branch delay slot if required.
2614  if (bdslot == PROTECT)
2615  nop();
2616 }
2617 
2618 
2619 void MacroAssembler::Jr(Label* L, BranchDelaySlot bdslot) {
2620  BlockTrampolinePoolScope block_trampoline_pool(this);
2621 
2622  uint32_t imm32;
2623  imm32 = jump_address(L);
2624  { BlockGrowBufferScope block_buf_growth(this);
2625  // Buffer growth (and relocation) must be blocked for internal references
2626  // until associated instructions are emitted and available to be patched.
2627  RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
2628  lui(at, (imm32 & kHiMask) >> kLuiShift);
2629  ori(at, at, (imm32 & kImm16Mask));
2630  }
2631  jr(at);
2632 
2633  // Emit a nop in the branch delay slot if required.
2634  if (bdslot == PROTECT)
2635  nop();
2636 }
2637 
2638 
2639 void MacroAssembler::Jalr(Label* L, BranchDelaySlot bdslot) {
2640  BlockTrampolinePoolScope block_trampoline_pool(this);
2641 
2642  uint32_t imm32;
2643  imm32 = jump_address(L);
2644  { BlockGrowBufferScope block_buf_growth(this);
2645  // Buffer growth (and relocation) must be blocked for internal references
2646  // until associated instructions are emitted and available to be patched.
2647  RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
2648  lui(at, (imm32 & kHiMask) >> kLuiShift);
2649  ori(at, at, (imm32 & kImm16Mask));
2650  }
2651  jalr(at);
2652 
2653  // Emit a nop in the branch delay slot if required.
2654  if (bdslot == PROTECT)
2655  nop();
2656 }
2657 
2658 void MacroAssembler::DropAndRet(int drop) {
2659  Ret(USE_DELAY_SLOT);
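  // The addiu below lands in the delay slot of the jr(ra) emitted by Ret, so
  // the stack adjustment happens as part of the return sequence.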
2660  addiu(sp, sp, drop * kPointerSize);
2661 }
2662 
2663 void MacroAssembler::DropAndRet(int drop,
2664  Condition cond,
2665  Register r1,
2666  const Operand& r2) {
2667  // Both Drop and Ret need to be conditional.
2668  Label skip;
2669  if (cond != cc_always) {
2670  Branch(&skip, NegateCondition(cond), r1, r2);
2671  }
2672 
2673  Drop(drop);
2674  Ret();
2675 
2676  if (cond != cc_always) {
2677  bind(&skip);
2678  }
2679 }
2680 
2681 
2682 void MacroAssembler::Drop(int count,
2683  Condition cond,
2684  Register reg,
2685  const Operand& op) {
2686  if (count <= 0) {
2687  return;
2688  }
2689 
2690  Label skip;
2691 
2692  if (cond != al) {
2693  Branch(&skip, NegateCondition(cond), reg, op);
2694  }
2695 
2696  addiu(sp, sp, count * kPointerSize);
2697 
2698  if (cond != al) {
2699  bind(&skip);
2700  }
2701 }
2702 
2703 
2704 
2705 void MacroAssembler::Swap(Register reg1,
2706  Register reg2,
2707  Register scratch) {
2708  if (scratch.is(no_reg)) {
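  // No scratch register was provided: swap the two registers in place using
  // the XOR-swap idiom.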
2709  Xor(reg1, reg1, Operand(reg2));
2710  Xor(reg2, reg2, Operand(reg1));
2711  Xor(reg1, reg1, Operand(reg2));
2712  } else {
2713  mov(scratch, reg1);
2714  mov(reg1, reg2);
2715  mov(reg2, scratch);
2716  }
2717 }
2718 
2719 
2720 void MacroAssembler::Call(Label* target) {
2721  BranchAndLink(target);
2722 }
2723 
2724 
2725 void MacroAssembler::Push(Handle<Object> handle) {
2726  li(at, Operand(handle));
2727  push(at);
2728 }
2729 
2730 
2731 #ifdef ENABLE_DEBUGGER_SUPPORT
2732 
2733 void MacroAssembler::DebugBreak() {
2734  PrepareCEntryArgs(0);
2735  PrepareCEntryFunction(ExternalReference(Runtime::kDebugBreak, isolate()));
2736  CEntryStub ces(1);
2737  ASSERT(AllowThisStubCall(&ces));
2738  Call(ces.GetCode(), RelocInfo::DEBUG_BREAK);
2739 }
2740 
2741 #endif // ENABLE_DEBUGGER_SUPPORT
2742 
2743 
2744 // ---------------------------------------------------------------------------
2745 // Exception handling.
2746 
2747 void MacroAssembler::PushTryHandler(StackHandler::Kind kind,
2748  int handler_index) {
2749  // Adjust this code if not the case.
2750  STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
2751  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
2752  STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
2753  STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
2754  STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
2755  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
2756 
2757  // For the JSEntry handler, we must preserve a0-a3 and s0.
2758  // t1-t3 are available. We will build up the handler from the bottom by
2759  // pushing on the stack.
2760  // Set up the code object (t1) and the state (t2) for pushing.
2761  unsigned state =
2762  StackHandler::IndexField::encode(handler_index) |
2763  StackHandler::KindField::encode(kind);
2764  li(t1, Operand(CodeObject()), CONSTANT_SIZE);
2765  li(t2, Operand(state));
2766 
2767  // Push the frame pointer, context, state, and code object.
2768  if (kind == StackHandler::JS_ENTRY) {
2769  ASSERT_EQ(Smi::FromInt(0), 0);
2770  // The second zero_reg indicates no context.
2771  // The first zero_reg is the NULL frame pointer.
2772  // The operands are reversed to match the order of MultiPush/Pop.
2773  Push(zero_reg, zero_reg, t2, t1);
2774  } else {
2775  MultiPush(t1.bit() | t2.bit() | cp.bit() | fp.bit());
2776  }
2777 
2778  // Link the current handler as the next handler.
2779  li(t2, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
2780  lw(t1, MemOperand(t2));
2781  push(t1);
2782  // Set this new handler as the current one.
2783  sw(sp, MemOperand(t2));
2784 }
2785 
2786 
2787 void MacroAssembler::PopTryHandler() {
2788  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
2789  pop(a1);
2790  Addu(sp, sp, Operand(StackHandlerConstants::kSize - kPointerSize));
2791  li(at, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
2792  sw(a1, MemOperand(at));
2793 }
2794 
2795 
2796 void MacroAssembler::JumpToHandlerEntry() {
2797  // Compute the handler entry address and jump to it. The handler table is
2798  // a fixed array of (smi-tagged) code offsets.
2799  // v0 = exception, a1 = code object, a2 = state.
2800  lw(a3, FieldMemOperand(a1, Code::kHandlerTableOffset)); // Handler table.
2801  Addu(a3, a3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
2802  srl(a2, a2, StackHandler::kKindWidth); // Handler index.
2803  sll(a2, a2, kPointerSizeLog2);
2804  Addu(a2, a3, a2);
2805  lw(a2, MemOperand(a2)); // Smi-tagged offset.
2806  Addu(a1, a1, Operand(Code::kHeaderSize - kHeapObjectTag)); // Code start.
2807  sra(t9, a2, kSmiTagSize);
2808  Addu(t9, t9, a1);
2809  Jump(t9); // Jump.
2810 }
2811 
2812 
2813 void MacroAssembler::Throw(Register value) {
2814  // Adjust this code if not the case.
2815  STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
2816  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
2817  STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
2818  STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
2819  STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
2820  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
2821 
2822  // The exception is expected in v0.
2823  Move(v0, value);
2824 
2825  // Drop the stack pointer to the top of the top handler.
2826  li(a3, Operand(ExternalReference(Isolate::kHandlerAddress,
2827  isolate())));
2828  lw(sp, MemOperand(a3));
2829 
2830  // Restore the next handler.
2831  pop(a2);
2832  sw(a2, MemOperand(a3));
2833 
2834  // Get the code object (a1) and state (a2). Restore the context and frame
2835  // pointer.
2836  MultiPop(a1.bit() | a2.bit() | cp.bit() | fp.bit());
2837 
2838  // If the handler is a JS frame, restore the context to the frame.
2839  // (kind == ENTRY) == (fp == 0) == (cp == 0), so we could test either fp
2840  // or cp.
2841  Label done;
2842  Branch(&done, eq, cp, Operand(zero_reg));
2843  sw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
2844  bind(&done);
2845 
2846  JumpToHandlerEntry();
2847 }
2848 
2849 
2850 void MacroAssembler::ThrowUncatchable(Register value) {
2851  // Adjust this code if not the case.
2852  STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
2853  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
2854  STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
2855  STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
2856  STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
2857  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
2858 
2859  // The exception is expected in v0.
2860  if (!value.is(v0)) {
2861  mov(v0, value);
2862  }
2863  // Drop the stack pointer to the top of the top stack handler.
2864  li(a3, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
2865  lw(sp, MemOperand(a3));
2866 
2867  // Unwind the handlers until the ENTRY handler is found.
2868  Label fetch_next, check_kind;
2869  jmp(&check_kind);
2870  bind(&fetch_next);
2871  lw(sp, MemOperand(sp, StackHandlerConstants::kNextOffset));
2872 
2873  bind(&check_kind);
2874  STATIC_ASSERT(StackHandler::JS_ENTRY == 0);
2875  lw(a2, MemOperand(sp, StackHandlerConstants::kStateOffset));
2876  And(a2, a2, Operand(StackHandler::KindField::kMask));
2877  Branch(&fetch_next, ne, a2, Operand(zero_reg));
2878 
2879  // Set the top handler address to next handler past the top ENTRY handler.
2880  pop(a2);
2881  sw(a2, MemOperand(a3));
2882 
2883  // Get the code object (a1) and state (a2). Clear the context and frame
2884  // pointer (0 was saved in the handler).
2885  MultiPop(a1.bit() | a2.bit() | cp.bit() | fp.bit());
2886 
2887  JumpToHandlerEntry();
2888 }
2889 
2890 
2891 void MacroAssembler::AllocateInNewSpace(int object_size,
2892  Register result,
2893  Register scratch1,
2894  Register scratch2,
2895  Label* gc_required,
2896  AllocationFlags flags) {
2897  if (!FLAG_inline_new) {
2898  if (emit_debug_code()) {
2899  // Trash the registers to simulate an allocation failure.
2900  li(result, 0x7091);
2901  li(scratch1, 0x7191);
2902  li(scratch2, 0x7291);
2903  }
2904  jmp(gc_required);
2905  return;
2906  }
2907 
2908  ASSERT(!result.is(scratch1));
2909  ASSERT(!result.is(scratch2));
2910  ASSERT(!scratch1.is(scratch2));
2911  ASSERT(!scratch1.is(t9));
2912  ASSERT(!scratch2.is(t9));
2913  ASSERT(!result.is(t9));
2914 
2915  // Make object size into bytes.
2916  if ((flags & SIZE_IN_WORDS) != 0) {
2917  object_size *= kPointerSize;
2918  }
2919  ASSERT_EQ(0, object_size & kObjectAlignmentMask);
2920 
2921  // Check relative positions of allocation top and limit addresses.
2922  // ARM adds additional checks to make sure the ldm instruction can be
2923  // used. On MIPS we don't have ldm so we don't need additional checks either.
2924  ExternalReference new_space_allocation_top =
2925  ExternalReference::new_space_allocation_top_address(isolate());
2926  ExternalReference new_space_allocation_limit =
2927  ExternalReference::new_space_allocation_limit_address(isolate());
2928  intptr_t top =
2929  reinterpret_cast<intptr_t>(new_space_allocation_top.address());
2930  intptr_t limit =
2931  reinterpret_cast<intptr_t>(new_space_allocation_limit.address());
2932  ASSERT((limit - top) == kPointerSize);
2933 
2934  // Set up allocation top address and object size registers.
2935  Register topaddr = scratch1;
2936  Register obj_size_reg = scratch2;
2937  li(topaddr, Operand(new_space_allocation_top));
2938  li(obj_size_reg, Operand(object_size));
2939 
2940  // This code stores a temporary value in t9.
2941  if ((flags & RESULT_CONTAINS_TOP) == 0) {
2942  // Load allocation top into result and allocation limit into t9.
2943  lw(result, MemOperand(topaddr));
2944  lw(t9, MemOperand(topaddr, kPointerSize));
2945  } else {
2946  if (emit_debug_code()) {
2947  // Assert that result actually contains top on entry. t9 is used
2948  // immediately below, so this use of t9 does not cause a difference in
2949  // register content between debug and release mode.
2950  lw(t9, MemOperand(topaddr));
2951  Check(eq, "Unexpected allocation top", result, Operand(t9));
2952  }
2953  // Load allocation limit into t9. Result already contains allocation top.
2954  lw(t9, MemOperand(topaddr, limit - top));
2955  }
2956 
2957  // Calculate new top and bail out if new space is exhausted. Use result
2958  // to calculate the new top.
2959  Addu(scratch2, result, Operand(obj_size_reg));
2960  Branch(gc_required, Ugreater, scratch2, Operand(t9));
2961  sw(scratch2, MemOperand(topaddr));
2962 
2963  // Tag object if requested.
2964  if ((flags & TAG_OBJECT) != 0) {
2965  Addu(result, result, Operand(kHeapObjectTag));
2966  }
2967 }
2968 
2969 
2970 void MacroAssembler::AllocateInNewSpace(Register object_size,
2971  Register result,
2972  Register scratch1,
2973  Register scratch2,
2974  Label* gc_required,
2975  AllocationFlags flags) {
2976  if (!FLAG_inline_new) {
2977  if (emit_debug_code()) {
2978  // Trash the registers to simulate an allocation failure.
2979  li(result, 0x7091);
2980  li(scratch1, 0x7191);
2981  li(scratch2, 0x7291);
2982  }
2983  jmp(gc_required);
2984  return;
2985  }
2986 
2987  ASSERT(!result.is(scratch1));
2988  ASSERT(!result.is(scratch2));
2989  ASSERT(!scratch1.is(scratch2));
2990  ASSERT(!object_size.is(t9));
2991  ASSERT(!scratch1.is(t9) && !scratch2.is(t9) && !result.is(t9));
2992 
2993  // Check relative positions of allocation top and limit addresses.
2994  // ARM adds additional checks to make sure the ldm instruction can be
2995  // used. On MIPS we don't have ldm so we don't need additional checks either.
2996  ExternalReference new_space_allocation_top =
2997  ExternalReference::new_space_allocation_top_address(isolate());
2998  ExternalReference new_space_allocation_limit =
2999  ExternalReference::new_space_allocation_limit_address(isolate());
3000  intptr_t top =
3001  reinterpret_cast<intptr_t>(new_space_allocation_top.address());
3002  intptr_t limit =
3003  reinterpret_cast<intptr_t>(new_space_allocation_limit.address());
3004  ASSERT((limit - top) == kPointerSize);
3005 
3006  // Set up allocation top address and object size registers.
3007  Register topaddr = scratch1;
3008  li(topaddr, Operand(new_space_allocation_top));
3009 
3010  // This code stores a temporary value in t9.
3011  if ((flags & RESULT_CONTAINS_TOP) == 0) {
3012  // Load allocation top into result and allocation limit into t9.
3013  lw(result, MemOperand(topaddr));
3014  lw(t9, MemOperand(topaddr, kPointerSize));
3015  } else {
3016  if (emit_debug_code()) {
3017  // Assert that result actually contains top on entry. t9 is used
3018  // immediately below, so this use of t9 does not cause a difference in
3019  // register content between debug and release mode.
3020  lw(t9, MemOperand(topaddr));
3021  Check(eq, "Unexpected allocation top", result, Operand(t9));
3022  }
3023  // Load allocation limit into t9. Result already contains allocation top.
3024  lw(t9, MemOperand(topaddr, limit - top));
3025  }
3026 
3027  // Calculate new top and bail out if new space is exhausted. Use result
3028  // to calculate the new top. Object size may be in words so a shift is
3029  // required to get the number of bytes.
3030  if ((flags & SIZE_IN_WORDS) != 0) {
3031  sll(scratch2, object_size, kPointerSizeLog2);
3032  Addu(scratch2, result, scratch2);
3033  } else {
3034  Addu(scratch2, result, Operand(object_size));
3035  }
3036  Branch(gc_required, Ugreater, scratch2, Operand(t9));
3037 
3038  // Update allocation top. result temporarily holds the new top.
3039  if (emit_debug_code()) {
3040  And(t9, scratch2, Operand(kObjectAlignmentMask));
3041  Check(eq, "Unaligned allocation in new space", t9, Operand(zero_reg));
3042  }
3043  sw(scratch2, MemOperand(topaddr));
3044 
3045  // Tag object if requested.
3046  if ((flags & TAG_OBJECT) != 0) {
3047  Addu(result, result, Operand(kHeapObjectTag));
3048  }
3049 }
3050 
3051 
3052 void MacroAssembler::UndoAllocationInNewSpace(Register object,
3053  Register scratch) {
3054  ExternalReference new_space_allocation_top =
3055  ExternalReference::new_space_allocation_top_address(isolate());
3056 
3057  // Make sure the object has no tag before resetting top.
3058  And(object, object, Operand(~kHeapObjectTagMask));
3059 #ifdef DEBUG
3060  // Check that the object un-allocated is below the current top.
3061  li(scratch, Operand(new_space_allocation_top));
3062  lw(scratch, MemOperand(scratch));
3063  Check(less, "Undo allocation of non allocated memory",
3064  object, Operand(scratch));
3065 #endif
3066  // Write the address of the object to un-allocate as the current top.
3067  li(scratch, Operand(new_space_allocation_top));
3068  sw(object, MemOperand(scratch));
3069 }
3070 
3071 
3072 void MacroAssembler::AllocateTwoByteString(Register result,
3073  Register length,
3074  Register scratch1,
3075  Register scratch2,
3076  Register scratch3,
3077  Label* gc_required) {
3078  // Calculate the number of bytes needed for the characters in the string while
3079  // observing object alignment.
3080  ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
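  // The next three instructions compute
  //   size = (2 * length + SeqTwoByteString::kHeaderSize + kObjectAlignmentMask)
  //          & ~kObjectAlignmentMask,
  // i.e. header plus character data, rounded up to the object alignment.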
3081  sll(scratch1, length, 1); // Length in bytes, not chars.
3082  addiu(scratch1, scratch1,
3083  kObjectAlignmentMask + SeqTwoByteString::kHeaderSize);
3084  And(scratch1, scratch1, Operand(~kObjectAlignmentMask));
3085 
3086  // Allocate two-byte string in new space.
3087  AllocateInNewSpace(scratch1,
3088  result,
3089  scratch2,
3090  scratch3,
3091  gc_required,
3092  TAG_OBJECT);
3093 
3094  // Set the map, length and hash field.
3095  InitializeNewString(result,
3096  length,
3097  Heap::kStringMapRootIndex,
3098  scratch1,
3099  scratch2);
3100 }
3101 
3102 
3103 void MacroAssembler::AllocateAsciiString(Register result,
3104  Register length,
3105  Register scratch1,
3106  Register scratch2,
3107  Register scratch3,
3108  Label* gc_required) {
3109  // Calculate the number of bytes needed for the characters in the string
3110  // while observing object alignment.
3111  ASSERT((SeqAsciiString::kHeaderSize & kObjectAlignmentMask) == 0);
3112  ASSERT(kCharSize == 1);
3113  addiu(scratch1, length, kObjectAlignmentMask + SeqAsciiString::kHeaderSize);
3114  And(scratch1, scratch1, Operand(~kObjectAlignmentMask));
3115 
3116  // Allocate ASCII string in new space.
3117  AllocateInNewSpace(scratch1,
3118  result,
3119  scratch2,
3120  scratch3,
3121  gc_required,
3122  TAG_OBJECT);
3123 
3124  // Set the map, length and hash field.
3125  InitializeNewString(result,
3126  length,
3127  Heap::kAsciiStringMapRootIndex,
3128  scratch1,
3129  scratch2);
3130 }
3131 
3132 
3133 void MacroAssembler::AllocateTwoByteConsString(Register result,
3134  Register length,
3135  Register scratch1,
3136  Register scratch2,
3137  Label* gc_required) {
3138  AllocateInNewSpace(ConsString::kSize,
3139  result,
3140  scratch1,
3141  scratch2,
3142  gc_required,
3143  TAG_OBJECT);
3144  InitializeNewString(result,
3145  length,
3146  Heap::kConsStringMapRootIndex,
3147  scratch1,
3148  scratch2);
3149 }
3150 
3151 
3152 void MacroAssembler::AllocateAsciiConsString(Register result,
3153  Register length,
3154  Register scratch1,
3155  Register scratch2,
3156  Label* gc_required) {
3157  AllocateInNewSpace(ConsString::kSize,
3158  result,
3159  scratch1,
3160  scratch2,
3161  gc_required,
3162  TAG_OBJECT);
3163  InitializeNewString(result,
3164  length,
3165  Heap::kConsAsciiStringMapRootIndex,
3166  scratch1,
3167  scratch2);
3168 }
3169 
3170 
3171 void MacroAssembler::AllocateTwoByteSlicedString(Register result,
3172  Register length,
3173  Register scratch1,
3174  Register scratch2,
3175  Label* gc_required) {
3176  AllocateInNewSpace(SlicedString::kSize,
3177  result,
3178  scratch1,
3179  scratch2,
3180  gc_required,
3181  TAG_OBJECT);
3182 
3183  InitializeNewString(result,
3184  length,
3185  Heap::kSlicedStringMapRootIndex,
3186  scratch1,
3187  scratch2);
3188 }
3189 
3190 
3191 void MacroAssembler::AllocateAsciiSlicedString(Register result,
3192  Register length,
3193  Register scratch1,
3194  Register scratch2,
3195  Label* gc_required) {
3196  AllocateInNewSpace(SlicedString::kSize,
3197  result,
3198  scratch1,
3199  scratch2,
3200  gc_required,
3201  TAG_OBJECT);
3202 
3203  InitializeNewString(result,
3204  length,
3205  Heap::kSlicedAsciiStringMapRootIndex,
3206  scratch1,
3207  scratch2);
3208 }
3209 
3210 
3211 // Allocates a heap number or jumps to the label if the young space is full and
3212 // a scavenge is needed.
3213 void MacroAssembler::AllocateHeapNumber(Register result,
3214  Register scratch1,
3215  Register scratch2,
3216  Register heap_number_map,
3217  Label* need_gc) {
3218  // Allocate an object in the heap for the heap number and tag it as a heap
3219  // object.
3220  AllocateInNewSpace(HeapNumber::kSize,
3221  result,
3222  scratch1,
3223  scratch2,
3224  need_gc,
3225  TAG_OBJECT);
3226 
3227  // Store heap number map in the allocated object.
3228  AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
3229  sw(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset));
3230 }
3231 
3232 
3233 void MacroAssembler::AllocateHeapNumberWithValue(Register result,
3234  FPURegister value,
3235  Register scratch1,
3236  Register scratch2,
3237  Label* gc_required) {
3238  LoadRoot(t8, Heap::kHeapNumberMapRootIndex);
3239  AllocateHeapNumber(result, scratch1, scratch2, t8, gc_required);
3240  sdc1(value, FieldMemOperand(result, HeapNumber::kValueOffset));
3241 }
3242 
3243 
3244 // Copies a fixed number of fields of heap objects from src to dst.
3245 void MacroAssembler::CopyFields(Register dst,
3246  Register src,
3247  RegList temps,
3248  int field_count) {
3249  ASSERT((temps & dst.bit()) == 0);
3250  ASSERT((temps & src.bit()) == 0);
3251  // Primitive implementation using only one temporary register.
3252 
3253  Register tmp = no_reg;
3254  // Find a temp register in temps list.
3255  for (int i = 0; i < kNumRegisters; i++) {
3256  if ((temps & (1 << i)) != 0) {
3257  tmp.code_ = i;
3258  break;
3259  }
3260  }
3261  ASSERT(!tmp.is(no_reg));
3262 
3263  for (int i = 0; i < field_count; i++) {
3264  lw(tmp, FieldMemOperand(src, i * kPointerSize));
3265  sw(tmp, FieldMemOperand(dst, i * kPointerSize));
3266  }
3267 }
3268 
3269 
3270 void MacroAssembler::CopyBytes(Register src,
3271  Register dst,
3272  Register length,
3273  Register scratch) {
3274  Label align_loop, align_loop_1, word_loop, byte_loop, byte_loop_1, done;
3275 
3276  // Align src before copying in word size chunks.
3277  bind(&align_loop);
3278  Branch(&done, eq, length, Operand(zero_reg));
3279  bind(&align_loop_1);
3280  And(scratch, src, kPointerSize - 1);
3281  Branch(&word_loop, eq, scratch, Operand(zero_reg));
3282  lbu(scratch, MemOperand(src));
3283  Addu(src, src, 1);
3284  sb(scratch, MemOperand(dst));
3285  Addu(dst, dst, 1);
3286  Subu(length, length, Operand(1));
3287  Branch(&byte_loop_1, ne, length, Operand(zero_reg));
3288 
3289  // Copy bytes in word size chunks.
3290  bind(&word_loop);
3291  if (emit_debug_code()) {
3292  And(scratch, src, kPointerSize - 1);
3293  Assert(eq, "Expecting alignment for CopyBytes",
3294  scratch, Operand(zero_reg));
3295  }
3296  Branch(&byte_loop, lt, length, Operand(kPointerSize));
3297  lw(scratch, MemOperand(src));
3298  Addu(src, src, kPointerSize);
3299 
3300  // TODO(kalmard) check if this can be optimized to use sw in most cases.
3301  // Can't use unaligned access - copy byte by byte.
3302  sb(scratch, MemOperand(dst, 0));
3303  srl(scratch, scratch, 8);
3304  sb(scratch, MemOperand(dst, 1));
3305  srl(scratch, scratch, 8);
3306  sb(scratch, MemOperand(dst, 2));
3307  srl(scratch, scratch, 8);
3308  sb(scratch, MemOperand(dst, 3));
3309  Addu(dst, dst, 4);
3310 
3311  Subu(length, length, Operand(kPointerSize));
3312  Branch(&word_loop);
3313 
3314  // Copy the last bytes if any left.
3315  bind(&byte_loop);
3316  Branch(&done, eq, length, Operand(zero_reg));
3317  bind(&byte_loop_1);
3318  lbu(scratch, MemOperand(src));
3319  Addu(src, src, 1);
3320  sb(scratch, MemOperand(dst));
3321  Addu(dst, dst, 1);
3322  Subu(length, length, Operand(1));
3323  Branch(&byte_loop_1, ne, length, Operand(zero_reg));
3324  bind(&done);
3325 }
3326 
3327 
3328 void MacroAssembler::InitializeFieldsWithFiller(Register start_offset,
3329  Register end_offset,
3330  Register filler) {
3331  Label loop, entry;
3332  Branch(&entry);
3333  bind(&loop);
3334  sw(filler, MemOperand(start_offset));
3335  Addu(start_offset, start_offset, kPointerSize);
3336  bind(&entry);
3337  Branch(&loop, lt, start_offset, Operand(end_offset));
3338 }
3339 
3340 
3341 void MacroAssembler::CheckFastElements(Register map,
3342  Register scratch,
3343  Label* fail) {
3344  STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
3345  STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
3346  STATIC_ASSERT(FAST_ELEMENTS == 2);
3347  STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
3348  lbu(scratch, FieldMemOperand(map, Map::kBitField2Offset));
3349  Branch(fail, hi, scratch,
3350  Operand(Map::kMaximumBitField2FastHoleyElementValue));
3351 }
3352 
3353 
3354 void MacroAssembler::CheckFastObjectElements(Register map,
3355  Register scratch,
3356  Label* fail) {
3357  STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
3358  STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
3359  STATIC_ASSERT(FAST_ELEMENTS == 2);
3360  STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
3361  lbu(scratch, FieldMemOperand(map, Map::kBitField2Offset));
3362  Branch(fail, ls, scratch,
3363  Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
3364  Branch(fail, hi, scratch,
3365  Operand(Map::kMaximumBitField2FastHoleyElementValue));
3366 }
3367 
3368 
3369 void MacroAssembler::CheckFastSmiElements(Register map,
3370  Register scratch,
3371  Label* fail) {
3372  STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
3373  STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
3374  lbu(scratch, FieldMemOperand(map, Map::kBitField2Offset));
3375  Branch(fail, hi, scratch,
3376  Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
3377 }
3378 
3379 
3380 void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
3381  Register key_reg,
3382  Register receiver_reg,
3383  Register elements_reg,
3384  Register scratch1,
3385  Register scratch2,
3386  Register scratch3,
3387  Register scratch4,
3388  Label* fail) {
3389  Label smi_value, maybe_nan, have_double_value, is_nan, done;
3390  Register mantissa_reg = scratch2;
3391  Register exponent_reg = scratch3;
3392 
3393  // Handle smi values specially.
3394  JumpIfSmi(value_reg, &smi_value);
3395 
3396  // Ensure that the object is a heap number
3397  CheckMap(value_reg,
3398  scratch1,
3399  Heap::kHeapNumberMapRootIndex,
3400  fail,
3401  DONT_DO_SMI_CHECK);
3402 
3403  // Check for nan: all NaN values have a value greater (signed) than 0x7ff00000
3404  // in the exponent.
3405  li(scratch1, Operand(kNaNOrInfinityLowerBoundUpper32));
3406  lw(exponent_reg, FieldMemOperand(value_reg, HeapNumber::kExponentOffset));
3407  Branch(&maybe_nan, ge, exponent_reg, Operand(scratch1));
3408 
3409  lw(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset));
3410 
3411  bind(&have_double_value);
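  // key_reg holds a smi; shifting by (kDoubleSizeLog2 - kSmiTagSize) both
  // untags the key and scales it to a byte offset into the double array.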
3412  sll(scratch1, key_reg, kDoubleSizeLog2 - kSmiTagSize);
3413  Addu(scratch1, scratch1, elements_reg);
3414  sw(mantissa_reg, FieldMemOperand(scratch1, FixedDoubleArray::kHeaderSize));
3415  uint32_t offset = FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32);
3416  sw(exponent_reg, FieldMemOperand(scratch1, offset));
3417  jmp(&done);
3418 
3419  bind(&maybe_nan);
3420  // Could be NaN or Infinity. If fraction is not zero, it's NaN, otherwise
3421  // it's an Infinity, and the non-NaN code path applies.
3422  Branch(&is_nan, gt, exponent_reg, Operand(scratch1));
3423  lw(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset));
3424  Branch(&have_double_value, eq, mantissa_reg, Operand(zero_reg));
3425  bind(&is_nan);
3426  // Load canonical NaN for storing into the double array.
3427  uint64_t nan_int64 = BitCast<uint64_t>(
3428  FixedDoubleArray::canonical_not_the_hole_nan_as_double());
3429  li(mantissa_reg, Operand(static_cast<uint32_t>(nan_int64)));
3430  li(exponent_reg, Operand(static_cast<uint32_t>(nan_int64 >> 32)));
3431  jmp(&have_double_value);
3432 
3433  bind(&smi_value);
3434  Addu(scratch1, elements_reg,
3435  Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
3436  sll(scratch2, key_reg, kDoubleSizeLog2 - kSmiTagSize);
3437  Addu(scratch1, scratch1, scratch2);
3438  // scratch1 is now the effective address of the double element.
3439 
3440  FloatingPointHelper::Destination destination;
3441  if (CpuFeatures::IsSupported(FPU)) {
3442  destination = FloatingPointHelper::kFPURegisters;
3443  } else {
3444  destination = FloatingPointHelper::kCoreRegisters;
3445  }
3446 
3447  Register untagged_value = receiver_reg;
3448  SmiUntag(untagged_value, value_reg);
3449  FloatingPointHelper::ConvertIntToDouble(this,
3450  untagged_value,
3451  destination,
3452  f0,
3453  mantissa_reg,
3454  exponent_reg,
3455  scratch4,
3456  f2);
3457  if (destination == FloatingPointHelper::kFPURegisters) {
3458  CpuFeatures::Scope scope(FPU);
3459  sdc1(f0, MemOperand(scratch1, 0));
3460  } else {
3461  sw(mantissa_reg, MemOperand(scratch1, 0));
3462  sw(exponent_reg, MemOperand(scratch1, Register::kSizeInBytes));
3463  }
3464  bind(&done);
3465 }
3466 
3467 
3468 void MacroAssembler::CompareMapAndBranch(Register obj,
3469  Register scratch,
3470  Handle<Map> map,
3471  Label* early_success,
3472  Condition cond,
3473  Label* branch_to,
3474  CompareMapMode mode) {
3475  lw(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
3476  CompareMapAndBranch(scratch, map, early_success, cond, branch_to, mode);
3477 }
3478 
3479 
3480 void MacroAssembler::CompareMapAndBranch(Register obj_map,
3481  Handle<Map> map,
3482  Label* early_success,
3483  Condition cond,
3484  Label* branch_to,
3485  CompareMapMode mode) {
3486  Operand right = Operand(map);
3487  if (mode == ALLOW_ELEMENT_TRANSITION_MAPS) {
3488  ElementsKind kind = map->elements_kind();
3489  if (IsFastElementsKind(kind)) {
3490  bool packed = IsFastPackedElementsKind(kind);
3491  Map* current_map = *map;
3492  while (CanTransitionToMoreGeneralFastElementsKind(kind, packed)) {
3493  kind = GetNextMoreGeneralFastElementsKind(kind, packed);
3494  current_map = current_map->LookupElementsTransitionMap(kind);
3495  if (!current_map) break;
3496  Branch(early_success, eq, obj_map, right);
3497  right = Operand(Handle<Map>(current_map));
3498  }
3499  }
3500  }
3501 
3502  Branch(branch_to, cond, obj_map, right);
3503 }
3504 
3505 
3506 void MacroAssembler::CheckMap(Register obj,
3507  Register scratch,
3508  Handle<Map> map,
3509  Label* fail,
3510  SmiCheckType smi_check_type,
3511  CompareMapMode mode) {
3512  if (smi_check_type == DO_SMI_CHECK) {
3513  JumpIfSmi(obj, fail);
3514  }
3515  Label success;
3516  CompareMapAndBranch(obj, scratch, map, &success, ne, fail, mode);
3517  bind(&success);
3518 }
3519 
3520 
3521 void MacroAssembler::DispatchMap(Register obj,
3522  Register scratch,
3523  Handle<Map> map,
3524  Handle<Code> success,
3525  SmiCheckType smi_check_type) {
3526  Label fail;
3527  if (smi_check_type == DO_SMI_CHECK) {
3528  JumpIfSmi(obj, &fail);
3529  }
3530  lw(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
3531  Jump(success, RelocInfo::CODE_TARGET, eq, scratch, Operand(map));
3532  bind(&fail);
3533 }
3534 
3535 
3536 void MacroAssembler::CheckMap(Register obj,
3537  Register scratch,
3538  Heap::RootListIndex index,
3539  Label* fail,
3540  SmiCheckType smi_check_type) {
3541  if (smi_check_type == DO_SMI_CHECK) {
3542  JumpIfSmi(obj, fail);
3543  }
3544  lw(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
3545  LoadRoot(at, index);
3546  Branch(fail, ne, scratch, Operand(at));
3547 }
3548 
3549 
3550 void MacroAssembler::GetCFunctionDoubleResult(const DoubleRegister dst) {
3551  CpuFeatures::Scope scope(FPU);
3552  if (IsMipsSoftFloatABI) {
3553  Move(dst, v0, v1);
3554  } else {
3555  Move(dst, f0); // Reg f0 is o32 ABI FP return value.
3556  }
3557 }
3558 
3559 
3560 void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg) {
3561  CpuFeatures::Scope scope(FPU);
3562  if (!IsMipsSoftFloatABI) {
3563  Move(f12, dreg);
3564  } else {
3565  Move(a0, a1, dreg);
3566  }
3567 }
3568 
3569 
3570 void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg1,
3571  DoubleRegister dreg2) {
3572  CpuFeatures::Scope scope(FPU);
3573  if (!IsMipsSoftFloatABI) {
3574  if (dreg2.is(f12)) {
3575  ASSERT(!dreg1.is(f14));
3576  Move(f14, dreg2);
3577  Move(f12, dreg1);
3578  } else {
3579  Move(f12, dreg1);
3580  Move(f14, dreg2);
3581  }
3582  } else {
3583  Move(a0, a1, dreg1);
3584  Move(a2, a3, dreg2);
3585  }
3586 }
3587 
3588 
3589 void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg,
3590  Register reg) {
3591  CpuFeatures::Scope scope(FPU);
3592  if (!IsMipsSoftFloatABI) {
3593  Move(f12, dreg);
3594  Move(a2, reg);
3595  } else {
3596  Move(a2, reg);
3597  Move(a0, a1, dreg);
3598  }
3599 }
3600 
3601 
3602 void MacroAssembler::SetCallKind(Register dst, CallKind call_kind) {
3603  // This macro takes the dst register to make the code more readable
3604  // at the call sites. However, the dst register has to be t1 to
3605  // follow the calling convention which requires the call type to be
3606  // in t1.
3607  ASSERT(dst.is(t1));
3608  if (call_kind == CALL_AS_FUNCTION) {
3609  li(dst, Operand(Smi::FromInt(1)));
3610  } else {
3611  li(dst, Operand(Smi::FromInt(0)));
3612  }
3613 }
3614 
3615 
3616 // -----------------------------------------------------------------------------
3617 // JavaScript invokes.
3618 
3619 void MacroAssembler::InvokePrologue(const ParameterCount& expected,
3620  const ParameterCount& actual,
3621  Handle<Code> code_constant,
3622  Register code_reg,
3623  Label* done,
3624  bool* definitely_mismatches,
3625  InvokeFlag flag,
3626  const CallWrapper& call_wrapper,
3627  CallKind call_kind) {
3628  bool definitely_matches = false;
3629  *definitely_mismatches = false;
3630  Label regular_invoke;
3631 
3632  // Check whether the expected and actual arguments count match. If not,
3633  // setup registers according to contract with ArgumentsAdaptorTrampoline:
3634  // a0: actual arguments count
3635  // a1: function (passed through to callee)
3636  // a2: expected arguments count
3637  // a3: callee code entry
3638 
3639  // The code below is made a lot easier because the calling code already sets
3640  // up actual and expected registers according to the contract if values are
3641  // passed in registers.
3642  ASSERT(actual.is_immediate() || actual.reg().is(a0));
3643  ASSERT(expected.is_immediate() || expected.reg().is(a2));
3644  ASSERT((!code_constant.is_null() && code_reg.is(no_reg)) || code_reg.is(a3));
3645 
3646  if (expected.is_immediate()) {
3647  ASSERT(actual.is_immediate());
3648  if (expected.immediate() == actual.immediate()) {
3649  definitely_matches = true;
3650  } else {
3651  li(a0, Operand(actual.immediate()));
3652  const int sentinel = SharedFunctionInfo::kDontAdaptArgumentsSentinel;
3653  if (expected.immediate() == sentinel) {
3654  // Don't worry about adapting arguments for builtins that
3655  // don't want that done. Skip adaption code by making it look
3656  // like we have a match between expected and actual number of
3657  // arguments.
3658  definitely_matches = true;
3659  } else {
3660  *definitely_mismatches = true;
3661  li(a2, Operand(expected.immediate()));
3662  }
3663  }
3664  } else if (actual.is_immediate()) {
3665  Branch(&regular_invoke, eq, expected.reg(), Operand(actual.immediate()));
3666  li(a0, Operand(actual.immediate()));
3667  } else {
3668  Branch(&regular_invoke, eq, expected.reg(), Operand(actual.reg()));
3669  }
3670 
3671  if (!definitely_matches) {
3672  if (!code_constant.is_null()) {
3673  li(a3, Operand(code_constant));
3674  addiu(a3, a3, Code::kHeaderSize - kHeapObjectTag);
3675  }
3676 
3677  Handle<Code> adaptor =
3678  isolate()->builtins()->ArgumentsAdaptorTrampoline();
3679  if (flag == CALL_FUNCTION) {
3680  call_wrapper.BeforeCall(CallSize(adaptor));
3681  SetCallKind(t1, call_kind);
3682  Call(adaptor);
3683  call_wrapper.AfterCall();
3684  if (!*definitely_mismatches) {
3685  Branch(done);
3686  }
3687  } else {
3688  SetCallKind(t1, call_kind);
3689  Jump(adaptor, RelocInfo::CODE_TARGET);
3690  }
3691  bind(&regular_invoke);
3692  }
3693 }
3694 
3695 
3696 void MacroAssembler::InvokeCode(Register code,
3697  const ParameterCount& expected,
3698  const ParameterCount& actual,
3699  InvokeFlag flag,
3700  const CallWrapper& call_wrapper,
3701  CallKind call_kind) {
3702  // You can't call a function without a valid frame.
3703  ASSERT(flag == JUMP_FUNCTION || has_frame());
3704 
3705  Label done;
3706 
3707  bool definitely_mismatches = false;
3708  InvokePrologue(expected, actual, Handle<Code>::null(), code,
3709  &done, &definitely_mismatches, flag,
3710  call_wrapper, call_kind);
3711  if (!definitely_mismatches) {
3712  if (flag == CALL_FUNCTION) {
3713  call_wrapper.BeforeCall(CallSize(code));
3714  SetCallKind(t1, call_kind);
3715  Call(code);
3716  call_wrapper.AfterCall();
3717  } else {
3718  ASSERT(flag == JUMP_FUNCTION);
3719  SetCallKind(t1, call_kind);
3720  Jump(code);
3721  }
3722  // Continue here if InvokePrologue handled the invocation via the
3723  // arguments adaptor because of mismatched parameter counts.
3724  bind(&done);
3725  }
3726 }
3727 
3728 
3729 void MacroAssembler::InvokeCode(Handle<Code> code,
3730  const ParameterCount& expected,
3731  const ParameterCount& actual,
3732  RelocInfo::Mode rmode,
3733  InvokeFlag flag,
3734  CallKind call_kind) {
3735  // You can't call a function without a valid frame.
3736  ASSERT(flag == JUMP_FUNCTION || has_frame());
3737 
3738  Label done;
3739 
3740  bool definitely_mismatches = false;
3741  InvokePrologue(expected, actual, code, no_reg,
3742  &done, &definitely_mismatches, flag,
3743  NullCallWrapper(), call_kind);
3744  if (!definitely_mismatches) {
3745  if (flag == CALL_FUNCTION) {
3746  SetCallKind(t1, call_kind);
3747  Call(code, rmode);
3748  } else {
3749  SetCallKind(t1, call_kind);
3750  Jump(code, rmode);
3751  }
3752  // Continue here if InvokePrologue handled the invocation via the
3753  // arguments adaptor because of mismatched parameter counts.
3754  bind(&done);
3755  }
3756 }
3757 
3758 
3759 void MacroAssembler::InvokeFunction(Register function,
3760  const ParameterCount& actual,
3761  InvokeFlag flag,
3762  const CallWrapper& call_wrapper,
3763  CallKind call_kind) {
3764  // You can't call a function without a valid frame.
3765  ASSERT(flag == JUMP_FUNCTION || has_frame());
3766 
3767  // Contract with called JS functions requires that function is passed in a1.
3768  ASSERT(function.is(a1));
3769  Register expected_reg = a2;
3770  Register code_reg = a3;
3771 
3772  lw(code_reg, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
3773  lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
3774  lw(expected_reg,
3775  FieldMemOperand(code_reg,
3776  SharedFunctionInfo::kFormalParameterCountOffset));
3777  sra(expected_reg, expected_reg, kSmiTagSize);
3778  lw(code_reg, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
3779 
3780  ParameterCount expected(expected_reg);
3781  InvokeCode(code_reg, expected, actual, flag, call_wrapper, call_kind);
3782 }
3783 
3784 
3785 void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
3786  const ParameterCount& actual,
3787  InvokeFlag flag,
3788  const CallWrapper& call_wrapper,
3789  CallKind call_kind) {
3790  // You can't call a function without a valid frame.
3791  ASSERT(flag == JUMP_FUNCTION || has_frame());
3792 
3793  // Get the function and setup the context.
3794  LoadHeapObject(a1, function);
3795  lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
3796 
3797  ParameterCount expected(function->shared()->formal_parameter_count());
3798  // We call indirectly through the code field in the function to
3799  // allow recompilation to take effect without changing any of the
3800  // call sites.
3801  lw(a3, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
3802  InvokeCode(a3, expected, actual, flag, call_wrapper, call_kind);
3803 }
3804 
3805 
3806 void MacroAssembler::IsObjectJSObjectType(Register heap_object,
3807  Register map,
3808  Register scratch,
3809  Label* fail) {
3810  lw(map, FieldMemOperand(heap_object, HeapObject::kMapOffset));
3811  IsInstanceJSObjectType(map, scratch, fail);
3812 }
3813 
3814 
3815 void MacroAssembler::IsInstanceJSObjectType(Register map,
3816  Register scratch,
3817  Label* fail) {
3818  lbu(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
3819  Branch(fail, lt, scratch, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
3820  Branch(fail, gt, scratch, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
3821 }
3822 
3823 
3824 void MacroAssembler::IsObjectJSStringType(Register object,
3825  Register scratch,
3826  Label* fail) {
3827  ASSERT(kNotStringTag != 0);
3828 
3829  lw(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
3830  lbu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
3831  And(scratch, scratch, Operand(kIsNotStringMask));
3832  Branch(fail, ne, scratch, Operand(zero_reg));
3833 }
3834 
3835 
3836 // ---------------------------------------------------------------------------
3837 // Support functions.
3838 
3839 
3840 void MacroAssembler::TryGetFunctionPrototype(Register function,
3841  Register result,
3842  Register scratch,
3843  Label* miss,
3844  bool miss_on_bound_function) {
3845  // Check that the receiver isn't a smi.
3846  JumpIfSmi(function, miss);
3847 
3848  // Check that the function really is a function. Load map into result reg.
3849  GetObjectType(function, result, scratch);
3850  Branch(miss, ne, scratch, Operand(JS_FUNCTION_TYPE));
3851 
3852  if (miss_on_bound_function) {
3853  lw(scratch,
3854  FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
3855  lw(scratch,
3856  FieldMemOperand(scratch, SharedFunctionInfo::kCompilerHintsOffset));
3857  And(scratch, scratch,
3858  Operand(Smi::FromInt(1 << SharedFunctionInfo::kBoundFunction)));
3859  Branch(miss, ne, scratch, Operand(zero_reg));
3860  }
3861 
3862  // Make sure that the function has an instance prototype.
3863  Label non_instance;
3864  lbu(scratch, FieldMemOperand(result, Map::kBitFieldOffset));
3865  And(scratch, scratch, Operand(1 << Map::kHasNonInstancePrototype));
3866  Branch(&non_instance, ne, scratch, Operand(zero_reg));
3867 
3868  // Get the prototype or initial map from the function.
3869  lw(result,
3870  FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
3871 
3872  // If the prototype or initial map is the hole, don't return it and
3873  // simply miss the cache instead. This will allow us to allocate a
3874  // prototype object on-demand in the runtime system.
3875  LoadRoot(t8, Heap::kTheHoleValueRootIndex);
3876  Branch(miss, eq, result, Operand(t8));
3877 
3878  // If the function does not have an initial map, we're done.
3879  Label done;
3880  GetObjectType(result, scratch, scratch);
3881  Branch(&done, ne, scratch, Operand(MAP_TYPE));
3882 
3883  // Get the prototype from the initial map.
3884  lw(result, FieldMemOperand(result, Map::kPrototypeOffset));
3885  jmp(&done);
3886 
3887  // Non-instance prototype: Fetch prototype from constructor field
3888  // in initial map.
3889  bind(&non_instance);
3890  lw(result, FieldMemOperand(result, Map::kConstructorOffset));
3891 
3892  // All done.
3893  bind(&done);
3894 }
3895 
3896 
3897 void MacroAssembler::GetObjectType(Register object,
3898  Register map,
3899  Register type_reg) {
3900  lw(map, FieldMemOperand(object, HeapObject::kMapOffset));
3901  lbu(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
3902 }
3903 
3904 
3905 // -----------------------------------------------------------------------------
3906 // Runtime calls.
3907 
3908 void MacroAssembler::CallStub(CodeStub* stub,
3909  Condition cond,
3910  Register r1,
3911  const Operand& r2,
3912  BranchDelaySlot bd) {
3913  ASSERT(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs.
3914  Call(stub->GetCode(), RelocInfo::CODE_TARGET, kNoASTId, cond, r1, r2, bd);
3915 }
3916 
3917 
3918 void MacroAssembler::TailCallStub(CodeStub* stub) {
3919  ASSERT(allow_stub_calls_ || stub->CompilingCallsToThisStubIsGCSafe());
3920  Jump(stub->GetCode(), RelocInfo::CODE_TARGET);
3921 }
3922 
3923 
3924 static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
3925  return ref0.address() - ref1.address();
3926 }
3927 
3928 
3929 void MacroAssembler::CallApiFunctionAndReturn(ExternalReference function,
3930  int stack_space) {
3931  ExternalReference next_address =
3932  ExternalReference::handle_scope_next_address();
3933  const int kNextOffset = 0;
3934  const int kLimitOffset = AddressOffset(
3935  ExternalReference::handle_scope_limit_address(),
3936  next_address);
3937  const int kLevelOffset = AddressOffset(
3938  ExternalReference::handle_scope_level_address(),
3939  next_address);
3940 
3941  // Allocate HandleScope in callee-save registers.
3942  li(s3, Operand(next_address));
3943  lw(s0, MemOperand(s3, kNextOffset));
3944  lw(s1, MemOperand(s3, kLimitOffset));
3945  lw(s2, MemOperand(s3, kLevelOffset));
3946  Addu(s2, s2, Operand(1));
3947  sw(s2, MemOperand(s3, kLevelOffset));
3948 
3949  // The O32 ABI requires us to pass a pointer in a0 where the returned struct
3950  // (4 bytes) will be placed. This is also built into the Simulator.
3951  // Set up the pointer to the returned value (a0). It was allocated in
3952  // EnterExitFrame.
3953  addiu(a0, fp, ExitFrameConstants::kStackSpaceOffset);
3954 
3955  // Native call returns to the DirectCEntry stub which redirects to the
3956  // return address pushed on stack (could have moved after GC).
3957  // DirectCEntry stub itself is generated early and never moves.
3958  DirectCEntryStub stub;
3959  stub.GenerateCall(this, function);
3960 
3961  // As mentioned above, on MIPS a pointer is returned - we need to dereference
3962  // it to get the actual return value (which is also a pointer).
3963  lw(v0, MemOperand(v0));
3964 
3965  Label promote_scheduled_exception;
3966  Label delete_allocated_handles;
3967  Label leave_exit_frame;
3968 
3969  // If the result is non-zero, dereference it to get the result value;
3970  // otherwise set it to undefined.
3971  Label skip;
3972  LoadRoot(a0, Heap::kUndefinedValueRootIndex);
3973  Branch(&skip, eq, v0, Operand(zero_reg));
3974  lw(a0, MemOperand(v0));
3975  bind(&skip);
3976  mov(v0, a0);
3977 
3978  // No more valid handles (the result handle was the last one). Restore
3979  // previous handle scope.
3980  sw(s0, MemOperand(s3, kNextOffset));
3981  if (emit_debug_code()) {
3982  lw(a1, MemOperand(s3, kLevelOffset));
3983  Check(eq, "Unexpected level after return from api call", a1, Operand(s2));
3984  }
3985  Subu(s2, s2, Operand(1));
3986  sw(s2, MemOperand(s3, kLevelOffset));
3987  lw(at, MemOperand(s3, kLimitOffset));
3988  Branch(&delete_allocated_handles, ne, s1, Operand(at));
3989 
3990  // Check if the function scheduled an exception.
3991  bind(&leave_exit_frame);
3992  LoadRoot(t0, Heap::kTheHoleValueRootIndex);
3993  li(at, Operand(ExternalReference::scheduled_exception_address(isolate())));
3994  lw(t1, MemOperand(at));
3995  Branch(&promote_scheduled_exception, ne, t0, Operand(t1));
3996  li(s0, Operand(stack_space));
3997  LeaveExitFrame(false, s0, true);
3998 
3999  bind(&promote_scheduled_exception);
4000  TailCallExternalReference(
4001  ExternalReference(Runtime::kPromoteScheduledException, isolate()),
4002  0,
4003  1);
4004 
4005  // HandleScope limit has changed. Delete allocated extensions.
4006  bind(&delete_allocated_handles);
4007  sw(s1, MemOperand(s3, kLimitOffset));
4008  mov(s0, v0);
4009  mov(a0, v0);
4010  PrepareCallCFunction(1, s1);
4011  li(a0, Operand(ExternalReference::isolate_address()));
4012  CallCFunction(ExternalReference::delete_handle_scope_extensions(isolate()),
4013  1);
4014  mov(v0, s0);
4015  jmp(&leave_exit_frame);
4016 }
4017 
4018 
4019 bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
4020  if (!has_frame_ && stub->SometimesSetsUpAFrame()) return false;
4021  return allow_stub_calls_ || stub->CompilingCallsToThisStubIsGCSafe();
4022 }
4023 
4024 
4025 void MacroAssembler::IllegalOperation(int num_arguments) {
4026  if (num_arguments > 0) {
4027  addiu(sp, sp, num_arguments * kPointerSize);
4028  }
4029  LoadRoot(v0, Heap::kUndefinedValueRootIndex);
4030 }
4031 
4032 
4033 void MacroAssembler::IndexFromHash(Register hash,
4034  Register index) {
4035  // If the hash field contains an array index pick it out. The assert checks
4036  // that the constants for the maximum number of digits for an array index
4037  // cached in the hash field and the number of bits reserved for it do not
4038  // conflict.
4039  ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) <
4040  (1 << String::kArrayIndexValueBits));
4041  // We want the smi-tagged index in 'index'. kArrayIndexValueMask has zeros in
4042  // the low kHashShift bits.
4043  STATIC_ASSERT(kSmiTag == 0);
4044  Ext(hash, hash, String::kHashShift, String::kArrayIndexValueBits);
4045  sll(index, hash, kSmiTagSize);
4046 }
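
// A minimal standalone sketch (not part of this file) of the same
// extraction, using <stdint.h> types and illustrative constants; the real
// values of String::kHashShift and String::kArrayIndexValueBits come from
// objects.h and are assumed here to be 2 and 24.
static inline uint32_t SmiTaggedIndexFromHashField(uint32_t hash_field) {
  const int kHashShift = 2;             // Assumed illustrative value.
  const int kArrayIndexValueBits = 24;  // Assumed illustrative value.
  const int kSmiTagSize = 1;
  // Equivalent of Ext(hash, hash, kHashShift, kArrayIndexValueBits).
  uint32_t index = (hash_field >> kHashShift) & ((1u << kArrayIndexValueBits) - 1);
  // Equivalent of sll(index, hash, kSmiTagSize): smi-tag the index.
  return index << kSmiTagSize;
}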
4047 
4048 
4049 void MacroAssembler::ObjectToDoubleFPURegister(Register object,
4050  FPURegister result,
4051  Register scratch1,
4052  Register scratch2,
4053  Register heap_number_map,
4054  Label* not_number,
4055  ObjectToDoubleFlags flags) {
4056  Label done;
4057  if ((flags & OBJECT_NOT_SMI) == 0) {
4058  Label not_smi;
4059  JumpIfNotSmi(object, &not_smi);
4060  // Remove smi tag and convert to double.
4061  sra(scratch1, object, kSmiTagSize);
4062  mtc1(scratch1, result);
4063  cvt_d_w(result, result);
4064  Branch(&done);
4065  bind(&not_smi);
4066  }
4067  // Check for heap number and load double value from it.
4068  lw(scratch1, FieldMemOperand(object, HeapObject::kMapOffset));
4069  Branch(not_number, ne, scratch1, Operand(heap_number_map));
4070 
4071  if ((flags & AVOID_NANS_AND_INFINITIES) != 0) {
4072  // If exponent is all ones the number is either a NaN or +/-Infinity.
4073  Register exponent = scratch1;
4074  Register mask_reg = scratch2;
4075  lw(exponent, FieldMemOperand(object, HeapNumber::kExponentOffset));
4076  li(mask_reg, HeapNumber::kExponentMask);
4077 
4078  And(exponent, exponent, mask_reg);
4079  Branch(not_number, eq, exponent, Operand(mask_reg));
4080  }
4081  ldc1(result, FieldMemOperand(object, HeapNumber::kValueOffset));
4082  bind(&done);
4083 }
4084 
4085 
4086 void MacroAssembler::SmiToDoubleFPURegister(Register smi,
4087  FPURegister value,
4088  Register scratch1) {
4089  sra(scratch1, smi, kSmiTagSize);
4090  mtc1(scratch1, value);
4091  cvt_d_w(value, value);
4092 }
4093 
4094 
4095 void MacroAssembler::AdduAndCheckForOverflow(Register dst,
4096  Register left,
4097  Register right,
4098  Register overflow_dst,
4099  Register scratch) {
4100  ASSERT(!dst.is(overflow_dst));
4101  ASSERT(!dst.is(scratch));
4102  ASSERT(!overflow_dst.is(scratch));
4103  ASSERT(!overflow_dst.is(left));
4104  ASSERT(!overflow_dst.is(right));
4105 
4106  if (left.is(right) && dst.is(left)) {
4107  ASSERT(!dst.is(t9));
4108  ASSERT(!scratch.is(t9));
4109  ASSERT(!left.is(t9));
4110  ASSERT(!right.is(t9));
4111  ASSERT(!overflow_dst.is(t9));
4112  mov(t9, right);
4113  right = t9;
4114  }
4115 
4116  if (dst.is(left)) {
4117  mov(scratch, left); // Preserve left.
4118  addu(dst, left, right); // Left is overwritten.
4119  xor_(scratch, dst, scratch); // Original left.
4120  xor_(overflow_dst, dst, right);
4121  and_(overflow_dst, overflow_dst, scratch);
4122  } else if (dst.is(right)) {
4123  mov(scratch, right); // Preserve right.
4124  addu(dst, left, right); // Right is overwritten.
4125  xor_(scratch, dst, scratch); // Original right.
4126  xor_(overflow_dst, dst, left);
4127  and_(overflow_dst, overflow_dst, scratch);
4128  } else {
4129  addu(dst, left, right);
4130  xor_(overflow_dst, dst, left);
4131  xor_(scratch, dst, right);
4132  and_(overflow_dst, scratch, overflow_dst);
4133  }
4134 }
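
// A minimal standalone sketch (not part of this file) of the bit trick the
// register shuffling above implements, using <stdint.h> types: signed
// addition overflows exactly when both operands differ in sign from the
// result, i.e. when (left ^ sum) & (right ^ sum) has its sign bit set.
static inline bool SignedAddOverflows(int32_t left, int32_t right) {
  int32_t sum = static_cast<int32_t>(
      static_cast<uint32_t>(left) + static_cast<uint32_t>(right));
  return ((left ^ sum) & (right ^ sum)) < 0;
}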
4135 
4136 
4137 void MacroAssembler::SubuAndCheckForOverflow(Register dst,
4138  Register left,
4139  Register right,
4140  Register overflow_dst,
4141  Register scratch) {
4142  ASSERT(!dst.is(overflow_dst));
4143  ASSERT(!dst.is(scratch));
4144  ASSERT(!overflow_dst.is(scratch));
4145  ASSERT(!overflow_dst.is(left));
4146  ASSERT(!overflow_dst.is(right));
4147  ASSERT(!scratch.is(left));
4148  ASSERT(!scratch.is(right));
4149 
4150  // This happens with some Crankshaft code. Since Subu works fine if
4151  // left == right, let's not make that restriction here.
4152  if (left.is(right)) {
4153  mov(dst, zero_reg);
4154  mov(overflow_dst, zero_reg);
4155  return;
4156  }
4157 
4158  if (dst.is(left)) {
4159  mov(scratch, left); // Preserve left.
4160  subu(dst, left, right); // Left is overwritten.
4161  xor_(overflow_dst, dst, scratch); // scratch is original left.
4162  xor_(scratch, scratch, right); // scratch is original left.
4163  and_(overflow_dst, scratch, overflow_dst);
4164  } else if (dst.is(right)) {
4165  mov(scratch, right); // Preserve right.
4166  subu(dst, left, right); // Right is overwritten.
4167  xor_(overflow_dst, dst, left);
4168  xor_(scratch, left, scratch); // Original right.
4169  and_(overflow_dst, scratch, overflow_dst);
4170  } else {
4171  subu(dst, left, right);
4172  xor_(overflow_dst, dst, left);
4173  xor_(scratch, left, right);
4174  and_(overflow_dst, scratch, overflow_dst);
4175  }
4176 }
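
// The matching sketch for subtraction (again, not part of this file):
// left - right overflows exactly when the operands differ in sign and the
// result differs in sign from left, which is what the xor_/and_ sequence
// above computes into overflow_dst.
static inline bool SignedSubOverflows(int32_t left, int32_t right) {
  int32_t diff = static_cast<int32_t>(
      static_cast<uint32_t>(left) - static_cast<uint32_t>(right));
  return ((left ^ diff) & (left ^ right)) < 0;
}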
4177 
4178 
4179 void MacroAssembler::CallRuntime(const Runtime::Function* f,
4180  int num_arguments) {
4181  // All parameters are on the stack. v0 has the return value after call.
4182 
4183  // If the expected number of arguments of the runtime function is
4184  // constant, we check that the actual number of arguments match the
4185  // expectation.
4186  if (f->nargs >= 0 && f->nargs != num_arguments) {
4187  IllegalOperation(num_arguments);
4188  return;
4189  }
4190 
4191  // TODO(1236192): Most runtime routines don't need the number of
4192  // arguments passed in because it is constant. At some point we
4193  // should remove this need and make the runtime routine entry code
4194  // smarter.
4195  PrepareCEntryArgs(num_arguments);
4196  PrepareCEntryFunction(ExternalReference(f, isolate()));
4197  CEntryStub stub(1);
4198  CallStub(&stub);
4199 }
4200 
4201 
4202 void MacroAssembler::CallRuntimeSaveDoubles(Runtime::FunctionId id) {
4203  const Runtime::Function* function = Runtime::FunctionForId(id);
4204  PrepareCEntryArgs(function->nargs);
4205  PrepareCEntryFunction(ExternalReference(function, isolate()));
4206  CEntryStub stub(1, kSaveFPRegs);
4207  CallStub(&stub);
4208 }
4209 
4210 
4211 void MacroAssembler::CallRuntime(Runtime::FunctionId fid, int num_arguments) {
4212  CallRuntime(Runtime::FunctionForId(fid), num_arguments);
4213 }
4214 
4215 
4216 void MacroAssembler::CallExternalReference(const ExternalReference& ext,
4217  int num_arguments,
4218  BranchDelaySlot bd) {
4219  PrepareCEntryArgs(num_arguments);
4220  PrepareCEntryFunction(ext);
4221 
4222  CEntryStub stub(1);
4223  CallStub(&stub, al, zero_reg, Operand(zero_reg), bd);
4224 }
4225 
4226 
4227 void MacroAssembler::TailCallExternalReference(const ExternalReference& ext,
4228  int num_arguments,
4229  int result_size) {
4230  // TODO(1236192): Most runtime routines don't need the number of
4231  // arguments passed in because it is constant. At some point we
4232  // should remove this need and make the runtime routine entry code
4233  // smarter.
4234  PrepareCEntryArgs(num_arguments);
4235  JumpToExternalReference(ext);
4236 }
4237 
4238 
4239 void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
4240  int num_arguments,
4241  int result_size) {
4242  TailCallExternalReference(ExternalReference(fid, isolate()),
4243  num_arguments,
4244  result_size);
4245 }
4246 
4247 
4248 void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
4249  BranchDelaySlot bd) {
4250  PrepareCEntryFunction(builtin);
4251  CEntryStub stub(1);
4252  Jump(stub.GetCode(),
4253  RelocInfo::CODE_TARGET,
4254  al,
4255  zero_reg,
4256  Operand(zero_reg),
4257  bd);
4258 }
4259 
4260 
4261 void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
4262  InvokeFlag flag,
4263  const CallWrapper& call_wrapper) {
4264  // You can't call a builtin without a valid frame.
4265  ASSERT(flag == JUMP_FUNCTION || has_frame());
4266 
4267  GetBuiltinEntry(t9, id);
4268  if (flag == CALL_FUNCTION) {
4269  call_wrapper.BeforeCall(CallSize(t9));
4270  SetCallKind(t1, CALL_AS_METHOD);
4271  Call(t9);
4272  call_wrapper.AfterCall();
4273  } else {
4274  ASSERT(flag == JUMP_FUNCTION);
4275  SetCallKind(t1, CALL_AS_METHOD);
4276  Jump(t9);
4277  }
4278 }
4279 
4280 
4281 void MacroAssembler::GetBuiltinFunction(Register target,
4282  Builtins::JavaScript id) {
4283  // Load the builtins object into target register.
4284  lw(target, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
4285  lw(target, FieldMemOperand(target, GlobalObject::kBuiltinsOffset));
4286  // Load the JavaScript builtin function from the builtins object.
4287  lw(target, FieldMemOperand(target,
4288  JSBuiltinsObject::OffsetOfFunctionWithId(id)));
4289 }
4290 
4291 
4292 void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
4293  ASSERT(!target.is(a1));
4294  GetBuiltinFunction(a1, id);
4295  // Load the code entry point from the builtins object.
4296  lw(target, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
4297 }
4298 
4299 
4300 void MacroAssembler::SetCounter(StatsCounter* counter, int value,
4301  Register scratch1, Register scratch2) {
4302  if (FLAG_native_code_counters && counter->Enabled()) {
4303  li(scratch1, Operand(value));
4304  li(scratch2, Operand(ExternalReference(counter)));
4305  sw(scratch1, MemOperand(scratch2));
4306  }
4307 }
4308 
4309 
4310 void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
4311  Register scratch1, Register scratch2) {
4312  ASSERT(value > 0);
4313  if (FLAG_native_code_counters && counter->Enabled()) {
4314  li(scratch2, Operand(ExternalReference(counter)));
4315  lw(scratch1, MemOperand(scratch2));
4316  Addu(scratch1, scratch1, Operand(value));
4317  sw(scratch1, MemOperand(scratch2));
4318  }
4319 }
4320 
4321 
4322 void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
4323  Register scratch1, Register scratch2) {
4324  ASSERT(value > 0);
4325  if (FLAG_native_code_counters && counter->Enabled()) {
4326  li(scratch2, Operand(ExternalReference(counter)));
4327  lw(scratch1, MemOperand(scratch2));
4328  Subu(scratch1, scratch1, Operand(value));
4329  sw(scratch1, MemOperand(scratch2));
4330  }
4331 }
4332 
4333 
4334 // -----------------------------------------------------------------------------
4335 // Debugging.
4336 
4337 void MacroAssembler::Assert(Condition cc, const char* msg,
4338  Register rs, Operand rt) {
4339  if (emit_debug_code())
4340  Check(cc, msg, rs, rt);
4341 }
4342 
4343 
4344 void MacroAssembler::AssertRegisterIsRoot(Register reg,
4345  Heap::RootListIndex index) {
4346  if (emit_debug_code()) {
4347  LoadRoot(at, index);
4348  Check(eq, "Register did not match expected root", reg, Operand(at));
4349  }
4350 }
4351 
4352 
4353 void MacroAssembler::AssertFastElements(Register elements) {
4354  if (emit_debug_code()) {
4355  ASSERT(!elements.is(at));
4356  Label ok;
4357  push(elements);
4358  lw(elements, FieldMemOperand(elements, HeapObject::kMapOffset));
4359  LoadRoot(at, Heap::kFixedArrayMapRootIndex);
4360  Branch(&ok, eq, elements, Operand(at));
4361  LoadRoot(at, Heap::kFixedDoubleArrayMapRootIndex);
4362  Branch(&ok, eq, elements, Operand(at));
4363  LoadRoot(at, Heap::kFixedCOWArrayMapRootIndex);
4364  Branch(&ok, eq, elements, Operand(at));
4365  Abort("JSObject with fast elements map has slow elements");
4366  bind(&ok);
4367  pop(elements);
4368  }
4369 }
4370 
4371 
4372 void MacroAssembler::Check(Condition cc, const char* msg,
4373  Register rs, Operand rt) {
4374  Label L;
4375  Branch(&L, cc, rs, rt);
4376  Abort(msg);
4377  // Will not return here.
4378  bind(&L);
4379 }
4380 
4381 
4382 void MacroAssembler::Abort(const char* msg) {
4383  Label abort_start;
4384  bind(&abort_start);
4385  // We want to pass the msg string as a smi to avoid GC
4386  // problems; however, msg is not guaranteed to be aligned
4387  // properly. Instead, we pass an aligned pointer that is
4388  // a proper v8 smi, but also pass the alignment difference
4389  // from the real pointer as a smi.
4390  intptr_t p1 = reinterpret_cast<intptr_t>(msg);
4391  intptr_t p0 = (p1 & ~kSmiTagMask) + kSmiTag;
4392  ASSERT(reinterpret_cast<Object*>(p0)->IsSmi());
4393 #ifdef DEBUG
4394  if (msg != NULL) {
4395  RecordComment("Abort message: ");
4396  RecordComment(msg);
4397  }
4398 #endif
4399 
4400  li(a0, Operand(p0));
4401  push(a0);
4402  li(a0, Operand(Smi::FromInt(p1 - p0)));
4403  push(a0);
4404  // Disable stub call restrictions to always allow calls to abort.
4405  if (!has_frame_) {
4406  // We don't actually want to generate a pile of code for this, so just
4407  // claim there is a stack frame, without generating one.
4408  FrameScope scope(this, StackFrame::NONE);
4409  CallRuntime(Runtime::kAbort, 2);
4410  } else {
4411  CallRuntime(Runtime::kAbort, 2);
4412  }
4413  // Will not return here.
4414  if (is_trampoline_pool_blocked()) {
4415  // If the calling code cares about the exact number of
4416  // instructions generated, we insert padding here to keep the size
4417  // of the Abort macro constant.
4418  // Currently in debug mode with debug_code enabled the number of
4419  // generated instructions is 14, so we use this as a maximum value.
4420  static const int kExpectedAbortInstructions = 14;
4421  int abort_instructions = InstructionsGeneratedSince(&abort_start);
4422  ASSERT(abort_instructions <= kExpectedAbortInstructions);
4423  while (abort_instructions++ < kExpectedAbortInstructions) {
4424  nop();
4425  }
4426  }
4427 }
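
// A minimal standalone sketch (not part of this file) of the message
// encoding used by Abort above, assuming kSmiTag == 0 and kSmiTagMask == 1:
// the pointer is rounded down to an even, smi-looking value and the lost
// low bit is passed separately as a genuine smi, so the runtime can
// recover the original pointer as p0 + delta without the GC ever seeing a
// raw, possibly unaligned, pointer on the stack.
static inline void EncodeAbortMessage(const char* msg,
                                      intptr_t* p0, intptr_t* delta) {
  intptr_t p1 = reinterpret_cast<intptr_t>(msg);
  *p0 = p1 & ~static_cast<intptr_t>(1);  // (p1 & ~kSmiTagMask) + kSmiTag.
  *delta = p1 - *p0;                     // 0 or 1, trivially smi-encodable.
}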
4428 
4429 
4430 void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
4431  if (context_chain_length > 0) {
4432  // Move up the chain of contexts to the context containing the slot.
4433  lw(dst, MemOperand(cp, Context::SlotOffset(Context::PREVIOUS_INDEX)));
4434  for (int i = 1; i < context_chain_length; i++) {
4435  lw(dst, MemOperand(dst, Context::SlotOffset(Context::PREVIOUS_INDEX)));
4436  }
4437  } else {
4438  // Slot is in the current function context. Move it into the
4439  // destination register in case we store into it (the write barrier
4440  // cannot be allowed to destroy the context in cp).
4441  Move(dst, cp);
4442  }
4443 }
4444 
4445 
4446 void MacroAssembler::LoadTransitionedArrayMapConditional(
4447  ElementsKind expected_kind,
4448  ElementsKind transitioned_kind,
4449  Register map_in_out,
4450  Register scratch,
4451  Label* no_map_match) {
4452  // Load the global or builtins object from the current context.
4453  lw(scratch, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
4454  lw(scratch, FieldMemOperand(scratch, GlobalObject::kGlobalContextOffset));
4455 
4456  // Check that the function's map is the same as the expected cached map.
4457  lw(scratch,
4458  MemOperand(scratch,
4459  Context::SlotOffset(Context::JS_ARRAY_MAPS_INDEX)));
4460  size_t offset = expected_kind * kPointerSize +
4461  FixedArrayBase::kHeaderSize;
4462  lw(at, FieldMemOperand(scratch, offset));
4463  Branch(no_map_match, ne, map_in_out, Operand(at));
4464  // Use the transitioned cached map.
4465  offset = transitioned_kind * kPointerSize +
4466  FixedArrayBase::kHeaderSize;
4467  lw(map_in_out, FieldMemOperand(scratch, offset));
4468 }
4469 
4470 
4471 void MacroAssembler::LoadInitialArrayMap(
4472  Register function_in, Register scratch,
4473  Register map_out, bool can_have_holes) {
4474  ASSERT(!function_in.is(map_out));
4475  Label done;
4476  lw(map_out, FieldMemOperand(function_in,
4477  JSFunction::kPrototypeOrInitialMapOffset));
4478  if (!FLAG_smi_only_arrays) {
4479  ElementsKind kind = can_have_holes ? FAST_HOLEY_ELEMENTS : FAST_ELEMENTS;
4480  LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
4481  kind,
4482  map_out,
4483  scratch,
4484  &done);
4485  } else if (can_have_holes) {
4486  LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
4487  FAST_HOLEY_SMI_ELEMENTS,
4488  map_out,
4489  scratch,
4490  &done);
4491  }
4492  bind(&done);
4493 }
4494 
4495 
4496 void MacroAssembler::LoadGlobalFunction(int index, Register function) {
4497  // Load the global or builtins object from the current context.
4498  lw(function, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
4499  // Load the global context from the global or builtins object.
4500  lw(function, FieldMemOperand(function,
4501  GlobalObject::kGlobalContextOffset));
4502  // Load the function from the global context.
4503  lw(function, MemOperand(function, Context::SlotOffset(index)));
4504 }
4505 
4506 
4507 void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
4508  Register map,
4509  Register scratch) {
4510  // Load the initial map. The global functions all have initial maps.
4511  lw(map, FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
4512  if (emit_debug_code()) {
4513  Label ok, fail;
4514  CheckMap(map, scratch, Heap::kMetaMapRootIndex, &fail, DO_SMI_CHECK);
4515  Branch(&ok);
4516  bind(&fail);
4517  Abort("Global functions must have initial map");
4518  bind(&ok);
4519  }
4520 }
4521 
4522 
4523 void MacroAssembler::EnterFrame(StackFrame::Type type) {
4524  addiu(sp, sp, -5 * kPointerSize);
4525  li(t8, Operand(Smi::FromInt(type)));
4526  li(t9, Operand(CodeObject()), CONSTANT_SIZE);
4527  sw(ra, MemOperand(sp, 4 * kPointerSize));
4528  sw(fp, MemOperand(sp, 3 * kPointerSize));
4529  sw(cp, MemOperand(sp, 2 * kPointerSize));
4530  sw(t8, MemOperand(sp, 1 * kPointerSize));
4531  sw(t9, MemOperand(sp, 0 * kPointerSize));
4532  addiu(fp, sp, 3 * kPointerSize);
4533 }
4534 
4535 
4536 void MacroAssembler::LeaveFrame(StackFrame::Type type) {
4537  mov(sp, fp);
4538  lw(fp, MemOperand(sp, 0 * kPointerSize));
4539  lw(ra, MemOperand(sp, 1 * kPointerSize));
4540  addiu(sp, sp, 2 * kPointerSize);
4541 }
4542 
4543 
4544 void MacroAssembler::EnterExitFrame(bool save_doubles,
4545  int stack_space) {
4546  // Set up the frame structure on the stack.
4547  STATIC_ASSERT(2 * kPointerSize == ExitFrameConstants::kCallerSPDisplacement);
4548  STATIC_ASSERT(1 * kPointerSize == ExitFrameConstants::kCallerPCOffset);
4549  STATIC_ASSERT(0 * kPointerSize == ExitFrameConstants::kCallerFPOffset);
4550 
4551  // This is how the stack will look:
4552  // fp + 2 (==kCallerSPDisplacement) - old stack's end
4553  // [fp + 1 (==kCallerPCOffset)] - saved old ra
4554  // [fp + 0 (==kCallerFPOffset)] - saved old fp
4555  // [fp - 1 (==kSPOffset)] - sp of the called function
4556  // [fp - 2 (==kCodeOffset)] - CodeObject
4557  // fp - (2 + stack_space + alignment) == sp == [fp - kSPOffset] - top of the
4558  // new stack (will contain saved ra)
4559 
4560  // Save registers.
4561  addiu(sp, sp, -4 * kPointerSize);
4562  sw(ra, MemOperand(sp, 3 * kPointerSize));
4563  sw(fp, MemOperand(sp, 2 * kPointerSize));
4564  addiu(fp, sp, 2 * kPointerSize); // Set up new frame pointer.
4565 
4566  if (emit_debug_code()) {
4567  sw(zero_reg, MemOperand(fp, ExitFrameConstants::kSPOffset));
4568  }
4569 
4570  // Accessed from ExitFrame::code_slot.
4571  li(t8, Operand(CodeObject()), CONSTANT_SIZE);
4572  sw(t8, MemOperand(fp, ExitFrameConstants::kCodeOffset));
4573 
4574  // Save the frame pointer and the context in top.
4575  li(t8, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
4576  sw(fp, MemOperand(t8));
4577  li(t8, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
4578  sw(cp, MemOperand(t8));
4579 
4580  const int frame_alignment = MacroAssembler::ActivationFrameAlignment();
4581  if (save_doubles) {
4582  // The stack must be aligned to 0 modulo 8 for stores with sdc1.
4583  ASSERT(kDoubleSize == frame_alignment);
4584  if (frame_alignment > 0) {
4585  ASSERT(IsPowerOf2(frame_alignment));
4586  And(sp, sp, Operand(-frame_alignment)); // Align stack.
4587  }
4588  int space = FPURegister::kNumRegisters * kDoubleSize;
4589  Subu(sp, sp, Operand(space));
4590  // Remember: we only need to save every 2nd double FPU value.
4591  for (int i = 0; i < FPURegister::kNumRegisters; i+=2) {
4592  FPURegister reg = FPURegister::from_code(i);
4593  sdc1(reg, MemOperand(sp, i * kDoubleSize));
4594  }
4595  }
4596 
4597  // Reserve space for the return address, stack space and an optional slot
4598  // (used by the DirectCEntryStub to hold the return value if a struct is
4599  // returned) and align the frame preparing for calling the runtime function.
4600  ASSERT(stack_space >= 0);
4601  Subu(sp, sp, Operand((stack_space + 2) * kPointerSize));
4602  if (frame_alignment > 0) {
4603  ASSERT(IsPowerOf2(frame_alignment));
4604  And(sp, sp, Operand(-frame_alignment)); // Align stack.
4605  }
4606 
4607  // Set the exit frame sp value to point just before the return address
4608  // location.
4609  addiu(at, sp, kPointerSize);
4610  sw(at, MemOperand(fp, ExitFrameConstants::kSPOffset));
4611 }
4612 
4613 
4614 void MacroAssembler::LeaveExitFrame(bool save_doubles,
4615  Register argument_count,
4616  bool do_return) {
4617  // Optionally restore all double registers.
4618  if (save_doubles) {
4619  // Remember: we only need to restore every 2nd double FPU value.
4620  lw(t8, MemOperand(fp, ExitFrameConstants::kSPOffset));
4621  for (int i = 0; i < FPURegister::kNumRegisters; i+=2) {
4622  FPURegister reg = FPURegister::from_code(i);
4623  ldc1(reg, MemOperand(t8, i * kDoubleSize + kPointerSize));
4624  }
4625  }
4626 
4627  // Clear top frame.
4628  li(t8, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
4629  sw(zero_reg, MemOperand(t8));
4630 
4631  // Restore current context from top and clear it in debug mode.
4632  li(t8, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
4633  lw(cp, MemOperand(t8));
4634 #ifdef DEBUG
4635  sw(a3, MemOperand(t8));
4636 #endif
4637 
4638  // Pop the arguments, restore registers, and return.
4639  mov(sp, fp); // Respect ABI stack constraint.
4640  lw(fp, MemOperand(sp, ExitFrameConstants::kCallerFPOffset));
4641  lw(ra, MemOperand(sp, ExitFrameConstants::kCallerPCOffset));
4642 
4643  if (argument_count.is_valid()) {
4644  sll(t8, argument_count, kPointerSizeLog2);
4645  addu(sp, sp, t8);
4646  }
4647 
4648  if (do_return) {
4649  Ret(USE_DELAY_SLOT);
4650  // If returning, the instruction in the delay slot will be the addiu below.
4651  }
4652  addiu(sp, sp, 8);
4653 }
4654 
4655 
4656 void MacroAssembler::InitializeNewString(Register string,
4657  Register length,
4658  Heap::RootListIndex map_index,
4659  Register scratch1,
4660  Register scratch2) {
4661  sll(scratch1, length, kSmiTagSize);
4662  LoadRoot(scratch2, map_index);
4663  sw(scratch1, FieldMemOperand(string, String::kLengthOffset));
4664  li(scratch1, Operand(String::kEmptyHashField));
4665  sw(scratch2, FieldMemOperand(string, HeapObject::kMapOffset));
4666  sw(scratch1, FieldMemOperand(string, String::kHashFieldOffset));
4667 }
4668 
4669 
4670 int MacroAssembler::ActivationFrameAlignment() {
4671 #if defined(V8_HOST_ARCH_MIPS)
4672  // Running on the real platform. Use the alignment as mandated by the local
4673  // environment.
4674  // Note: This will break if we ever start generating snapshots on one Mips
4675  // platform for another Mips platform with a different alignment.
4676  return OS::ActivationFrameAlignment();
4677 #else // defined(V8_HOST_ARCH_MIPS)
4678  // If we are using the simulator then we should always align to the expected
4679  // alignment. As the simulator is used to generate snapshots we do not know
4680  // if the target platform will need alignment, so this is controlled from a
4681  // flag.
4682  return FLAG_sim_stack_alignment;
4683 #endif // defined(V8_HOST_ARCH_MIPS)
4684 }
4685 
4686 
4687 void MacroAssembler::AssertStackIsAligned() {
4688  if (emit_debug_code()) {
4689  const int frame_alignment = ActivationFrameAlignment();
4690  const int frame_alignment_mask = frame_alignment - 1;
4691 
4692  if (frame_alignment > kPointerSize) {
4693  Label alignment_as_expected;
4694  ASSERT(IsPowerOf2(frame_alignment));
4695  andi(at, sp, frame_alignment_mask);
4696  Branch(&alignment_as_expected, eq, at, Operand(zero_reg));
4697  // Don't use Check here, as it will call Runtime_Abort, re-entering here.
4698  stop("Unexpected stack alignment");
4699  bind(&alignment_as_expected);
4700  }
4701  }
4702 }
4703 
4704 
4705 void MacroAssembler::JumpIfNotPowerOfTwoOrZero(
4706  Register reg,
4707  Register scratch,
4708  Label* not_power_of_two_or_zero) {
4709  Subu(scratch, reg, Operand(1));
4710  Branch(USE_DELAY_SLOT, not_power_of_two_or_zero, lt,
4711  scratch, Operand(zero_reg));
4712  and_(at, scratch, reg); // In the delay slot.
4713  Branch(not_power_of_two_or_zero, ne, at, Operand(zero_reg));
4714 }
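
// A minimal standalone sketch (not part of this file) of the classic test
// implemented above: a value is a positive power of two exactly when it is
// greater than zero and clearing its lowest set bit (value & (value - 1))
// leaves zero.
static inline bool IsPowerOfTwoNonZero(int32_t value) {
  return value > 0 && (value & (value - 1)) == 0;
}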
4715 
4716 
4717 void MacroAssembler::SmiTagCheckOverflow(Register reg, Register overflow) {
4718  ASSERT(!reg.is(overflow));
4719  mov(overflow, reg); // Save original value.
4720  SmiTag(reg);
4721  xor_(overflow, overflow, reg); // Overflow if (value ^ 2 * value) < 0.
4722 }
4723 
4724 
4725 void MacroAssembler::SmiTagCheckOverflow(Register dst,
4726  Register src,
4727  Register overflow) {
4728  if (dst.is(src)) {
4729  // Fall back to slower case.
4730  SmiTagCheckOverflow(dst, overflow);
4731  } else {
4732  ASSERT(!dst.is(src));
4733  ASSERT(!dst.is(overflow));
4734  ASSERT(!src.is(overflow));
4735  SmiTag(dst, src);
4736  xor_(overflow, dst, src); // Overflow if (value ^ 2 * value) < 0.
4737  }
4738 }
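
// A minimal standalone sketch (not part of this file) of the overflow test
// used above, assuming a one-bit smi tag on a 32-bit word: tagging shifts
// the value left by one, so it overflows exactly when the shift changes the
// sign bit, i.e. when value ^ (value << 1) is negative.
static inline bool SmiTagOverflows(int32_t value) {
  int32_t tagged = static_cast<int32_t>(static_cast<uint32_t>(value) << 1);
  return (value ^ tagged) < 0;
}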
4739 
4740 
4741 void MacroAssembler::UntagAndJumpIfSmi(Register dst,
4742  Register src,
4743  Label* smi_case) {
4744  JumpIfSmi(src, smi_case, at, USE_DELAY_SLOT);
4745  SmiUntag(dst, src);
4746 }
4747 
4748 
4749 void MacroAssembler::UntagAndJumpIfNotSmi(Register dst,
4750  Register src,
4751  Label* non_smi_case) {
4752  JumpIfNotSmi(src, non_smi_case, at, USE_DELAY_SLOT);
4753  SmiUntag(dst, src);
4754 }
4755 
4756 void MacroAssembler::JumpIfSmi(Register value,
4757  Label* smi_label,
4758  Register scratch,
4759  BranchDelaySlot bd) {
4760  ASSERT_EQ(0, kSmiTag);
4761  andi(scratch, value, kSmiTagMask);
4762  Branch(bd, smi_label, eq, scratch, Operand(zero_reg));
4763 }
4764 
4765 void MacroAssembler::JumpIfNotSmi(Register value,
4766  Label* not_smi_label,
4767  Register scratch,
4768  BranchDelaySlot bd) {
4769  ASSERT_EQ(0, kSmiTag);
4770  andi(scratch, value, kSmiTagMask);
4771  Branch(bd, not_smi_label, ne, scratch, Operand(zero_reg));
4772 }
4773 
4774 
4775 void MacroAssembler::JumpIfNotBothSmi(Register reg1,
4776  Register reg2,
4777  Label* on_not_both_smi) {
4778  STATIC_ASSERT(kSmiTag == 0);
4779  ASSERT_EQ(1, kSmiTagMask);
4780  or_(at, reg1, reg2);
4781  JumpIfNotSmi(at, on_not_both_smi);
4782 }
4783 
4784 
4785 void MacroAssembler::JumpIfEitherSmi(Register reg1,
4786  Register reg2,
4787  Label* on_either_smi) {
4788  STATIC_ASSERT(kSmiTag == 0);
4789  ASSERT_EQ(1, kSmiTagMask);
4790  // If either operand is a smi, the AND of the two tag bits is 0 (a smi).
4791  and_(at, reg1, reg2);
4792  JumpIfSmi(at, on_either_smi);
4793 }
4794 
4795 
4796 void MacroAssembler::AbortIfSmi(Register object) {
4797  STATIC_ASSERT(kSmiTag == 0);
4798  andi(at, object, kSmiTagMask);
4799  Assert(ne, "Operand is a smi", at, Operand(zero_reg));
4800 }
4801 
4802 
4803 void MacroAssembler::AbortIfNotSmi(Register object) {
4804  STATIC_ASSERT(kSmiTag == 0);
4805  andi(at, object, kSmiTagMask);
4806  Assert(eq, "Operand is a smi", at, Operand(zero_reg));
4807 }
4808 
4809 
4810 void MacroAssembler::AbortIfNotString(Register object) {
4811  STATIC_ASSERT(kSmiTag == 0);
4812  And(t0, object, Operand(kSmiTagMask));
4813  Assert(ne, "Operand is not a string", t0, Operand(zero_reg));
4814  push(object);
4815  lw(object, FieldMemOperand(object, HeapObject::kMapOffset));
4816  lbu(object, FieldMemOperand(object, Map::kInstanceTypeOffset));
4817  Assert(lo, "Operand is not a string", object, Operand(FIRST_NONSTRING_TYPE));
4818  pop(object);
4819 }
4820 
4821 
4822 void MacroAssembler::AbortIfNotRootValue(Register src,
4823  Heap::RootListIndex root_value_index,
4824  const char* message) {
4825  ASSERT(!src.is(at));
4826  LoadRoot(at, root_value_index);
4827  Assert(eq, message, src, Operand(at));
4828 }
4829 
4830 
4831 void MacroAssembler::JumpIfNotHeapNumber(Register object,
4832  Register heap_number_map,
4833  Register scratch,
4834  Label* on_not_heap_number) {
4835  lw(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
4836  AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
4837  Branch(on_not_heap_number, ne, scratch, Operand(heap_number_map));
4838 }
4839 
4840 
4841 void MacroAssembler::JumpIfNonSmisNotBothSequentialAsciiStrings(
4842  Register first,
4843  Register second,
4844  Register scratch1,
4845  Register scratch2,
4846  Label* failure) {
4847  // Test that both first and second are sequential ASCII strings.
4848  // Assume that they are non-smis.
4849  lw(scratch1, FieldMemOperand(first, HeapObject::kMapOffset));
4850  lw(scratch2, FieldMemOperand(second, HeapObject::kMapOffset));
4851  lbu(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
4852  lbu(scratch2, FieldMemOperand(scratch2, Map::kInstanceTypeOffset));
4853 
4854  JumpIfBothInstanceTypesAreNotSequentialAscii(scratch1,
4855  scratch2,
4856  scratch1,
4857  scratch2,
4858  failure);
4859 }
4860 
4861 
4862 void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(Register first,
4863  Register second,
4864  Register scratch1,
4865  Register scratch2,
4866  Label* failure) {
4867  // Check that neither is a smi.
4868  STATIC_ASSERT(kSmiTag == 0);
4869  And(scratch1, first, Operand(second));
4870  JumpIfSmi(scratch1, failure);
4871  JumpIfNonSmisNotBothSequentialAsciiStrings(first,
4872  second,
4873  scratch1,
4874  scratch2,
4875  failure);
4876 }
4877 
4878 
4879 void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialAscii(
4880  Register first,
4881  Register second,
4882  Register scratch1,
4883  Register scratch2,
4884  Label* failure) {
4885  int kFlatAsciiStringMask =
4886      kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
4887  int kFlatAsciiStringTag = ASCII_STRING_TYPE;
4888  ASSERT(kFlatAsciiStringTag <= 0xffff); // Ensure this fits 16-bit immed.
4889  andi(scratch1, first, kFlatAsciiStringMask);
4890  Branch(failure, ne, scratch1, Operand(kFlatAsciiStringTag));
4891  andi(scratch2, second, kFlatAsciiStringMask);
4892  Branch(failure, ne, scratch2, Operand(kFlatAsciiStringTag));
4893 }
4894 
4895 
4896 void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii(Register type,
4897  Register scratch,
4898  Label* failure) {
4899  int kFlatAsciiStringMask =
4900      kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
4901  int kFlatAsciiStringTag = ASCII_STRING_TYPE;
4902  And(scratch, type, Operand(kFlatAsciiStringMask));
4903  Branch(failure, ne, scratch, Operand(kFlatAsciiStringTag));
4904 }
4905 
4906 
4907 static const int kRegisterPassedArguments = 4;
4908 
4909 int MacroAssembler::CalculateStackPassedWords(int num_reg_arguments,
4910  int num_double_arguments) {
4911  int stack_passed_words = 0;
4912  num_reg_arguments += 2 * num_double_arguments;
4913 
4914  // Up to four simple arguments are passed in registers a0..a3.
4915  if (num_reg_arguments > kRegisterPassedArguments) {
4916  stack_passed_words += num_reg_arguments - kRegisterPassedArguments;
4917  }
4918  stack_passed_words += kCArgSlotCount;
4919  return stack_passed_words;
4920 }
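
// A worked example of the accounting above (not a call site from this
// file), assuming the O32 configuration where kCArgSlotCount == 4: a call
// with two integer arguments and two double arguments occupies
// 2 + 2 * 2 == 6 register-sized argument positions, of which 6 - 4 == 2
// spill to the stack, plus the 4 reserved argument slots, so
// CalculateStackPassedWords(2, 2) returns 6.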
4921 
4922 
4923 void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
4924  int num_double_arguments,
4925  Register scratch) {
4926  int frame_alignment = ActivationFrameAlignment();
4927 
4928  // Up to four simple arguments are passed in registers a0..a3.
4929  // Those four arguments must have reserved argument slots on the stack for
4930  // mips, even though those argument slots are not normally used.
4931  // Remaining arguments are pushed on the stack, above (higher address than)
4932  // the argument slots.
4933  int stack_passed_arguments = CalculateStackPassedWords(
4934  num_reg_arguments, num_double_arguments);
4935  if (frame_alignment > kPointerSize) {
4936  // Make stack end at alignment and make room for num_arguments - 4 words
4937  // and the original value of sp.
4938  mov(scratch, sp);
4939  Subu(sp, sp, Operand((stack_passed_arguments + 1) * kPointerSize));
4940  ASSERT(IsPowerOf2(frame_alignment));
4941  And(sp, sp, Operand(-frame_alignment));
4942  sw(scratch, MemOperand(sp, stack_passed_arguments * kPointerSize));
4943  } else {
4944  Subu(sp, sp, Operand(stack_passed_arguments * kPointerSize));
4945  }
4946 }
4947 
4948 
4949 void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
4950  Register scratch) {
4951  PrepareCallCFunction(num_reg_arguments, 0, scratch);
4952 }
4953 
4954 
4955 void MacroAssembler::CallCFunction(ExternalReference function,
4956  int num_reg_arguments,
4957  int num_double_arguments) {
4958  li(t8, Operand(function));
4959  CallCFunctionHelper(t8, num_reg_arguments, num_double_arguments);
4960 }
4961 
4962 
4963 void MacroAssembler::CallCFunction(Register function,
4964  int num_reg_arguments,
4965  int num_double_arguments) {
4966  CallCFunctionHelper(function, num_reg_arguments, num_double_arguments);
4967 }
4968 
4969 
4970 void MacroAssembler::CallCFunction(ExternalReference function,
4971  int num_arguments) {
4972  CallCFunction(function, num_arguments, 0);
4973 }
4974 
4975 
4976 void MacroAssembler::CallCFunction(Register function,
4977  int num_arguments) {
4978  CallCFunction(function, num_arguments, 0);
4979 }
4980 
4981 
4982 void MacroAssembler::CallCFunctionHelper(Register function,
4983  int num_reg_arguments,
4984  int num_double_arguments) {
4985  ASSERT(has_frame());
4986  // Make sure that the stack is aligned before calling a C function unless
4987  // running in the simulator. The simulator has its own alignment check which
4988  // provides more information.
4989  // The argument slots are presumed to have been set up by
4990  // PrepareCallCFunction. The C function must be called via t9, for mips ABI.
4991 
4992 #if defined(V8_HOST_ARCH_MIPS)
4993  if (emit_debug_code()) {
4994  int frame_alignment = OS::ActivationFrameAlignment();
4995  int frame_alignment_mask = frame_alignment - 1;
4996  if (frame_alignment > kPointerSize) {
4997  ASSERT(IsPowerOf2(frame_alignment));
4998  Label alignment_as_expected;
4999  And(at, sp, Operand(frame_alignment_mask));
5000  Branch(&alignment_as_expected, eq, at, Operand(zero_reg));
5001  // Don't use Check here, as it will call Runtime_Abort possibly
5002  // re-entering here.
5003  stop("Unexpected alignment in CallCFunction");
5004  bind(&alignment_as_expected);
5005  }
5006  }
5007 #endif // V8_HOST_ARCH_MIPS
5008 
5009  // Just call directly. The function called cannot cause a GC, or
5010  // allow preemption, so the return address in the link register
5011  // stays correct.
5012 
5013  if (!function.is(t9)) {
5014  mov(t9, function);
5015  function = t9;
5016  }
5017 
5018  Call(function);
5019 
5020  int stack_passed_arguments = CalculateStackPassedWords(
5021  num_reg_arguments, num_double_arguments);
5022 
5023  if (OS::ActivationFrameAlignment() > kPointerSize) {
5024  lw(sp, MemOperand(sp, stack_passed_arguments * kPointerSize));
5025  } else {
5026  Addu(sp, sp, Operand(stack_passed_arguments * sizeof(kPointerSize)));
5027  }
5028 }
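
// A typical pairing of the helpers above (an illustrative sketch, not a
// call site from this file), inside generated code that already has a
// frame; 'two_arg_helper' stands for a hypothetical ExternalReference to a
// C function taking two word-sized arguments:
//
//   PrepareCallCFunction(2, t0);       // Two register arguments, no doubles.
//   li(a0, Operand(first_value));      // Arguments go in a0..a3.
//   li(a1, Operand(second_value));
//   CallCFunction(two_arg_helper, 2);  // A word-sized result arrives in v0.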
5029 
5030 
5031 #undef BRANCH_ARGS_CHECK
5032 
5033 
5034 void MacroAssembler::PatchRelocatedValue(Register li_location,
5035  Register scratch,
5036  Register new_value) {
5037  lw(scratch, MemOperand(li_location));
5038  // At this point scratch is a lui(at, ...) instruction.
5039  if (emit_debug_code()) {
5040  And(scratch, scratch, kOpcodeMask);
5041  Check(eq, "The instruction to patch should be a lui.",
5042  scratch, Operand(LUI));
5043  lw(scratch, MemOperand(li_location));
5044  }
5045  srl(t9, new_value, kImm16Bits);
5046  Ins(scratch, t9, 0, kImm16Bits);
5047  sw(scratch, MemOperand(li_location));
5048 
5049  lw(scratch, MemOperand(li_location, kInstrSize));
5050  // scratch is now ori(at, ...).
5051  if (emit_debug_code()) {
5052  And(scratch, scratch, kOpcodeMask);
5053  Check(eq, "The instruction to patch should be an ori.",
5054  scratch, Operand(ORI));
5055  lw(scratch, MemOperand(li_location, kInstrSize));
5056  }
5057  Ins(scratch, new_value, 0, kImm16Bits);
5058  sw(scratch, MemOperand(li_location, kInstrSize));
5059 
5060  // Update the I-cache so the new lui and ori can be executed.
5061  FlushICache(li_location, 2);
5062 }
5063 
5064 void MacroAssembler::GetRelocatedValue(Register li_location,
5065  Register value,
5066  Register scratch) {
5067  lw(value, MemOperand(li_location));
5068  if (emit_debug_code()) {
5069  And(value, value, kOpcodeMask);
5070  Check(eq, "The instruction should be a lui.",
5071  value, Operand(LUI));
5072  lw(value, MemOperand(li_location));
5073  }
5074 
5075  // value now holds a lui instruction. Extract the immediate.
5076  sll(value, value, kImm16Bits);
5077 
5078  lw(scratch, MemOperand(li_location, kInstrSize));
5079  if (emit_debug_code()) {
5080  And(scratch, scratch, kOpcodeMask);
5081  Check(eq, "The instruction should be an ori.",
5082  scratch, Operand(ORI));
5083  lw(scratch, MemOperand(li_location, kInstrSize));
5084  }
5085  // "scratch" now holds an ori instruction. Extract the immediate.
5086  andi(scratch, scratch, kImm16Mask);
5087 
5088  // Merge the results.
5089  or_(value, value, scratch);
5090 }
5091 
5092 
5093 void MacroAssembler::CheckPageFlag(
5094  Register object,
5095  Register scratch,
5096  int mask,
5097  Condition cc,
5098  Label* condition_met) {
5099  And(scratch, object, Operand(~Page::kPageAlignmentMask));
5100  lw(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
5101  And(scratch, scratch, Operand(mask));
5102  Branch(condition_met, cc, scratch, Operand(zero_reg));
5103 }
5104 
5105 
5106 void MacroAssembler::JumpIfBlack(Register object,
5107  Register scratch0,
5108  Register scratch1,
5109  Label* on_black) {
5110  HasColor(object, scratch0, scratch1, on_black, 1, 0); // kBlackBitPattern.
5111  ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
5112 }
5113 
5114 
5115 void MacroAssembler::HasColor(Register object,
5116  Register bitmap_scratch,
5117  Register mask_scratch,
5118  Label* has_color,
5119  int first_bit,
5120  int second_bit) {
5121  ASSERT(!AreAliased(object, bitmap_scratch, mask_scratch, t8));
5122  ASSERT(!AreAliased(object, bitmap_scratch, mask_scratch, t9));
5123 
5124  GetMarkBits(object, bitmap_scratch, mask_scratch);
5125 
5126  Label other_color, word_boundary;
5127  lw(t9, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
5128  And(t8, t9, Operand(mask_scratch));
5129  Branch(&other_color, first_bit == 1 ? eq : ne, t8, Operand(zero_reg));
5130  // Shift left 1 by adding.
5131  Addu(mask_scratch, mask_scratch, Operand(mask_scratch));
5132  Branch(&word_boundary, eq, mask_scratch, Operand(zero_reg));
5133  And(t8, t9, Operand(mask_scratch));
5134  Branch(has_color, second_bit == 1 ? ne : eq, t8, Operand(zero_reg));
5135  jmp(&other_color);
5136 
5137  bind(&word_boundary);
5138  lw(t9, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize + kPointerSize));
5139  And(t9, t9, Operand(1));
5140  Branch(has_color, second_bit == 1 ? ne : eq, t9, Operand(zero_reg));
5141  bind(&other_color);
5142 }
5143 
5144 
5145 // Detect some, but not all, common pointer-free objects. This is used by the
5146 // incremental write barrier which doesn't care about oddballs (they are always
5147 // marked black immediately so this code is not hit).
5148 void MacroAssembler::JumpIfDataObject(Register value,
5149  Register scratch,
5150  Label* not_data_object) {
5151  ASSERT(!AreAliased(value, scratch, t8, no_reg));
5152  Label is_data_object;
5153  lw(scratch, FieldMemOperand(value, HeapObject::kMapOffset));
5154  LoadRoot(t8, Heap::kHeapNumberMapRootIndex);
5155  Branch(&is_data_object, eq, t8, Operand(scratch));
5157  ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
5158  // If it's a string and it's not a cons string then it's an object containing
5159  // no GC pointers.
5160  lbu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
5161  And(t8, scratch, Operand(kIsIndirectStringMask | kIsNotStringMask));
5162  Branch(not_data_object, ne, t8, Operand(zero_reg));
5163  bind(&is_data_object);
5164 }
5165 
5166 
5167 void MacroAssembler::GetMarkBits(Register addr_reg,
5168  Register bitmap_reg,
5169  Register mask_reg) {
5170  ASSERT(!AreAliased(addr_reg, bitmap_reg, mask_reg, no_reg));
5171  And(bitmap_reg, addr_reg, Operand(~Page::kPageAlignmentMask));
5172  Ext(mask_reg, addr_reg, kPointerSizeLog2, Bitmap::kBitsPerCellLog2);
5173  const int kLowBits = kPointerSizeLog2 + Bitmap::kBitsPerCellLog2;
5174  Ext(t8, addr_reg, kLowBits, kPageSizeBits - kLowBits);
5175  sll(t8, t8, kPointerSizeLog2);
5176  Addu(bitmap_reg, bitmap_reg, t8);
5177  li(t8, Operand(1));
5178  sllv(mask_reg, t8, mask_reg);
5179 }
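
// A minimal standalone sketch (not part of this file) of the address
// arithmetic above, with illustrative constants: kPointerSizeLog2 == 2,
// 32-bit bitmap cells (kBitsPerCellLog2 == 5) and kPageSizeBits == 20 are
// assumed; the caller adds MemoryChunk::kHeaderSize when it dereferences
// the returned cell location, just as HasColor and EnsureNotWhite do.
static inline void ComputeMarkBit(uintptr_t addr,
                                  uintptr_t* cell_address,
                                  uint32_t* mask) {
  const int kPointerSizeLog2 = 2;   // Assumed illustrative value.
  const int kBitsPerCellLog2 = 5;   // Assumed illustrative value.
  const int kPageSizeBits = 20;     // Assumed illustrative value.
  const uintptr_t kPageAlignmentMask = (1u << kPageSizeBits) - 1;
  uintptr_t page_start = addr & ~kPageAlignmentMask;
  uint32_t bit_in_cell =
      static_cast<uint32_t>(addr >> kPointerSizeLog2) &
      ((1u << kBitsPerCellLog2) - 1);
  uintptr_t cell_index =
      (addr & kPageAlignmentMask) >> (kPointerSizeLog2 + kBitsPerCellLog2);
  *cell_address = page_start + (cell_index << kPointerSizeLog2);
  *mask = 1u << bit_in_cell;
}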
5180 
5181 
5182 void MacroAssembler::EnsureNotWhite(
5183  Register value,
5184  Register bitmap_scratch,
5185  Register mask_scratch,
5186  Register load_scratch,
5187  Label* value_is_white_and_not_data) {
5188  ASSERT(!AreAliased(value, bitmap_scratch, mask_scratch, t8));
5189  GetMarkBits(value, bitmap_scratch, mask_scratch);
5190 
5191  // If the value is black or grey we don't need to do anything.
5192  ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0);
5193  ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
5194  ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0);
5195  ASSERT(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
5196 
5197  Label done;
5198 
5199  // Since both black and grey have a 1 in the first position and white does
5200  // not have a 1 there we only need to check one bit.
5201  lw(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
5202  And(t8, mask_scratch, load_scratch);
5203  Branch(&done, ne, t8, Operand(zero_reg));
5204 
5205  if (emit_debug_code()) {
5206  // Check for impossible bit pattern.
5207  Label ok;
5208  // sll may overflow, making the check conservative.
5209  sll(t8, mask_scratch, 1);
5210  And(t8, load_scratch, t8);
5211  Branch(&ok, eq, t8, Operand(zero_reg));
5212  stop("Impossible marking bit pattern");
5213  bind(&ok);
5214  }
5215 
5216  // Value is white. We check whether it is data that doesn't need scanning.
5217  // Currently only checks for HeapNumber and non-cons strings.
5218  Register map = load_scratch; // Holds map while checking type.
5219  Register length = load_scratch; // Holds length of object after testing type.
5220  Label is_data_object;
5221 
5222  // Check for heap-number
5223  lw(map, FieldMemOperand(value, HeapObject::kMapOffset));
5224  LoadRoot(t8, Heap::kHeapNumberMapRootIndex);
5225  {
5226  Label skip;
5227  Branch(&skip, ne, t8, Operand(map));
5228  li(length, HeapNumber::kSize);
5229  Branch(&is_data_object);
5230  bind(&skip);
5231  }
5232 
5233  // Check for strings.
5235  ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
5236  // If it's a string and it's not a cons string then it's an object containing
5237  // no GC pointers.
5238  Register instance_type = load_scratch;
5239  lbu(instance_type, FieldMemOperand(map, Map::kInstanceTypeOffset));
5240  And(t8, instance_type, Operand(kIsIndirectStringMask | kIsNotStringMask));
5241  Branch(value_is_white_and_not_data, ne, t8, Operand(zero_reg));
5242  // It's a non-indirect (non-cons and non-slice) string.
5243  // If it's external, the length is just ExternalString::kSize.
5244  // Otherwise it's String::kHeaderSize + string->length() * (1 or 2).
5245  // External strings are the only ones with the kExternalStringTag bit
5246  // set.
5249  And(t8, instance_type, Operand(kExternalStringTag));
5250  {
5251  Label skip;
5252  Branch(&skip, eq, t8, Operand(zero_reg));
5253  li(length, ExternalString::kSize);
5254  Branch(&is_data_object);
5255  bind(&skip);
5256  }
5257 
5258  // Sequential string, either ASCII or UC16.
5259  // For ASCII (char-size of 1) we shift the smi tag away to get the length.
5260  // For UC16 (char-size of 2) we just leave the smi tag in place, thereby
5261  // getting the length multiplied by 2.
5263  ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
5264  lw(t9, FieldMemOperand(value, String::kLengthOffset));
5265  And(t8, instance_type, Operand(kStringEncodingMask));
5266  {
5267  Label skip;
5268  Branch(&skip, eq, t8, Operand(zero_reg));
5269  srl(t9, t9, 1);
5270  bind(&skip);
5271  }
5272  Addu(length, t9, Operand(SeqString::kHeaderSize + kObjectAlignmentMask));
5273  And(length, length, Operand(~kObjectAlignmentMask));
5274 
5275  bind(&is_data_object);
5276  // Value is a data object, and it is white. Mark it black. Since we know
5277  // that the object is white we can make it black by flipping one bit.
5278  lw(t8, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
5279  Or(t8, t8, Operand(mask_scratch));
5280  sw(t8, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
5281 
5282  And(bitmap_scratch, bitmap_scratch, Operand(~Page::kPageAlignmentMask));
5283  lw(t8, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
5284  Addu(t8, t8, Operand(length));
5285  sw(t8, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
5286 
5287  bind(&done);
5288 }
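
// A minimal standalone sketch (not part of this file) of the size
// computation above for sequential strings, with illustrative constants:
// a 12-byte SeqString header and 8-byte object alignment are assumed. The
// payload is length bytes for ASCII and 2 * length bytes for UC16, and the
// total is rounded up to the allocation alignment.
static inline uint32_t SeqStringObjectSize(uint32_t length_in_chars,
                                           bool is_ascii) {
  const uint32_t kHeaderSize = 12;              // Assumed illustrative value.
  const uint32_t kObjectAlignmentMask = 8 - 1;  // Assumed illustrative value.
  uint32_t payload = is_ascii ? length_in_chars : 2 * length_in_chars;
  return (kHeaderSize + payload + kObjectAlignmentMask) & ~kObjectAlignmentMask;
}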
5289 
5290 
5291 void MacroAssembler::LoadInstanceDescriptors(Register map,
5292  Register descriptors) {
5293  lw(descriptors,
5294  FieldMemOperand(map, Map::kInstanceDescriptorsOrBitField3Offset));
5295  Label not_smi;
5296  JumpIfNotSmi(descriptors, &not_smi);
5297  LoadRoot(descriptors, Heap::kEmptyDescriptorArrayRootIndex);
5298  bind(&not_smi);
5299 }
5300 
5301 
5302 void MacroAssembler::CheckEnumCache(Register null_value, Label* call_runtime) {
5303  Label next;
5304  // Preload a couple of values used in the loop.
5305  Register empty_fixed_array_value = t2;
5306  LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
5307  Register empty_descriptor_array_value = t3;
5308  LoadRoot(empty_descriptor_array_value,
5309  Heap::kEmptyDescriptorArrayRootIndex);
5310  mov(a1, a0);
5311  bind(&next);
5312 
5313  // Check that there are no elements. Register a1 contains the
5314  // current JS object we've reached through the prototype chain.
5315  lw(a2, FieldMemOperand(a1, JSObject::kElementsOffset));
5316  Branch(call_runtime, ne, a2, Operand(empty_fixed_array_value));
5317 
5318  // Check that instance descriptors are not empty so that we can
5319  // check for an enum cache. Leave the map in a2 for the subsequent
5320  // prototype load.
5321  lw(a2, FieldMemOperand(a1, HeapObject::kMapOffset));
5322  lw(a3, FieldMemOperand(a2, Map::kInstanceDescriptorsOrBitField3Offset));
5323  JumpIfSmi(a3, call_runtime);
5324 
5325  // Check that there is an enum cache in the non-empty instance
5326  // descriptors (a3). This is the case if the next enumeration
5327  // index field does not contain a smi.
5328  lw(a3, FieldMemOperand(a3, DescriptorArray::kEnumerationIndexOffset));
5329  JumpIfSmi(a3, call_runtime);
5330 
5331  // For all objects but the receiver, check that the cache is empty.
5332  Label check_prototype;
5333  Branch(&check_prototype, eq, a1, Operand(a0));
5334  lw(a3, FieldMemOperand(a3, DescriptorArray::kEnumCacheBridgeCacheOffset));
5335  Branch(call_runtime, ne, a3, Operand(empty_fixed_array_value));
5336 
5337  // Load the prototype from the map and loop if non-null.
5338  bind(&check_prototype);
5339  lw(a1, FieldMemOperand(a2, Map::kPrototypeOffset));
5340  Branch(&next, ne, a1, Operand(null_value));
5341 }
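
// Annotation (illustrative; not part of the original source): CheckEnumCache
// walks the prototype chain and bails out to the runtime if any object could
// invalidate a cached for-in enumeration.  A hedged sketch of the same
// control flow over a hypothetical node type (the three booleans stand in
// for the FieldMemOperand loads and root comparisons done in assembly):
struct EnumCheckNodeSketch {
  bool elements_is_empty;          // elements == empty_fixed_array
  bool enum_cache_present;         // descriptors and enum index are non-smi
  bool enum_cache_is_empty;        // bridge cache == empty_fixed_array
  EnumCheckNodeSketch* prototype;  // 0 plays the role of null_value here.
};

static inline bool CanUseEnumCacheSketch(EnumCheckNodeSketch* receiver) {
  for (EnumCheckNodeSketch* o = receiver; o != 0; o = o->prototype) {
    if (!o->elements_is_empty) return false;   // -> call_runtime
    if (!o->enum_cache_present) return false;  // -> call_runtime
    if (o != receiver && !o->enum_cache_is_empty) return false;
  }
  return true;  // The fast for-in path is safe.
}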
5342 
5343 
5344 void MacroAssembler::ClampUint8(Register output_reg, Register input_reg) {
5345  ASSERT(!output_reg.is(input_reg));
5346  Label done;
5347  li(output_reg, Operand(255));
5348  // Normal branch: nop in delay slot.
5349  Branch(&done, gt, input_reg, Operand(output_reg));
5350  // Use delay slot in this branch.
5351  Branch(USE_DELAY_SLOT, &done, lt, input_reg, Operand(zero_reg));
5352  mov(output_reg, zero_reg); // In delay slot.
5353  mov(output_reg, input_reg); // Value is in range 0..255.
5354  bind(&done);
5355 }
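
// Annotation (illustrative; not part of the original source): stripped of the
// delay-slot scheduling, ClampUint8 is a plain integer clamp.  C++ sketch:
static inline int ClampToUint8Sketch(int value) {
  if (value > 255) return 255;  // First branch: keep the preloaded 255.
  if (value < 0) return 0;      // Second branch: delay slot writes zero.
  return value;                 // In range 0..255: pass the input through.
}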
5356 
5357 
5358 void MacroAssembler::ClampDoubleToUint8(Register result_reg,
5359  DoubleRegister input_reg,
5360  DoubleRegister temp_double_reg) {
5361  Label above_zero;
5362  Label done;
5363  Label in_bounds;
5364 
5365  Move(temp_double_reg, 0.0);
5366  BranchF(&above_zero, NULL, gt, input_reg, temp_double_reg);
5367 
5368  // Double value is <= 0 or NaN: return 0.
5369  mov(result_reg, zero_reg);
5370  Branch(&done);
5371 
5372  // Double value is >= 255, return 255.
5373  bind(&above_zero);
5374  Move(temp_double_reg, 255.0);
5375  BranchF(&in_bounds, NULL, le, input_reg, temp_double_reg);
5376  li(result_reg, Operand(255));
5377  Branch(&done);
5378 
5379  // In 0..255 range: round to the nearest integer.
5380  bind(&in_bounds);
5381  round_w_d(temp_double_reg, input_reg);
5382  mfc1(result_reg, temp_double_reg);
5383  bind(&done);
5384 }
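
// Annotation (illustrative; not part of the original source): the double
// clamp above in plain C++.  Anything not greater than zero (including NaN)
// maps to 0, values above 255 map to 255, and in-range values are rounded
// (round_w_d rounds to nearest-even; the sketch below rounds halves up,
// which is close enough for illustration):
static inline int ClampDoubleToUint8Sketch(double value) {
  if (!(value > 0.0)) return 0;          // Negatives, -Inf and NaN.
  if (value > 255.0) return 255;         // Above the representable maximum.
  return static_cast<int>(value + 0.5);  // Round to nearest integer.
}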
5385 
5386 
5387 bool AreAliased(Register r1, Register r2, Register r3, Register r4) {
5388  if (r1.is(r2)) return true;
5389  if (r1.is(r3)) return true;
5390  if (r1.is(r4)) return true;
5391  if (r2.is(r3)) return true;
5392  if (r2.is(r4)) return true;
5393  if (r3.is(r4)) return true;
5394  return false;
5395 }
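
// Annotation (illustrative; not part of the original source): AreAliased() is
// a pairwise-distinctness check over four registers; it is typically used in
// assertions to guarantee that a helper was handed distinct value/scratch
// registers, e.g. (hypothetical call site):
//
//   ASSERT(!AreAliased(object, address, value, scratch));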
5396 
5397 
5398 CodePatcher::CodePatcher(byte* address, int instructions)
5399  : address_(address),
5400  instructions_(instructions),
5401  size_(instructions * Assembler::kInstrSize),
5402  masm_(NULL, address, size_ + Assembler::kGap) {
5403  // Create a new macro assembler pointing to the address of the code to patch.
5404  // The size is adjusted with kGap in order for the assembler to generate size
5405  // bytes of instructions without failing with buffer size constraints.
5406  ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
5407 }
5408 
5409 
5410 CodePatcher::~CodePatcher() {
5411  // Indicate that code has changed.
5412  CPU::FlushICache(address_, size_);
5413 
5414  // Check that the code was patched as expected.
5415  ASSERT(masm_.pc_ == address_ + size_);
5416  ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
5417 }
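
// Annotation (illustrative; not part of the original source): CodePatcher is
// meant to be used as a scoped object -- construct it over the instructions
// to rewrite, emit exactly that many instructions, and let the destructor
// flush the instruction cache.  Hypothetical sketch ('branch_address' is an
// assumed variable):
//
//   {
//     CodePatcher patcher(branch_address, 1);  // Patch one instruction.
//     patcher.ChangeBranchCondition(ne);       // e.g. flip a beq into a bne.
//   }  // ~CodePatcher flushes the icache for the patched range.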
5418 
5419 
5420 void CodePatcher::Emit(Instr instr) {
5421  masm()->emit(instr);
5422 }
5423 
5424 
5425 void CodePatcher::Emit(Address addr) {
5426  masm()->emit(reinterpret_cast<Instr>(addr));
5427 }
5428 
5429 
5430 void CodePatcher::ChangeBranchCondition(Condition cond) {
5431  Instr instr = Assembler::instr_at(masm_.pc_);
5432  ASSERT(Assembler::IsBranch(instr));
5433  uint32_t opcode = Assembler::GetOpcodeField(instr);
5434  // Currently only the 'eq' and 'ne' cond values are supported, and only for
5435  // the simple branch instructions (whose opcode field encodes the branch type).
5436  // There are some special cases (see Assembler::IsBranch()), so extending this
5437  // would be tricky.
5438  ASSERT(opcode == BEQ ||
5439  opcode == BNE ||
5440  opcode == BLEZ ||
5441  opcode == BGTZ ||
5442  opcode == BEQL ||
5443  opcode == BNEL ||
5444  opcode == BLEZL ||
5445  opcode == BGTZL);
5446  opcode = (cond == eq) ? BEQ : BNE;
5447  instr = (instr & ~kOpcodeMask) | opcode;
5448  masm_.emit(instr);
5449 }
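
// Annotation (illustrative; not part of the original source): the rewrite
// above only touches the instruction's opcode field (the top 6 bits of a
// MIPS instruction word).  Self-contained sketch of the same bit surgery,
// with local constants assumed to mirror kOpcodeMask, BEQ and BNE:
static inline unsigned FlipBeqBneSketch(unsigned instr, bool want_eq) {
  const unsigned kOpcodeMaskLocal = 0x3Fu << 26;  // Bits 31..26.
  const unsigned kBeqOpcode = 0x04u << 26;        // beq
  const unsigned kBneOpcode = 0x05u << 26;        // bne
  return (instr & ~kOpcodeMaskLocal) | (want_eq ? kBeqOpcode : kBneOpcode);
}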
5450 
5451 
5452 } } // namespace v8::internal
5453 
5454 #endif // V8_TARGET_ARCH_MIPS