v8 3.25.30 (node 0.11.13)
V8 is Google's open source JavaScript engine
macro-assembler-mips.cc
1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
4 // met:
5 //
6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided
11 // with the distribution.
12 // * Neither the name of Google Inc. nor the names of its
13 // contributors may be used to endorse or promote products derived
14 // from this software without specific prior written permission.
15 //
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 
28 #include <limits.h> // For LONG_MIN, LONG_MAX.
29 
30 #include "v8.h"
31 
32 #if V8_TARGET_ARCH_MIPS
33 
34 #include "bootstrapper.h"
35 #include "codegen.h"
36 #include "cpu-profiler.h"
37 #include "debug.h"
38 #include "isolate-inl.h"
39 #include "runtime.h"
40 
41 namespace v8 {
42 namespace internal {
43 
44 MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
45  : Assembler(arg_isolate, buffer, size),
46  generating_stub_(false),
47  has_frame_(false) {
48  if (isolate() != NULL) {
49  code_object_ = Handle<Object>(isolate()->heap()->undefined_value(),
50  isolate());
51  }
52 }
53 
54 
55 void MacroAssembler::Load(Register dst,
56  const MemOperand& src,
57  Representation r) {
58  ASSERT(!r.IsDouble());
59  if (r.IsInteger8()) {
60  lb(dst, src);
61  } else if (r.IsUInteger8()) {
62  lbu(dst, src);
63  } else if (r.IsInteger16()) {
64  lh(dst, src);
65  } else if (r.IsUInteger16()) {
66  lhu(dst, src);
67  } else {
68  lw(dst, src);
69  }
70 }
71 
72 
73 void MacroAssembler::Store(Register src,
74  const MemOperand& dst,
75  Representation r) {
76  ASSERT(!r.IsDouble());
77  if (r.IsInteger8() || r.IsUInteger8()) {
78  sb(src, dst);
79  } else if (r.IsInteger16() || r.IsUInteger16()) {
80  sh(src, dst);
81  } else {
82  sw(src, dst);
83  }
84 }
85 
86 
87 void MacroAssembler::LoadRoot(Register destination,
88  Heap::RootListIndex index) {
89  lw(destination, MemOperand(s6, index << kPointerSizeLog2));
90 }
91 
92 
93 void MacroAssembler::LoadRoot(Register destination,
94  Heap::RootListIndex index,
95  Condition cond,
96  Register src1, const Operand& src2) {
97  Branch(2, NegateCondition(cond), src1, src2);
98  lw(destination, MemOperand(s6, index << kPointerSizeLog2));
99 }
100 
101 
102 void MacroAssembler::StoreRoot(Register source,
103  Heap::RootListIndex index) {
104  sw(source, MemOperand(s6, index << kPointerSizeLog2));
105 }
106 
107 
108 void MacroAssembler::StoreRoot(Register source,
109  Heap::RootListIndex index,
110  Condition cond,
111  Register src1, const Operand& src2) {
112  Branch(2, NegateCondition(cond), src1, src2);
113  sw(source, MemOperand(s6, index << kPointerSizeLog2));
114 }
115 
116 
117 // Push and pop all registers that can hold pointers.
118 void MacroAssembler::PushSafepointRegisters() {
119  // Safepoints expect a block of kNumSafepointRegisters values on the
120  // stack, so adjust the stack for unsaved registers.
121  const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
122  ASSERT(num_unsaved >= 0);
123  if (num_unsaved > 0) {
124  Subu(sp, sp, Operand(num_unsaved * kPointerSize));
125  }
126  MultiPush(kSafepointSavedRegisters);
127 }
128 
129 
130 void MacroAssembler::PopSafepointRegisters() {
131  const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
132  MultiPop(kSafepointSavedRegisters);
133  if (num_unsaved > 0) {
134  Addu(sp, sp, Operand(num_unsaved * kPointerSize));
135  }
136 }
137 
138 
139 void MacroAssembler::PushSafepointRegistersAndDoubles() {
140  PushSafepointRegisters();
141  Subu(sp, sp, Operand(FPURegister::NumAllocatableRegisters() * kDoubleSize));
142  for (int i = 0; i < FPURegister::NumAllocatableRegisters(); i+=2) {
143  FPURegister reg = FPURegister::FromAllocationIndex(i);
144  sdc1(reg, MemOperand(sp, i * kDoubleSize));
145  }
146 }
147 
148 
149 void MacroAssembler::PopSafepointRegistersAndDoubles() {
150  for (int i = 0; i < FPURegister::NumAllocatableRegisters(); i+=2) {
151  FPURegister reg = FPURegister::FromAllocationIndex(i);
152  ldc1(reg, MemOperand(sp, i * kDoubleSize));
153  }
154  Addu(sp, sp, Operand(FPURegister::NumAllocatableRegisters() * kDoubleSize));
155  PopSafepointRegisters();
156 }
157 
158 
159 void MacroAssembler::StoreToSafepointRegistersAndDoublesSlot(Register src,
160  Register dst) {
161  sw(src, SafepointRegistersAndDoublesSlot(dst));
162 }
163 
164 
165 void MacroAssembler::StoreToSafepointRegisterSlot(Register src, Register dst) {
166  sw(src, SafepointRegisterSlot(dst));
167 }
168 
169 
170 void MacroAssembler::LoadFromSafepointRegisterSlot(Register dst, Register src) {
171  lw(dst, SafepointRegisterSlot(src));
172 }
173 
174 
175 int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
176  // The registers are pushed starting with the highest encoding,
177  // which means that lowest encodings are closest to the stack pointer.
178  return kSafepointRegisterStackIndexMap[reg_code];
179 }
180 
181 
182 MemOperand MacroAssembler::SafepointRegisterSlot(Register reg) {
183  return MemOperand(sp, SafepointRegisterStackIndex(reg.code()) * kPointerSize);
184 }
185 
186 
187 MemOperand MacroAssembler::SafepointRegistersAndDoublesSlot(Register reg) {
188  UNIMPLEMENTED_MIPS();
189  // General purpose registers are pushed last on the stack.
190  int doubles_size = FPURegister::NumAllocatableRegisters() * kDoubleSize;
191  int register_offset = SafepointRegisterStackIndex(reg.code()) * kPointerSize;
192  return MemOperand(sp, doubles_size + register_offset);
193 }
194 
195 
196 void MacroAssembler::InNewSpace(Register object,
197  Register scratch,
198  Condition cc,
199  Label* branch) {
200  ASSERT(cc == eq || cc == ne);
201  And(scratch, object, Operand(ExternalReference::new_space_mask(isolate())));
202  Branch(branch, cc, scratch,
203  Operand(ExternalReference::new_space_start(isolate())));
204 }
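// Sketch, not part of this file: the check above written as plain pointer
// arithmetic. An object lies in new space when masking its address with the
// new-space mask yields the new-space start address. The helper name is
// illustrative only.

#include <cstdint>

bool InNewSpaceSketch(uintptr_t object, uintptr_t mask, uintptr_t start) {
  return (object & mask) == start;
}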
205 
206 
207 void MacroAssembler::RecordWriteField(
208  Register object,
209  int offset,
210  Register value,
211  Register dst,
212  RAStatus ra_status,
213  SaveFPRegsMode save_fp,
214  RememberedSetAction remembered_set_action,
215  SmiCheck smi_check) {
216  ASSERT(!AreAliased(value, dst, t8, object));
217  // First, check if a write barrier is even needed. The tests below
218  // catch stores of Smis.
219  Label done;
220 
221  // Skip barrier if writing a smi.
222  if (smi_check == INLINE_SMI_CHECK) {
223  JumpIfSmi(value, &done);
224  }
225 
226  // Although the object register is tagged, the offset is relative to the start
227  // of the object, so the offset must be a multiple of kPointerSize.
228  ASSERT(IsAligned(offset, kPointerSize));
229 
230  Addu(dst, object, Operand(offset - kHeapObjectTag));
231  if (emit_debug_code()) {
232  Label ok;
233  And(t8, dst, Operand((1 << kPointerSizeLog2) - 1));
234  Branch(&ok, eq, t8, Operand(zero_reg));
235  stop("Unaligned cell in write barrier");
236  bind(&ok);
237  }
238 
239  RecordWrite(object,
240  dst,
241  value,
242  ra_status,
243  save_fp,
244  remembered_set_action,
245  OMIT_SMI_CHECK);
246 
247  bind(&done);
248 
249  // Clobber clobbered input registers when running with the debug-code flag
250  // turned on to provoke errors.
251  if (emit_debug_code()) {
252  li(value, Operand(BitCast<int32_t>(kZapValue + 4)));
253  li(dst, Operand(BitCast<int32_t>(kZapValue + 8)));
254  }
255 }
256 
257 
258 // Will clobber 4 registers: object, address, scratch, ip. The
259 // register 'object' contains a heap object pointer. The heap object
260 // tag is shifted away.
261 void MacroAssembler::RecordWrite(Register object,
262  Register address,
263  Register value,
264  RAStatus ra_status,
265  SaveFPRegsMode fp_mode,
266  RememberedSetAction remembered_set_action,
267  SmiCheck smi_check) {
268  ASSERT(!AreAliased(object, address, value, t8));
269  ASSERT(!AreAliased(object, address, value, t9));
270 
271  if (emit_debug_code()) {
272  lw(at, MemOperand(address));
273  Assert(
274  eq, kWrongAddressOrValuePassedToRecordWrite, at, Operand(value));
275  }
276 
277  // Count number of write barriers in generated code.
278  isolate()->counters()->write_barriers_static()->Increment();
279  // TODO(mstarzinger): Dynamic counter missing.
280 
281  // First, check if a write barrier is even needed. The tests below
282  // catch stores of smis and stores into the young generation.
283  Label done;
284 
285  if (smi_check == INLINE_SMI_CHECK) {
286  ASSERT_EQ(0, kSmiTag);
287  JumpIfSmi(value, &done);
288  }
289 
290  CheckPageFlag(value,
291  value, // Used as scratch.
292  MemoryChunk::kPointersToHereAreInterestingMask,
293  eq,
294  &done);
295  CheckPageFlag(object,
296  value, // Used as scratch.
297  MemoryChunk::kPointersFromHereAreInterestingMask,
298  eq,
299  &done);
300 
301  // Record the actual write.
302  if (ra_status == kRAHasNotBeenSaved) {
303  push(ra);
304  }
305  RecordWriteStub stub(object, value, address, remembered_set_action, fp_mode);
306  CallStub(&stub);
307  if (ra_status == kRAHasNotBeenSaved) {
308  pop(ra);
309  }
310 
311  bind(&done);
312 
313  // Clobber clobbered registers when running with the debug-code flag
314  // turned on to provoke errors.
315  if (emit_debug_code()) {
316  li(address, Operand(BitCast<int32_t>(kZapValue + 12)));
317  li(value, Operand(BitCast<int32_t>(kZapValue + 16)));
318  }
319 }
320 
321 
322 void MacroAssembler::RememberedSetHelper(Register object, // For debug tests.
323  Register address,
324  Register scratch,
325  SaveFPRegsMode fp_mode,
326  RememberedSetFinalAction and_then) {
327  Label done;
328  if (emit_debug_code()) {
329  Label ok;
330  JumpIfNotInNewSpace(object, scratch, &ok);
331  stop("Remembered set pointer is in new space");
332  bind(&ok);
333  }
334  // Load store buffer top.
335  ExternalReference store_buffer =
336  ExternalReference::store_buffer_top(isolate());
337  li(t8, Operand(store_buffer));
338  lw(scratch, MemOperand(t8));
339  // Store pointer to buffer and increment buffer top.
340  sw(address, MemOperand(scratch));
341  Addu(scratch, scratch, kPointerSize);
342  // Write back new top of buffer.
343  sw(scratch, MemOperand(t8));
344  // Call stub on end of buffer.
345  // Check for end of buffer.
346  And(t8, scratch, Operand(StoreBuffer::kStoreBufferOverflowBit));
347  if (and_then == kFallThroughAtEnd) {
348  Branch(&done, eq, t8, Operand(zero_reg));
349  } else {
350  ASSERT(and_then == kReturnAtEnd);
351  Ret(eq, t8, Operand(zero_reg));
352  }
353  push(ra);
354  StoreBufferOverflowStub store_buffer_overflow =
355  StoreBufferOverflowStub(fp_mode);
356  CallStub(&store_buffer_overflow);
357  pop(ra);
358  bind(&done);
359  if (and_then == kReturnAtEnd) {
360  Ret();
361  }
362 }
363 
364 
365 // -----------------------------------------------------------------------------
366 // Allocation support.
367 
368 
369 void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
370  Register scratch,
371  Label* miss) {
372  Label same_contexts;
373 
374  ASSERT(!holder_reg.is(scratch));
375  ASSERT(!holder_reg.is(at));
376  ASSERT(!scratch.is(at));
377 
378  // Load current lexical context from the stack frame.
379  lw(scratch, MemOperand(fp, StandardFrameConstants::kContextOffset));
380  // In debug mode, make sure the lexical context is set.
381 #ifdef DEBUG
382  Check(ne, kWeShouldNotHaveAnEmptyLexicalContext,
383  scratch, Operand(zero_reg));
384 #endif
385 
386  // Load the native context of the current context.
387  int offset =
388  Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize;
389  lw(scratch, FieldMemOperand(scratch, offset));
390  lw(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset));
391 
392  // Check the context is a native context.
393  if (emit_debug_code()) {
394  push(holder_reg); // Temporarily save holder on the stack.
395  // Read the first word and compare to the native_context_map.
396  lw(holder_reg, FieldMemOperand(scratch, HeapObject::kMapOffset));
397  LoadRoot(at, Heap::kNativeContextMapRootIndex);
398  Check(eq, kJSGlobalObjectNativeContextShouldBeANativeContext,
399  holder_reg, Operand(at));
400  pop(holder_reg); // Restore holder.
401  }
402 
403  // Check if both contexts are the same.
404  lw(at, FieldMemOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
405  Branch(&same_contexts, eq, scratch, Operand(at));
406 
407  // Check the context is a native context.
408  if (emit_debug_code()) {
409  push(holder_reg); // Temporarily save holder on the stack.
410  mov(holder_reg, at); // Move at to its holding place.
411  LoadRoot(at, Heap::kNullValueRootIndex);
412  Check(ne, kJSGlobalProxyContextShouldNotBeNull,
413  holder_reg, Operand(at));
414 
415  lw(holder_reg, FieldMemOperand(holder_reg, HeapObject::kMapOffset));
416  LoadRoot(at, Heap::kNativeContextMapRootIndex);
417  Check(eq, kJSGlobalObjectNativeContextShouldBeANativeContext,
418  holder_reg, Operand(at));
419  // Restoring at is not needed; at is reloaded below.
420  pop(holder_reg); // Restore holder.
421  // Restore at to holder's context.
422  lw(at, FieldMemOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
423  }
424 
425  // Check that the security token in the calling global object is
426  // compatible with the security token in the receiving global
427  // object.
428  int token_offset = Context::kHeaderSize +
429  Context::SECURITY_TOKEN_INDEX * kPointerSize;
430 
431  lw(scratch, FieldMemOperand(scratch, token_offset));
432  lw(at, FieldMemOperand(at, token_offset));
433  Branch(miss, ne, scratch, Operand(at));
434 
435  bind(&same_contexts);
436 }
437 
438 
439 void MacroAssembler::GetNumberHash(Register reg0, Register scratch) {
440  // First of all we assign the hash seed to scratch.
441  LoadRoot(scratch, Heap::kHashSeedRootIndex);
442  SmiUntag(scratch);
443 
444  // Xor original key with a seed.
445  xor_(reg0, reg0, scratch);
446 
447  // Compute the hash code from the untagged key. This must be kept in sync
448  // with ComputeIntegerHash in utils.h.
449  //
450  // hash = ~hash + (hash << 15);
451  nor(scratch, reg0, zero_reg);
452  sll(at, reg0, 15);
453  addu(reg0, scratch, at);
454 
455  // hash = hash ^ (hash >> 12);
456  srl(at, reg0, 12);
457  xor_(reg0, reg0, at);
458 
459  // hash = hash + (hash << 2);
460  sll(at, reg0, 2);
461  addu(reg0, reg0, at);
462 
463  // hash = hash ^ (hash >> 4);
464  srl(at, reg0, 4);
465  xor_(reg0, reg0, at);
466 
467  // hash = hash * 2057;
468  sll(scratch, reg0, 11);
469  sll(at, reg0, 3);
470  addu(reg0, reg0, at);
471  addu(reg0, reg0, scratch);
472 
473  // hash = hash ^ (hash >> 16);
474  srl(at, reg0, 16);
475  xor_(reg0, reg0, at);
476 }
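// Sketch, not part of this file: the same hash in plain C++, to make the
// shift/add steps above easy to check. As the comments note, the assembly
// must stay in sync with ComputeIntegerHash in utils.h. The helper name is
// illustrative only.

#include <cstdint>

uint32_t ComputeSeededHashSketch(uint32_t key, uint32_t seed) {
  uint32_t hash = key ^ seed;   // xor_(reg0, reg0, scratch)
  hash = ~hash + (hash << 15);  // nor + sll + addu
  hash = hash ^ (hash >> 12);
  hash = hash + (hash << 2);
  hash = hash ^ (hash >> 4);
  hash = hash * 2057;           // same as hash + (hash << 3) + (hash << 11)
  hash = hash ^ (hash >> 16);
  return hash;
}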
477 
478 
479 void MacroAssembler::LoadFromNumberDictionary(Label* miss,
480  Register elements,
481  Register key,
482  Register result,
483  Register reg0,
484  Register reg1,
485  Register reg2) {
486  // Register use:
487  //
488  // elements - holds the slow-case elements of the receiver on entry.
489  // Unchanged unless 'result' is the same register.
490  //
491  // key - holds the smi key on entry.
492  // Unchanged unless 'result' is the same register.
493  //
494  //
495  // result - holds the result on exit if the load succeeded.
496  // Allowed to be the same as 'key' or 'result'.
497  // Unchanged on bailout so 'key' or 'result' can be used
498  // in further computation.
499  //
500  // Scratch registers:
501  //
502  // reg0 - holds the untagged key on entry and holds the hash once computed.
503  //
504  // reg1 - Used to hold the capacity mask of the dictionary.
505  //
506  // reg2 - Used for the index into the dictionary.
507  // at - Temporary (avoid MacroAssembler instructions also using 'at').
508  Label done;
509 
510  GetNumberHash(reg0, reg1);
511 
512  // Compute the capacity mask.
513  lw(reg1, FieldMemOperand(elements, SeededNumberDictionary::kCapacityOffset));
514  sra(reg1, reg1, kSmiTagSize);
515  Subu(reg1, reg1, Operand(1));
516 
517  // Generate an unrolled loop that performs a few probes before giving up.
518  for (int i = 0; i < kNumberDictionaryProbes; i++) {
519  // Use reg2 for index calculations and keep the hash intact in reg0.
520  mov(reg2, reg0);
521  // Compute the masked index: (hash + i + i * i) & mask.
522  if (i > 0) {
523  Addu(reg2, reg2, Operand(SeededNumberDictionary::GetProbeOffset(i)));
524  }
525  and_(reg2, reg2, reg1);
526 
527  // Scale the index by multiplying by the element size.
528  ASSERT(SeededNumberDictionary::kEntrySize == 3);
529  sll(at, reg2, 1); // 2x.
530  addu(reg2, reg2, at); // reg2 = reg2 * 3.
531 
532  // Check if the key is identical to the name.
533  sll(at, reg2, kPointerSizeLog2);
534  addu(reg2, elements, at);
535 
536  lw(at, FieldMemOperand(reg2, SeededNumberDictionary::kElementsStartOffset));
537  if (i != kNumberDictionaryProbes - 1) {
538  Branch(&done, eq, key, Operand(at));
539  } else {
540  Branch(miss, ne, key, Operand(at));
541  }
542  }
543 
544  bind(&done);
545  // Check that the value is a normal property.
546  // reg2: elements + (index * kPointerSize).
547  const int kDetailsOffset =
548  SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize;
549  lw(reg1, FieldMemOperand(reg2, kDetailsOffset));
550  And(at, reg1, Operand(Smi::FromInt(PropertyDetails::TypeField::kMask)));
551  Branch(miss, ne, at, Operand(zero_reg));
552 
553  // Get the value at the masked, scaled index and return.
554  const int kValueOffset =
555  SeededNumberDictionary::kElementsStartOffset + kPointerSize;
556  lw(result, FieldMemOperand(reg2, kValueOffset));
557 }
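// Sketch, not part of this file: the offset arithmetic performed by the
// unrolled probe loop above. ProbeOffset mirrors the comment
// "(hash + i + i * i) & mask" and is only an illustrative stand-in for
// SeededNumberDictionary::GetProbeOffset(i); kEntrySize is 3 words
// (key, value, details), matching the ASSERT in the loop.

#include <cstdint>

uint32_t ProbeOffset(uint32_t i) { return i + i * i; }

uint32_t EntryWordIndex(uint32_t hash, uint32_t capacity, uint32_t probe) {
  uint32_t mask = capacity - 1;                        // capacity is a power of two
  uint32_t index = (hash + ProbeOffset(probe)) & mask; // masked entry index
  return index * 3;                                    // scaled by kEntrySize == 3
}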
558 
559 
560 // ---------------------------------------------------------------------------
561 // Instruction macros.
562 
563 void MacroAssembler::Addu(Register rd, Register rs, const Operand& rt) {
564  if (rt.is_reg()) {
565  addu(rd, rs, rt.rm());
566  } else {
567  if (is_int16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
568  addiu(rd, rs, rt.imm32_);
569  } else {
570  // li handles the relocation.
571  ASSERT(!rs.is(at));
572  li(at, rt);
573  addu(rd, rs, at);
574  }
575  }
576 }
577 
578 
579 void MacroAssembler::Subu(Register rd, Register rs, const Operand& rt) {
580  if (rt.is_reg()) {
581  subu(rd, rs, rt.rm());
582  } else {
583  if (is_int16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
584  addiu(rd, rs, -rt.imm32_); // No subiu instr, use addiu(x, y, -imm).
585  } else {
586  // li handles the relocation.
587  ASSERT(!rs.is(at));
588  li(at, rt);
589  subu(rd, rs, at);
590  }
591  }
592 }
593 
594 
595 void MacroAssembler::Mul(Register rd, Register rs, const Operand& rt) {
596  if (rt.is_reg()) {
597  if (kArchVariant == kLoongson) {
598  mult(rs, rt.rm());
599  mflo(rd);
600  } else {
601  mul(rd, rs, rt.rm());
602  }
603  } else {
604  // li handles the relocation.
605  ASSERT(!rs.is(at));
606  li(at, rt);
607  if (kArchVariant == kLoongson) {
608  mult(rs, at);
609  mflo(rd);
610  } else {
611  mul(rd, rs, at);
612  }
613  }
614 }
615 
616 
617 void MacroAssembler::Mult(Register rs, const Operand& rt) {
618  if (rt.is_reg()) {
619  mult(rs, rt.rm());
620  } else {
621  // li handles the relocation.
622  ASSERT(!rs.is(at));
623  li(at, rt);
624  mult(rs, at);
625  }
626 }
627 
628 
629 void MacroAssembler::Multu(Register rs, const Operand& rt) {
630  if (rt.is_reg()) {
631  multu(rs, rt.rm());
632  } else {
633  // li handles the relocation.
634  ASSERT(!rs.is(at));
635  li(at, rt);
636  multu(rs, at);
637  }
638 }
639 
640 
641 void MacroAssembler::Div(Register rs, const Operand& rt) {
642  if (rt.is_reg()) {
643  div(rs, rt.rm());
644  } else {
645  // li handles the relocation.
646  ASSERT(!rs.is(at));
647  li(at, rt);
648  div(rs, at);
649  }
650 }
651 
652 
653 void MacroAssembler::Divu(Register rs, const Operand& rt) {
654  if (rt.is_reg()) {
655  divu(rs, rt.rm());
656  } else {
657  // li handles the relocation.
658  ASSERT(!rs.is(at));
659  li(at, rt);
660  divu(rs, at);
661  }
662 }
663 
664 
665 void MacroAssembler::And(Register rd, Register rs, const Operand& rt) {
666  if (rt.is_reg()) {
667  and_(rd, rs, rt.rm());
668  } else {
669  if (is_uint16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
670  andi(rd, rs, rt.imm32_);
671  } else {
672  // li handles the relocation.
673  ASSERT(!rs.is(at));
674  li(at, rt);
675  and_(rd, rs, at);
676  }
677  }
678 }
679 
680 
681 void MacroAssembler::Or(Register rd, Register rs, const Operand& rt) {
682  if (rt.is_reg()) {
683  or_(rd, rs, rt.rm());
684  } else {
685  if (is_uint16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
686  ori(rd, rs, rt.imm32_);
687  } else {
688  // li handles the relocation.
689  ASSERT(!rs.is(at));
690  li(at, rt);
691  or_(rd, rs, at);
692  }
693  }
694 }
695 
696 
697 void MacroAssembler::Xor(Register rd, Register rs, const Operand& rt) {
698  if (rt.is_reg()) {
699  xor_(rd, rs, rt.rm());
700  } else {
701  if (is_uint16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
702  xori(rd, rs, rt.imm32_);
703  } else {
704  // li handles the relocation.
705  ASSERT(!rs.is(at));
706  li(at, rt);
707  xor_(rd, rs, at);
708  }
709  }
710 }
711 
712 
713 void MacroAssembler::Nor(Register rd, Register rs, const Operand& rt) {
714  if (rt.is_reg()) {
715  nor(rd, rs, rt.rm());
716  } else {
717  // li handles the relocation.
718  ASSERT(!rs.is(at));
719  li(at, rt);
720  nor(rd, rs, at);
721  }
722 }
723 
724 
725 void MacroAssembler::Neg(Register rs, const Operand& rt) {
726  ASSERT(rt.is_reg());
727  ASSERT(!at.is(rs));
728  ASSERT(!at.is(rt.rm()));
729  li(at, -1);
730  xor_(rs, rt.rm(), at);
731 }
732 
733 
734 void MacroAssembler::Slt(Register rd, Register rs, const Operand& rt) {
735  if (rt.is_reg()) {
736  slt(rd, rs, rt.rm());
737  } else {
738  if (is_int16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
739  slti(rd, rs, rt.imm32_);
740  } else {
741  // li handles the relocation.
742  ASSERT(!rs.is(at));
743  li(at, rt);
744  slt(rd, rs, at);
745  }
746  }
747 }
748 
749 
750 void MacroAssembler::Sltu(Register rd, Register rs, const Operand& rt) {
751  if (rt.is_reg()) {
752  sltu(rd, rs, rt.rm());
753  } else {
754  if (is_uint16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
755  sltiu(rd, rs, rt.imm32_);
756  } else {
757  // li handles the relocation.
758  ASSERT(!rs.is(at));
759  li(at, rt);
760  sltu(rd, rs, at);
761  }
762  }
763 }
764 
765 
766 void MacroAssembler::Ror(Register rd, Register rs, const Operand& rt) {
767  if (kArchVariant == kMips32r2) {
768  if (rt.is_reg()) {
769  rotrv(rd, rs, rt.rm());
770  } else {
771  rotr(rd, rs, rt.imm32_);
772  }
773  } else {
774  if (rt.is_reg()) {
775  subu(at, zero_reg, rt.rm());
776  sllv(at, rs, at);
777  srlv(rd, rs, rt.rm());
778  or_(rd, rd, at);
779  } else {
780  if (rt.imm32_ == 0) {
781  srl(rd, rs, 0);
782  } else {
783  srl(at, rs, rt.imm32_);
784  sll(rd, rs, (0x20 - rt.imm32_) & 0x1f);
785  or_(rd, rd, at);
786  }
787  }
788  }
789 }
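// Sketch, not part of this file: what the pre-R2 fallback above computes for
// an immediate rotate amount, in plain C++. The helper name is illustrative.

#include <cstdint>

uint32_t RotateRight32(uint32_t value, uint32_t n) {
  n &= 31;                    // same masking as (0x20 - imm32_) & 0x1f
  if (n == 0) return value;   // the srl(rd, rs, 0) case
  return (value >> n) | (value << (32 - n));
}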
790 
791 
792 void MacroAssembler::Pref(int32_t hint, const MemOperand& rs) {
793  if (kArchVariant == kLoongson) {
794  lw(zero_reg, rs);
795  } else {
796  pref(hint, rs);
797  }
798 }
799 
800 
801 //------------Pseudo-instructions-------------
802 
803 void MacroAssembler::Ulw(Register rd, const MemOperand& rs) {
804  lwr(rd, rs);
805  lwl(rd, MemOperand(rs.rm(), rs.offset() + 3));
806 }
807 
808 
809 void MacroAssembler::Usw(Register rd, const MemOperand& rs) {
810  swr(rd, rs);
811  swl(rd, MemOperand(rs.rm(), rs.offset() + 3));
812 }
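// Sketch, not part of this file: the lwr/lwl and swr/swl pairs above perform
// a 32-bit load/store at an address with no alignment guarantee; pairing
// offset and offset + 3 this way matches a little-endian target, which is an
// assumption of this sketch. A portable byte-wise equivalent:

#include <cstdint>
#include <cstring>

uint32_t LoadUnaligned32(const uint8_t* p) {
  uint32_t v;
  std::memcpy(&v, p, sizeof(v));  // byte-wise copy, no alignment required
  return v;
}

void StoreUnaligned32(uint8_t* p, uint32_t v) {
  std::memcpy(p, &v, sizeof(v));
}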
813 
814 
815 void MacroAssembler::li(Register dst, Handle<Object> value, LiFlags mode) {
816  AllowDeferredHandleDereference smi_check;
817  if (value->IsSmi()) {
818  li(dst, Operand(value), mode);
819  } else {
820  ASSERT(value->IsHeapObject());
821  if (isolate()->heap()->InNewSpace(*value)) {
822  Handle<Cell> cell = isolate()->factory()->NewCell(value);
823  li(dst, Operand(cell));
824  lw(dst, FieldMemOperand(dst, Cell::kValueOffset));
825  } else {
826  li(dst, Operand(value));
827  }
828  }
829 }
830 
831 
832 void MacroAssembler::li(Register rd, Operand j, LiFlags mode) {
833  ASSERT(!j.is_reg());
834  BlockTrampolinePoolScope block_trampoline_pool(this);
835  if (!MustUseReg(j.rmode_) && mode == OPTIMIZE_SIZE) {
836  // Normal load of an immediate value which does not need Relocation Info.
837  if (is_int16(j.imm32_)) {
838  addiu(rd, zero_reg, j.imm32_);
839  } else if (!(j.imm32_ & kHiMask)) {
840  ori(rd, zero_reg, j.imm32_);
841  } else if (!(j.imm32_ & kImm16Mask)) {
842  lui(rd, (j.imm32_ >> kLuiShift) & kImm16Mask);
843  } else {
844  lui(rd, (j.imm32_ >> kLuiShift) & kImm16Mask);
845  ori(rd, rd, (j.imm32_ & kImm16Mask));
846  }
847  } else {
848  if (MustUseReg(j.rmode_)) {
849  RecordRelocInfo(j.rmode_, j.imm32_);
850  }
851  // We always emit the same number of instructions because this code may be
852  // patched later to load another value, which can take two instructions.
853  lui(rd, (j.imm32_ >> kLuiShift) & kImm16Mask);
854  ori(rd, rd, (j.imm32_ & kImm16Mask));
855  }
856 }
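// Sketch, not part of this file: how a 32-bit immediate splits across the
// lui/ori pair emitted above (kLuiShift is 16 and kImm16Mask is 0xffff in
// this backend). When relocation info is attached, both instructions are
// always emitted so the pair can later be patched to hold a different value.
// The helper name is illustrative only.

#include <cstdint>
#include <cstdio>

void PrintLuiOriSplit(uint32_t imm) {
  uint32_t hi = (imm >> 16) & 0xffff;  // operand of lui
  uint32_t lo = imm & 0xffff;          // operand of ori
  std::printf("lui rd, 0x%04x ; ori rd, rd, 0x%04x\n",
              static_cast<unsigned>(hi), static_cast<unsigned>(lo));
}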
857 
858 
859 void MacroAssembler::MultiPush(RegList regs) {
860  int16_t num_to_push = NumberOfBitsSet(regs);
861  int16_t stack_offset = num_to_push * kPointerSize;
862 
863  Subu(sp, sp, Operand(stack_offset));
864  for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
865  if ((regs & (1 << i)) != 0) {
866  stack_offset -= kPointerSize;
867  sw(ToRegister(i), MemOperand(sp, stack_offset));
868  }
869  }
870 }
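// Sketch, not part of this file: the stack layout produced by MultiPush
// above. Registers are stored from the highest encoding downward, so a pushed
// register's offset from sp equals the number of pushed registers with a
// smaller encoding, times the 4-byte pointer size of MIPS32. The helper name
// is illustrative only.

#include <cstdint>

int SlotOffsetOf(uint32_t regs, int code) {
  int below = 0;
  for (int i = 0; i < code; i++) {
    if (regs & (1u << i)) below++;   // pushed registers with smaller codes
  }
  return below * 4;                  // kPointerSize on MIPS32
}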
871 
872 
873 void MacroAssembler::MultiPushReversed(RegList regs) {
874  int16_t num_to_push = NumberOfBitsSet(regs);
875  int16_t stack_offset = num_to_push * kPointerSize;
876 
877  Subu(sp, sp, Operand(stack_offset));
878  for (int16_t i = 0; i < kNumRegisters; i++) {
879  if ((regs & (1 << i)) != 0) {
880  stack_offset -= kPointerSize;
881  sw(ToRegister(i), MemOperand(sp, stack_offset));
882  }
883  }
884 }
885 
886 
887 void MacroAssembler::MultiPop(RegList regs) {
888  int16_t stack_offset = 0;
889 
890  for (int16_t i = 0; i < kNumRegisters; i++) {
891  if ((regs & (1 << i)) != 0) {
892  lw(ToRegister(i), MemOperand(sp, stack_offset));
893  stack_offset += kPointerSize;
894  }
895  }
896  addiu(sp, sp, stack_offset);
897 }
898 
899 
900 void MacroAssembler::MultiPopReversed(RegList regs) {
901  int16_t stack_offset = 0;
902 
903  for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
904  if ((regs & (1 << i)) != 0) {
905  lw(ToRegister(i), MemOperand(sp, stack_offset));
906  stack_offset += kPointerSize;
907  }
908  }
909  addiu(sp, sp, stack_offset);
910 }
911 
912 
913 void MacroAssembler::MultiPushFPU(RegList regs) {
914  int16_t num_to_push = NumberOfBitsSet(regs);
915  int16_t stack_offset = num_to_push * kDoubleSize;
916 
917  Subu(sp, sp, Operand(stack_offset));
918  for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
919  if ((regs & (1 << i)) != 0) {
920  stack_offset -= kDoubleSize;
921  sdc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
922  }
923  }
924 }
925 
926 
927 void MacroAssembler::MultiPushReversedFPU(RegList regs) {
928  int16_t num_to_push = NumberOfBitsSet(regs);
929  int16_t stack_offset = num_to_push * kDoubleSize;
930 
931  Subu(sp, sp, Operand(stack_offset));
932  for (int16_t i = 0; i < kNumRegisters; i++) {
933  if ((regs & (1 << i)) != 0) {
934  stack_offset -= kDoubleSize;
935  sdc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
936  }
937  }
938 }
939 
940 
941 void MacroAssembler::MultiPopFPU(RegList regs) {
942  int16_t stack_offset = 0;
943 
944  for (int16_t i = 0; i < kNumRegisters; i++) {
945  if ((regs & (1 << i)) != 0) {
946  ldc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
947  stack_offset += kDoubleSize;
948  }
949  }
950  addiu(sp, sp, stack_offset);
951 }
952 
953 
954 void MacroAssembler::MultiPopReversedFPU(RegList regs) {
955  int16_t stack_offset = 0;
956 
957  for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
958  if ((regs & (1 << i)) != 0) {
959  ldc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
960  stack_offset += kDoubleSize;
961  }
962  }
963  addiu(sp, sp, stack_offset);
964 }
965 
966 
967 void MacroAssembler::FlushICache(Register address, unsigned instructions) {
968  RegList saved_regs = kJSCallerSaved | ra.bit();
969  MultiPush(saved_regs);
970  AllowExternalCallThatCantCauseGC scope(this);
971 
972  // Save to a0 in case address == t0.
973  Move(a0, address);
974  PrepareCallCFunction(2, t0);
975 
976  li(a1, instructions * kInstrSize);
977  CallCFunction(ExternalReference::flush_icache_function(isolate()), 2);
978  MultiPop(saved_regs);
979 }
980 
981 
982 void MacroAssembler::Ext(Register rt,
983  Register rs,
984  uint16_t pos,
985  uint16_t size) {
986  ASSERT(pos < 32);
987  ASSERT(pos + size < 33);
988 
989  if (kArchVariant == kMips32r2) {
990  ext_(rt, rs, pos, size);
991  } else {
992  // Move rs to rt and shift it left then right to get the
993  // desired bitfield on the right side and zeroes on the left.
994  int shift_left = 32 - (pos + size);
995  sll(rt, rs, shift_left); // Acts as a move if shift_left == 0.
996 
997  int shift_right = 32 - size;
998  if (shift_right > 0) {
999  srl(rt, rt, shift_right);
1000  }
1001  }
1002 }
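// Sketch, not part of this file: the pre-R2 fallback above isolates 'size'
// bits starting at 'pos' by shifting them to the top of the word and back
// down with a logical shift (preconditions as in the ASSERTs: pos < 32,
// pos + size <= 32). The helper name is illustrative only.

#include <cstdint>

uint32_t ExtractBits(uint32_t value, unsigned pos, unsigned size) {
  uint32_t shifted = value << (32 - (pos + size));   // acts as a move if 0
  return (size < 32) ? (shifted >> (32 - size)) : shifted;
}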
1003 
1004 
1005 void MacroAssembler::Ins(Register rt,
1006  Register rs,
1007  uint16_t pos,
1008  uint16_t size) {
1009  ASSERT(pos < 32);
1010  ASSERT(pos + size <= 32);
1011  ASSERT(size != 0);
1012 
1013  if (kArchVariant == kMips32r2) {
1014  ins_(rt, rs, pos, size);
1015  } else {
1016  ASSERT(!rt.is(t8) && !rs.is(t8));
1017  Subu(at, zero_reg, Operand(1));
1018  srl(at, at, 32 - size);
1019  and_(t8, rs, at);
1020  sll(t8, t8, pos);
1021  sll(at, at, pos);
1022  nor(at, at, zero_reg);
1023  and_(at, rt, at);
1024  or_(rt, t8, at);
1025  }
1026 }
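// Sketch, not part of this file: the pre-R2 fallback above builds a mask of
// 'size' ones at bit 'pos', takes the field from 'src' and keeps every other
// bit of 'dst'. The helper name is illustrative only.

#include <cstdint>

uint32_t InsertBits(uint32_t dst, uint32_t src, unsigned pos, unsigned size) {
  uint32_t ones = (size < 32) ? ((1u << size) - 1u) : 0xffffffffu;
  uint32_t field_mask = ones << pos;
  return (dst & ~field_mask) | ((src << pos) & field_mask);
}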
1027 
1028 
1029 void MacroAssembler::Cvt_d_uw(FPURegister fd,
1030  FPURegister fs,
1031  FPURegister scratch) {
1032  // Move the data from fs to t8.
1033  mfc1(t8, fs);
1034  Cvt_d_uw(fd, t8, scratch);
1035 }
1036 
1037 
1038 void MacroAssembler::Cvt_d_uw(FPURegister fd,
1039  Register rs,
1040  FPURegister scratch) {
1041  // Convert rs to a FP value in fd (and fd + 1).
1042  // We do this by converting rs with its MSB cleared (to avoid a signed
1043  // conversion), then adding 2^31 to the result if the MSB was set.
1044 
1045  ASSERT(!fd.is(scratch));
1046  ASSERT(!rs.is(t9));
1047  ASSERT(!rs.is(at));
1048 
1049  // Save rs's MSB to t9.
1050  Ext(t9, rs, 31, 1);
1051  // Remove rs's MSB.
1052  Ext(at, rs, 0, 31);
1053  // Move the result to fd.
1054  mtc1(at, fd);
1055 
1056  // Convert fd to a real FP value.
1057  cvt_d_w(fd, fd);
1058 
1059  Label conversion_done;
1060 
1061  // If rs's MSB was 0, it's done.
1062  // Otherwise we need to add that to the FP register.
1063  Branch(&conversion_done, eq, t9, Operand(zero_reg));
1064 
1065  // Load 2^31 into f20 as its float representation.
1066  li(at, 0x41E00000);
1067  mtc1(at, FPURegister::from_code(scratch.code() + 1));
1068  mtc1(zero_reg, scratch);
1069  // Add it to fd.
1070  add_d(fd, fd, scratch);
1071 
1072  bind(&conversion_done);
1073 }
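// Sketch, not part of this file: the unsigned-to-double trick above in plain
// C++. The low 31 bits are converted with the signed path (they are always
// non-negative), and 2^31 is added back when the MSB was set; 0x41E00000 is
// the high word of the IEEE-754 double 2147483648.0. The helper name is
// illustrative only.

#include <cstdint>

double Uint32ToDouble(uint32_t u) {
  double result = static_cast<double>(static_cast<int32_t>(u & 0x7fffffffu));
  if (u & 0x80000000u) result += 2147483648.0;  // add 2^31 back
  return result;
}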
1074 
1075 
1076 void MacroAssembler::Trunc_uw_d(FPURegister fd,
1077  FPURegister fs,
1078  FPURegister scratch) {
1079  Trunc_uw_d(fs, t8, scratch);
1080  mtc1(t8, fd);
1081 }
1082 
1083 
1084 void MacroAssembler::Trunc_w_d(FPURegister fd, FPURegister fs) {
1085  if (kArchVariant == kLoongson && fd.is(fs)) {
1086  mfc1(t8, FPURegister::from_code(fs.code() + 1));
1087  trunc_w_d(fd, fs);
1088  mtc1(t8, FPURegister::from_code(fs.code() + 1));
1089  } else {
1090  trunc_w_d(fd, fs);
1091  }
1092 }
1093 
1094 
1095 void MacroAssembler::Round_w_d(FPURegister fd, FPURegister fs) {
1096  if (kArchVariant == kLoongson && fd.is(fs)) {
1097  mfc1(t8, FPURegister::from_code(fs.code() + 1));
1098  round_w_d(fd, fs);
1099  mtc1(t8, FPURegister::from_code(fs.code() + 1));
1100  } else {
1101  round_w_d(fd, fs);
1102  }
1103 }
1104 
1105 
1106 void MacroAssembler::Floor_w_d(FPURegister fd, FPURegister fs) {
1107  if (kArchVariant == kLoongson && fd.is(fs)) {
1108  mfc1(t8, FPURegister::from_code(fs.code() + 1));
1109  floor_w_d(fd, fs);
1110  mtc1(t8, FPURegister::from_code(fs.code() + 1));
1111  } else {
1112  floor_w_d(fd, fs);
1113  }
1114 }
1115 
1116 
1117 void MacroAssembler::Ceil_w_d(FPURegister fd, FPURegister fs) {
1118  if (kArchVariant == kLoongson && fd.is(fs)) {
1119  mfc1(t8, FPURegister::from_code(fs.code() + 1));
1120  ceil_w_d(fd, fs);
1121  mtc1(t8, FPURegister::from_code(fs.code() + 1));
1122  } else {
1123  ceil_w_d(fd, fs);
1124  }
1125 }
1126 
1127 
1128 void MacroAssembler::Trunc_uw_d(FPURegister fd,
1129  Register rs,
1130  FPURegister scratch) {
1131  ASSERT(!fd.is(scratch));
1132  ASSERT(!rs.is(at));
1133 
1134  // Load 2^31 into scratch as its float representation.
1135  li(at, 0x41E00000);
1136  mtc1(at, FPURegister::from_code(scratch.code() + 1));
1137  mtc1(zero_reg, scratch);
1138  // Test if scratch > fd.
1139  // If fd < 2^31 we can convert it normally.
1140  Label simple_convert;
1141  BranchF(&simple_convert, NULL, lt, fd, scratch);
1142 
1143  // First we subtract 2^31 from fd, then trunc it to rs
1144  // and add 2^31 to rs.
1145  sub_d(scratch, fd, scratch);
1146  trunc_w_d(scratch, scratch);
1147  mfc1(rs, scratch);
1148  Or(rs, rs, 1 << 31);
1149 
1150  Label done;
1151  Branch(&done);
1152  // Simple conversion.
1153  bind(&simple_convert);
1154  trunc_w_d(scratch, fd);
1155  mfc1(rs, scratch);
1156 
1157  bind(&done);
1158 }
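// Sketch, not part of this file: the branchy truncation above in plain C++.
// Values below 2^31 are truncated directly; larger values have 2^31
// subtracted first and bit 31 OR-ed back in afterwards. Inputs are assumed to
// be non-NaN and in range, as the callers arrange. The helper name is
// illustrative only.

#include <cstdint>

uint32_t DoubleToUint32(double d) {
  if (d < 2147483648.0) {
    return static_cast<uint32_t>(static_cast<int32_t>(d));  // simple_convert
  }
  uint32_t low =
      static_cast<uint32_t>(static_cast<int32_t>(d - 2147483648.0));
  return low | 0x80000000u;                                  // Or(rs, rs, 1 << 31)
}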
1159 
1160 
1161 void MacroAssembler::BranchF(Label* target,
1162  Label* nan,
1163  Condition cc,
1164  FPURegister cmp1,
1165  FPURegister cmp2,
1166  BranchDelaySlot bd) {
1167  BlockTrampolinePoolScope block_trampoline_pool(this);
1168  if (cc == al) {
1169  Branch(bd, target);
1170  return;
1171  }
1172 
1173  ASSERT(nan || target);
1174  // Check for unordered (NaN) cases.
1175  if (nan) {
1176  c(UN, D, cmp1, cmp2);
1177  bc1t(nan);
1178  }
1179 
1180  if (target) {
1181  // Here NaN cases were either handled by this function or are assumed to
1182  // have been handled by the caller.
1183  // Unsigned conditions are treated as their signed counterpart.
1184  switch (cc) {
1185  case lt:
1186  c(OLT, D, cmp1, cmp2);
1187  bc1t(target);
1188  break;
1189  case gt:
1190  c(ULE, D, cmp1, cmp2);
1191  bc1f(target);
1192  break;
1193  case ge:
1194  c(ULT, D, cmp1, cmp2);
1195  bc1f(target);
1196  break;
1197  case le:
1198  c(OLE, D, cmp1, cmp2);
1199  bc1t(target);
1200  break;
1201  case eq:
1202  c(EQ, D, cmp1, cmp2);
1203  bc1t(target);
1204  break;
1205  case ueq:
1206  c(UEQ, D, cmp1, cmp2);
1207  bc1t(target);
1208  break;
1209  case ne:
1210  c(EQ, D, cmp1, cmp2);
1211  bc1f(target);
1212  break;
1213  case nue:
1214  c(UEQ, D, cmp1, cmp2);
1215  bc1f(target);
1216  break;
1217  default:
1218  CHECK(0);
1219  };
1220  }
1221 
1222  if (bd == PROTECT) {
1223  nop();
1224  }
1225 }
1226 
1227 
1228 void MacroAssembler::Move(FPURegister dst, double imm) {
1229  static const DoubleRepresentation minus_zero(-0.0);
1230  static const DoubleRepresentation zero(0.0);
1231  DoubleRepresentation value_rep(imm);
1232  // Handle special values first.
1233  bool force_load = dst.is(kDoubleRegZero);
1234  if (value_rep == zero && !force_load) {
1235  mov_d(dst, kDoubleRegZero);
1236  } else if (value_rep == minus_zero && !force_load) {
1237  neg_d(dst, kDoubleRegZero);
1238  } else {
1239  uint32_t lo, hi;
1240  DoubleAsTwoUInt32(imm, &lo, &hi);
1241  // Move the low part of the double into the lower register of the
1242  // corresponding FPU register pair.
1243  if (lo != 0) {
1244  li(at, Operand(lo));
1245  mtc1(at, dst);
1246  } else {
1247  mtc1(zero_reg, dst);
1248  }
1249  // Move the high part of the double into the higher register of the
1250  // corresponding FPU register pair.
1251  if (hi != 0) {
1252  li(at, Operand(hi));
1253  mtc1(at, dst.high());
1254  } else {
1255  mtc1(zero_reg, dst.high());
1256  }
1257  }
1258 }
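// Sketch, not part of this file: what DoubleAsTwoUInt32 boils down to. The
// IEEE-754 bit pattern of the double is split into the low and high 32-bit
// words that the two mtc1 instructions above load into the FPU register
// pair. The helper name is illustrative only.

#include <cstdint>
#include <cstring>

void SplitDouble(double imm, uint32_t* lo, uint32_t* hi) {
  uint64_t bits;
  std::memcpy(&bits, &imm, sizeof(bits));  // reinterpret without aliasing UB
  *lo = static_cast<uint32_t>(bits);
  *hi = static_cast<uint32_t>(bits >> 32);
}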
1259 
1260 
1261 void MacroAssembler::Movz(Register rd, Register rs, Register rt) {
1262  if (kArchVariant == kLoongson) {
1263  Label done;
1264  Branch(&done, ne, rt, Operand(zero_reg));
1265  mov(rd, rs);
1266  bind(&done);
1267  } else {
1268  movz(rd, rs, rt);
1269  }
1270 }
1271 
1272 
1273 void MacroAssembler::Movn(Register rd, Register rs, Register rt) {
1274  if (kArchVariant == kLoongson) {
1275  Label done;
1276  Branch(&done, eq, rt, Operand(zero_reg));
1277  mov(rd, rs);
1278  bind(&done);
1279  } else {
1280  movn(rd, rs, rt);
1281  }
1282 }
1283 
1284 
1285 void MacroAssembler::Movt(Register rd, Register rs, uint16_t cc) {
1286  if (kArchVariant == kLoongson) {
1287  // Tests an FP condition code and then conditionally moves rs to rd.
1288  // We do not currently use any FPU cc bit other than bit 0.
1289  ASSERT(cc == 0);
1290  ASSERT(!(rs.is(t8) || rd.is(t8)));
1291  Label done;
1292  Register scratch = t8;
1293  // For testing purposes we need to fetch the content of the FCSR register
1294  // and then test its cc (floating point condition code) bit (for cc = 0,
1295  // this is bit 23 of the FCSR).
1296  cfc1(scratch, FCSR);
1297  // For the MIPS I, II and III architectures, the contents of scratch are
1298  // UNPREDICTABLE for the instruction immediately following CFC1.
1299  nop();
1300  srl(scratch, scratch, 16);
1301  andi(scratch, scratch, 0x0080);
1302  Branch(&done, eq, scratch, Operand(zero_reg));
1303  mov(rd, rs);
1304  bind(&done);
1305  } else {
1306  movt(rd, rs, cc);
1307  }
1308 }
1309 
1310 
1311 void MacroAssembler::Movf(Register rd, Register rs, uint16_t cc) {
1312  if (kArchVariant == kLoongson) {
1313  // Tests an FP condition code and then conditionally moves rs to rd.
1314  // We do not currently use any FPU cc bit other than bit 0.
1315  ASSERT(cc == 0);
1316  ASSERT(!(rs.is(t8) || rd.is(t8)));
1317  Label done;
1318  Register scratch = t8;
1319  // For testing purposes we need to fetch the content of the FCSR register
1320  // and then test its cc (floating point condition code) bit (for cc = 0,
1321  // this is bit 23 of the FCSR).
1322  cfc1(scratch, FCSR);
1323  // For the MIPS I, II and III architectures, the contents of scratch are
1324  // UNPREDICTABLE for the instruction immediately following CFC1.
1325  nop();
1326  srl(scratch, scratch, 16);
1327  andi(scratch, scratch, 0x0080);
1328  Branch(&done, ne, scratch, Operand(zero_reg));
1329  mov(rd, rs);
1330  bind(&done);
1331  } else {
1332  movf(rd, rs, cc);
1333  }
1334 }
1335 
1336 
1337 void MacroAssembler::Clz(Register rd, Register rs) {
1338  if (kArchVariant == kLoongson) {
1339  ASSERT(!(rd.is(t8) || rd.is(t9)) && !(rs.is(t8) || rs.is(t9)));
1340  Register mask = t8;
1341  Register scratch = t9;
1342  Label loop, end;
1343  mov(at, rs);
1344  mov(rd, zero_reg);
1345  lui(mask, 0x8000);
1346  bind(&loop);
1347  and_(scratch, at, mask);
1348  Branch(&end, ne, scratch, Operand(zero_reg));
1349  addiu(rd, rd, 1);
1350  Branch(&loop, ne, mask, Operand(zero_reg), USE_DELAY_SLOT);
1351  srl(mask, mask, 1);
1352  bind(&end);
1353  } else {
1354  clz(rd, rs);
1355  }
1356 }
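// Sketch, not part of this file: what the Loongson fallback above computes,
// namely the number of leading zero bits, found by scanning a one-bit mask
// from bit 31 downward until a set bit (or the end of the word) is hit. The
// helper name is illustrative only.

#include <cstdint>

int CountLeadingZeros32(uint32_t value) {
  int count = 0;
  for (uint32_t mask = 0x80000000u;
       mask != 0 && (value & mask) == 0;
       mask >>= 1) {
    count++;
  }
  return count;
}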
1357 
1358 
1359 void MacroAssembler::EmitFPUTruncate(FPURoundingMode rounding_mode,
1360  Register result,
1361  DoubleRegister double_input,
1362  Register scratch,
1363  DoubleRegister double_scratch,
1364  Register except_flag,
1365  CheckForInexactConversion check_inexact) {
1366  ASSERT(!result.is(scratch));
1367  ASSERT(!double_input.is(double_scratch));
1368  ASSERT(!except_flag.is(scratch));
1369 
1370  Label done;
1371 
1372  // Clear the except flag (0 = no exception)
1373  mov(except_flag, zero_reg);
1374 
1375  // Test for values that can be exactly represented as a signed 32-bit integer.
1376  cvt_w_d(double_scratch, double_input);
1377  mfc1(result, double_scratch);
1378  cvt_d_w(double_scratch, double_scratch);
1379  BranchF(&done, NULL, eq, double_input, double_scratch);
1380 
1381  int32_t except_mask = kFCSRFlagMask; // Assume interested in all exceptions.
1382 
1383  if (check_inexact == kDontCheckForInexactConversion) {
1384  // Ignore inexact exceptions.
1385  except_mask &= ~kFCSRInexactFlagMask;
1386  }
1387 
1388  // Save FCSR.
1389  cfc1(scratch, FCSR);
1390  // Disable FPU exceptions.
1391  ctc1(zero_reg, FCSR);
1392 
1393  // Do operation based on rounding mode.
1394  switch (rounding_mode) {
1395  case kRoundToNearest:
1396  Round_w_d(double_scratch, double_input);
1397  break;
1398  case kRoundToZero:
1399  Trunc_w_d(double_scratch, double_input);
1400  break;
1401  case kRoundToPlusInf:
1402  Ceil_w_d(double_scratch, double_input);
1403  break;
1404  case kRoundToMinusInf:
1405  Floor_w_d(double_scratch, double_input);
1406  break;
1407  } // End of switch-statement.
1408 
1409  // Retrieve FCSR.
1410  cfc1(except_flag, FCSR);
1411  // Restore FCSR.
1412  ctc1(scratch, FCSR);
1413  // Move the converted value into the result register.
1414  mfc1(result, double_scratch);
1415 
1416  // Check for fpu exceptions.
1417  And(except_flag, except_flag, Operand(except_mask));
1418 
1419  bind(&done);
1420 }
1421 
1422 
1423 void MacroAssembler::TryInlineTruncateDoubleToI(Register result,
1424  DoubleRegister double_input,
1425  Label* done) {
1426  DoubleRegister single_scratch = kLithiumScratchDouble.low();
1427  Register scratch = at;
1428  Register scratch2 = t9;
1429 
1430  // Clear cumulative exception flags and save the FCSR.
1431  cfc1(scratch2, FCSR);
1432  ctc1(zero_reg, FCSR);
1433  // Try a conversion to a signed integer.
1434  trunc_w_d(single_scratch, double_input);
1435  mfc1(result, single_scratch);
1436  // Retrieve and restore the FCSR.
1437  cfc1(scratch, FCSR);
1438  ctc1(scratch2, FCSR);
1439  // Check for overflow and NaNs.
1440  And(scratch,
1441  scratch,
1442  kFCSROverflowFlagMask | kFCSRUnderflowFlagMask | kFCSRInvalidOpFlagMask);
1443  // If we had no exceptions we are done.
1444  Branch(done, eq, scratch, Operand(zero_reg));
1445 }
1446 
1447 
1448 void MacroAssembler::TruncateDoubleToI(Register result,
1449  DoubleRegister double_input) {
1450  Label done;
1451 
1452  TryInlineTruncateDoubleToI(result, double_input, &done);
1453 
1454  // If we fell through then inline version didn't succeed - call stub instead.
1455  push(ra);
1456  Subu(sp, sp, Operand(kDoubleSize)); // Put input on stack.
1457  sdc1(double_input, MemOperand(sp, 0));
1458 
1459  DoubleToIStub stub(sp, result, 0, true, true);
1460  CallStub(&stub);
1461 
1462  Addu(sp, sp, Operand(kDoubleSize));
1463  pop(ra);
1464 
1465  bind(&done);
1466 }
1467 
1468 
1469 void MacroAssembler::TruncateHeapNumberToI(Register result, Register object) {
1470  Label done;
1471  DoubleRegister double_scratch = f12;
1472  ASSERT(!result.is(object));
1473 
1474  ldc1(double_scratch,
1475  MemOperand(object, HeapNumber::kValueOffset - kHeapObjectTag));
1476  TryInlineTruncateDoubleToI(result, double_scratch, &done);
1477 
1478  // If we fell through then inline version didn't succeed - call stub instead.
1479  push(ra);
1480  DoubleToIStub stub(object,
1481  result,
1482  HeapNumber::kValueOffset - kHeapObjectTag,
1483  true,
1484  true);
1485  CallStub(&stub);
1486  pop(ra);
1487 
1488  bind(&done);
1489 }
1490 
1491 
1492 void MacroAssembler::TruncateNumberToI(Register object,
1493  Register result,
1494  Register heap_number_map,
1495  Register scratch,
1496  Label* not_number) {
1497  Label done;
1498  ASSERT(!result.is(object));
1499 
1500  UntagAndJumpIfSmi(result, object, &done);
1501  JumpIfNotHeapNumber(object, heap_number_map, scratch, not_number);
1502  TruncateHeapNumberToI(result, object);
1503 
1504  bind(&done);
1505 }
1506 
1507 
1508 void MacroAssembler::GetLeastBitsFromSmi(Register dst,
1509  Register src,
1510  int num_least_bits) {
1511  Ext(dst, src, kSmiTagSize, num_least_bits);
1512 }
1513 
1514 
1515 void MacroAssembler::GetLeastBitsFromInt32(Register dst,
1516  Register src,
1517  int num_least_bits) {
1518  And(dst, src, Operand((1 << num_least_bits) - 1));
1519 }
1520 
1521 
1522 // Emulated conditional branches do not emit a nop in the branch delay slot.
1523 //
1524 // BRANCH_ARGS_CHECK checks that conditional jump arguments are correct.
1525 #define BRANCH_ARGS_CHECK(cond, rs, rt) ASSERT( \
1526  (cond == cc_always && rs.is(zero_reg) && rt.rm().is(zero_reg)) || \
1527  (cond != cc_always && (!rs.is(zero_reg) || !rt.rm().is(zero_reg))))
1528 
1529 
1530 void MacroAssembler::Branch(int16_t offset, BranchDelaySlot bdslot) {
1531  BranchShort(offset, bdslot);
1532 }
1533 
1534 
1535 void MacroAssembler::Branch(int16_t offset, Condition cond, Register rs,
1536  const Operand& rt,
1537  BranchDelaySlot bdslot) {
1538  BranchShort(offset, cond, rs, rt, bdslot);
1539 }
1540 
1541 
1542 void MacroAssembler::Branch(Label* L, BranchDelaySlot bdslot) {
1543  if (L->is_bound()) {
1544  if (is_near(L)) {
1545  BranchShort(L, bdslot);
1546  } else {
1547  Jr(L, bdslot);
1548  }
1549  } else {
1550  if (is_trampoline_emitted()) {
1551  Jr(L, bdslot);
1552  } else {
1553  BranchShort(L, bdslot);
1554  }
1555  }
1556 }
1557 
1558 
1559 void MacroAssembler::Branch(Label* L, Condition cond, Register rs,
1560  const Operand& rt,
1561  BranchDelaySlot bdslot) {
1562  if (L->is_bound()) {
1563  if (is_near(L)) {
1564  BranchShort(L, cond, rs, rt, bdslot);
1565  } else {
1566  if (cond != cc_always) {
1567  Label skip;
1568  Condition neg_cond = NegateCondition(cond);
1569  BranchShort(&skip, neg_cond, rs, rt);
1570  Jr(L, bdslot);
1571  bind(&skip);
1572  } else {
1573  Jr(L, bdslot);
1574  }
1575  }
1576  } else {
1577  if (is_trampoline_emitted()) {
1578  if (cond != cc_always) {
1579  Label skip;
1580  Condition neg_cond = NegateCondition(cond);
1581  BranchShort(&skip, neg_cond, rs, rt);
1582  Jr(L, bdslot);
1583  bind(&skip);
1584  } else {
1585  Jr(L, bdslot);
1586  }
1587  } else {
1588  BranchShort(L, cond, rs, rt, bdslot);
1589  }
1590  }
1591 }
1592 
1593 
1594 void MacroAssembler::Branch(Label* L,
1595  Condition cond,
1596  Register rs,
1597  Heap::RootListIndex index,
1598  BranchDelaySlot bdslot) {
1599  LoadRoot(at, index);
1600  Branch(L, cond, rs, Operand(at), bdslot);
1601 }
1602 
1603 
1604 void MacroAssembler::BranchShort(int16_t offset, BranchDelaySlot bdslot) {
1605  b(offset);
1606 
1607  // Emit a nop in the branch delay slot if required.
1608  if (bdslot == PROTECT)
1609  nop();
1610 }
1611 
1612 
1613 void MacroAssembler::BranchShort(int16_t offset, Condition cond, Register rs,
1614  const Operand& rt,
1615  BranchDelaySlot bdslot) {
1616  BRANCH_ARGS_CHECK(cond, rs, rt);
1617  ASSERT(!rs.is(zero_reg));
1618  Register r2 = no_reg;
1619  Register scratch = at;
1620 
1621  if (rt.is_reg()) {
1622  // NOTE: 'at' can be clobbered by Branch but it is legal to use it as rs or
1623  // rt.
1624  BlockTrampolinePoolScope block_trampoline_pool(this);
1625  r2 = rt.rm_;
1626  switch (cond) {
1627  case cc_always:
1628  b(offset);
1629  break;
1630  case eq:
1631  beq(rs, r2, offset);
1632  break;
1633  case ne:
1634  bne(rs, r2, offset);
1635  break;
1636  // Signed comparison.
1637  case greater:
1638  if (r2.is(zero_reg)) {
1639  bgtz(rs, offset);
1640  } else {
1641  slt(scratch, r2, rs);
1642  bne(scratch, zero_reg, offset);
1643  }
1644  break;
1645  case greater_equal:
1646  if (r2.is(zero_reg)) {
1647  bgez(rs, offset);
1648  } else {
1649  slt(scratch, rs, r2);
1650  beq(scratch, zero_reg, offset);
1651  }
1652  break;
1653  case less:
1654  if (r2.is(zero_reg)) {
1655  bltz(rs, offset);
1656  } else {
1657  slt(scratch, rs, r2);
1658  bne(scratch, zero_reg, offset);
1659  }
1660  break;
1661  case less_equal:
1662  if (r2.is(zero_reg)) {
1663  blez(rs, offset);
1664  } else {
1665  slt(scratch, r2, rs);
1666  beq(scratch, zero_reg, offset);
1667  }
1668  break;
1669  // Unsigned comparison.
1670  case Ugreater:
1671  if (r2.is(zero_reg)) {
1672  bgtz(rs, offset);
1673  } else {
1674  sltu(scratch, r2, rs);
1675  bne(scratch, zero_reg, offset);
1676  }
1677  break;
1678  case Ugreater_equal:
1679  if (r2.is(zero_reg)) {
1680  bgez(rs, offset);
1681  } else {
1682  sltu(scratch, rs, r2);
1683  beq(scratch, zero_reg, offset);
1684  }
1685  break;
1686  case Uless:
1687  if (r2.is(zero_reg)) {
1688  // No code needs to be emitted.
1689  return;
1690  } else {
1691  sltu(scratch, rs, r2);
1692  bne(scratch, zero_reg, offset);
1693  }
1694  break;
1695  case Uless_equal:
1696  if (r2.is(zero_reg)) {
1697  b(offset);
1698  } else {
1699  sltu(scratch, r2, rs);
1700  beq(scratch, zero_reg, offset);
1701  }
1702  break;
1703  default:
1704  UNREACHABLE();
1705  }
1706  } else {
1707  // Be careful to always use shifted_branch_offset only just before the
1708  // branch instruction, as the location will be remembered for patching the
1709  // target.
1710  BlockTrampolinePoolScope block_trampoline_pool(this);
1711  switch (cond) {
1712  case cc_always:
1713  b(offset);
1714  break;
1715  case eq:
1716  // We don't want any other register but scratch clobbered.
1717  ASSERT(!scratch.is(rs));
1718  r2 = scratch;
1719  li(r2, rt);
1720  beq(rs, r2, offset);
1721  break;
1722  case ne:
1723  // We don't want any other register but scratch clobbered.
1724  ASSERT(!scratch.is(rs));
1725  r2 = scratch;
1726  li(r2, rt);
1727  bne(rs, r2, offset);
1728  break;
1729  // Signed comparison.
1730  case greater:
1731  if (rt.imm32_ == 0) {
1732  bgtz(rs, offset);
1733  } else {
1734  r2 = scratch;
1735  li(r2, rt);
1736  slt(scratch, r2, rs);
1737  bne(scratch, zero_reg, offset);
1738  }
1739  break;
1740  case greater_equal:
1741  if (rt.imm32_ == 0) {
1742  bgez(rs, offset);
1743  } else if (is_int16(rt.imm32_)) {
1744  slti(scratch, rs, rt.imm32_);
1745  beq(scratch, zero_reg, offset);
1746  } else {
1747  r2 = scratch;
1748  li(r2, rt);
1749  slt(scratch, rs, r2);
1750  beq(scratch, zero_reg, offset);
1751  }
1752  break;
1753  case less:
1754  if (rt.imm32_ == 0) {
1755  bltz(rs, offset);
1756  } else if (is_int16(rt.imm32_)) {
1757  slti(scratch, rs, rt.imm32_);
1758  bne(scratch, zero_reg, offset);
1759  } else {
1760  r2 = scratch;
1761  li(r2, rt);
1762  slt(scratch, rs, r2);
1763  bne(scratch, zero_reg, offset);
1764  }
1765  break;
1766  case less_equal:
1767  if (rt.imm32_ == 0) {
1768  blez(rs, offset);
1769  } else {
1770  r2 = scratch;
1771  li(r2, rt);
1772  slt(scratch, r2, rs);
1773  beq(scratch, zero_reg, offset);
1774  }
1775  break;
1776  // Unsigned comparison.
1777  case Ugreater:
1778  if (rt.imm32_ == 0) {
1779  bgtz(rs, offset);
1780  } else {
1781  r2 = scratch;
1782  li(r2, rt);
1783  sltu(scratch, r2, rs);
1784  bne(scratch, zero_reg, offset);
1785  }
1786  break;
1787  case Ugreater_equal:
1788  if (rt.imm32_ == 0) {
1789  bgez(rs, offset);
1790  } else if (is_int16(rt.imm32_)) {
1791  sltiu(scratch, rs, rt.imm32_);
1792  beq(scratch, zero_reg, offset);
1793  } else {
1794  r2 = scratch;
1795  li(r2, rt);
1796  sltu(scratch, rs, r2);
1797  beq(scratch, zero_reg, offset);
1798  }
1799  break;
1800  case Uless:
1801  if (rt.imm32_ == 0) {
1802  // No code needs to be emitted.
1803  return;
1804  } else if (is_int16(rt.imm32_)) {
1805  sltiu(scratch, rs, rt.imm32_);
1806  bne(scratch, zero_reg, offset);
1807  } else {
1808  r2 = scratch;
1809  li(r2, rt);
1810  sltu(scratch, rs, r2);
1811  bne(scratch, zero_reg, offset);
1812  }
1813  break;
1814  case Uless_equal:
1815  if (rt.imm32_ == 0) {
1816  b(offset);
1817  } else {
1818  r2 = scratch;
1819  li(r2, rt);
1820  sltu(scratch, r2, rs);
1821  beq(scratch, zero_reg, offset);
1822  }
1823  break;
1824  default:
1825  UNREACHABLE();
1826  }
1827  }
1828  // Emit a nop in the branch delay slot if required.
1829  if (bdslot == PROTECT)
1830  nop();
1831 }
1832 
1833 
1834 void MacroAssembler::BranchShort(Label* L, BranchDelaySlot bdslot) {
1835  // We use shifted_branch_offset as an argument for the branch instructions
1836  // so that it is evaluated just before generating the branch, as required.
1837 
1838  b(shifted_branch_offset(L, false));
1839 
1840  // Emit a nop in the branch delay slot if required.
1841  if (bdslot == PROTECT)
1842  nop();
1843 }
1844 
1845 
1846 void MacroAssembler::BranchShort(Label* L, Condition cond, Register rs,
1847  const Operand& rt,
1848  BranchDelaySlot bdslot) {
1849  BRANCH_ARGS_CHECK(cond, rs, rt);
1850 
1851  int32_t offset = 0;
1852  Register r2 = no_reg;
1853  Register scratch = at;
1854  if (rt.is_reg()) {
1855  BlockTrampolinePoolScope block_trampoline_pool(this);
1856  r2 = rt.rm_;
1857  // Be careful to always use shifted_branch_offset only just before the
1858  // branch instruction, as the location will be remembered for patching the
1859  // target.
1860  switch (cond) {
1861  case cc_always:
1862  offset = shifted_branch_offset(L, false);
1863  b(offset);
1864  break;
1865  case eq:
1866  offset = shifted_branch_offset(L, false);
1867  beq(rs, r2, offset);
1868  break;
1869  case ne:
1870  offset = shifted_branch_offset(L, false);
1871  bne(rs, r2, offset);
1872  break;
1873  // Signed comparison.
1874  case greater:
1875  if (r2.is(zero_reg)) {
1876  offset = shifted_branch_offset(L, false);
1877  bgtz(rs, offset);
1878  } else {
1879  slt(scratch, r2, rs);
1880  offset = shifted_branch_offset(L, false);
1881  bne(scratch, zero_reg, offset);
1882  }
1883  break;
1884  case greater_equal:
1885  if (r2.is(zero_reg)) {
1886  offset = shifted_branch_offset(L, false);
1887  bgez(rs, offset);
1888  } else {
1889  slt(scratch, rs, r2);
1890  offset = shifted_branch_offset(L, false);
1891  beq(scratch, zero_reg, offset);
1892  }
1893  break;
1894  case less:
1895  if (r2.is(zero_reg)) {
1896  offset = shifted_branch_offset(L, false);
1897  bltz(rs, offset);
1898  } else {
1899  slt(scratch, rs, r2);
1900  offset = shifted_branch_offset(L, false);
1901  bne(scratch, zero_reg, offset);
1902  }
1903  break;
1904  case less_equal:
1905  if (r2.is(zero_reg)) {
1906  offset = shifted_branch_offset(L, false);
1907  blez(rs, offset);
1908  } else {
1909  slt(scratch, r2, rs);
1910  offset = shifted_branch_offset(L, false);
1911  beq(scratch, zero_reg, offset);
1912  }
1913  break;
1914  // Unsigned comparison.
1915  case Ugreater:
1916  if (r2.is(zero_reg)) {
1917  offset = shifted_branch_offset(L, false);
1918  bgtz(rs, offset);
1919  } else {
1920  sltu(scratch, r2, rs);
1921  offset = shifted_branch_offset(L, false);
1922  bne(scratch, zero_reg, offset);
1923  }
1924  break;
1925  case Ugreater_equal:
1926  if (r2.is(zero_reg)) {
1927  offset = shifted_branch_offset(L, false);
1928  bgez(rs, offset);
1929  } else {
1930  sltu(scratch, rs, r2);
1931  offset = shifted_branch_offset(L, false);
1932  beq(scratch, zero_reg, offset);
1933  }
1934  break;
1935  case Uless:
1936  if (r2.is(zero_reg)) {
1937  // No code needs to be emitted.
1938  return;
1939  } else {
1940  sltu(scratch, rs, r2);
1941  offset = shifted_branch_offset(L, false);
1942  bne(scratch, zero_reg, offset);
1943  }
1944  break;
1945  case Uless_equal:
1946  if (r2.is(zero_reg)) {
1947  offset = shifted_branch_offset(L, false);
1948  b(offset);
1949  } else {
1950  sltu(scratch, r2, rs);
1951  offset = shifted_branch_offset(L, false);
1952  beq(scratch, zero_reg, offset);
1953  }
1954  break;
1955  default:
1956  UNREACHABLE();
1957  }
1958  } else {
1959  // Be careful to always use shifted_branch_offset only just before the
1960  // branch instruction, as the location will be remembered for patching the
1961  // target.
1962  BlockTrampolinePoolScope block_trampoline_pool(this);
1963  switch (cond) {
1964  case cc_always:
1965  offset = shifted_branch_offset(L, false);
1966  b(offset);
1967  break;
1968  case eq:
1969  ASSERT(!scratch.is(rs));
1970  r2 = scratch;
1971  li(r2, rt);
1972  offset = shifted_branch_offset(L, false);
1973  beq(rs, r2, offset);
1974  break;
1975  case ne:
1976  ASSERT(!scratch.is(rs));
1977  r2 = scratch;
1978  li(r2, rt);
1979  offset = shifted_branch_offset(L, false);
1980  bne(rs, r2, offset);
1981  break;
1982  // Signed comparison.
1983  case greater:
1984  if (rt.imm32_ == 0) {
1985  offset = shifted_branch_offset(L, false);
1986  bgtz(rs, offset);
1987  } else {
1988  ASSERT(!scratch.is(rs));
1989  r2 = scratch;
1990  li(r2, rt);
1991  slt(scratch, r2, rs);
1992  offset = shifted_branch_offset(L, false);
1993  bne(scratch, zero_reg, offset);
1994  }
1995  break;
1996  case greater_equal:
1997  if (rt.imm32_ == 0) {
1998  offset = shifted_branch_offset(L, false);
1999  bgez(rs, offset);
2000  } else if (is_int16(rt.imm32_)) {
2001  slti(scratch, rs, rt.imm32_);
2002  offset = shifted_branch_offset(L, false);
2003  beq(scratch, zero_reg, offset);
2004  } else {
2005  ASSERT(!scratch.is(rs));
2006  r2 = scratch;
2007  li(r2, rt);
2008  slt(scratch, rs, r2);
2009  offset = shifted_branch_offset(L, false);
2010  beq(scratch, zero_reg, offset);
2011  }
2012  break;
2013  case less:
2014  if (rt.imm32_ == 0) {
2015  offset = shifted_branch_offset(L, false);
2016  bltz(rs, offset);
2017  } else if (is_int16(rt.imm32_)) {
2018  slti(scratch, rs, rt.imm32_);
2019  offset = shifted_branch_offset(L, false);
2020  bne(scratch, zero_reg, offset);
2021  } else {
2022  ASSERT(!scratch.is(rs));
2023  r2 = scratch;
2024  li(r2, rt);
2025  slt(scratch, rs, r2);
2026  offset = shifted_branch_offset(L, false);
2027  bne(scratch, zero_reg, offset);
2028  }
2029  break;
2030  case less_equal:
2031  if (rt.imm32_ == 0) {
2032  offset = shifted_branch_offset(L, false);
2033  blez(rs, offset);
2034  } else {
2035  ASSERT(!scratch.is(rs));
2036  r2 = scratch;
2037  li(r2, rt);
2038  slt(scratch, r2, rs);
2039  offset = shifted_branch_offset(L, false);
2040  beq(scratch, zero_reg, offset);
2041  }
2042  break;
2043  // Unsigned comparison.
2044  case Ugreater:
2045  if (rt.imm32_ == 0) {
2046  offset = shifted_branch_offset(L, false);
2047  bgtz(rs, offset);
2048  } else {
2049  ASSERT(!scratch.is(rs));
2050  r2 = scratch;
2051  li(r2, rt);
2052  sltu(scratch, r2, rs);
2053  offset = shifted_branch_offset(L, false);
2054  bne(scratch, zero_reg, offset);
2055  }
2056  break;
2057  case Ugreater_equal:
2058  if (rt.imm32_ == 0) {
2059  offset = shifted_branch_offset(L, false);
2060  bgez(rs, offset);
2061  } else if (is_int16(rt.imm32_)) {
2062  sltiu(scratch, rs, rt.imm32_);
2063  offset = shifted_branch_offset(L, false);
2064  beq(scratch, zero_reg, offset);
2065  } else {
2066  ASSERT(!scratch.is(rs));
2067  r2 = scratch;
2068  li(r2, rt);
2069  sltu(scratch, rs, r2);
2070  offset = shifted_branch_offset(L, false);
2071  beq(scratch, zero_reg, offset);
2072  }
2073  break;
2074  case Uless:
2075  if (rt.imm32_ == 0) {
2076  // No code needs to be emitted.
2077  return;
2078  } else if (is_int16(rt.imm32_)) {
2079  sltiu(scratch, rs, rt.imm32_);
2080  offset = shifted_branch_offset(L, false);
2081  bne(scratch, zero_reg, offset);
2082  } else {
2083  ASSERT(!scratch.is(rs));
2084  r2 = scratch;
2085  li(r2, rt);
2086  sltu(scratch, rs, r2);
2087  offset = shifted_branch_offset(L, false);
2088  bne(scratch, zero_reg, offset);
2089  }
2090  break;
2091  case Uless_equal:
2092  if (rt.imm32_ == 0) {
2093  offset = shifted_branch_offset(L, false);
2094  b(offset);
2095  } else {
2096  ASSERT(!scratch.is(rs));
2097  r2 = scratch;
2098  li(r2, rt);
2099  sltu(scratch, r2, rs);
2100  offset = shifted_branch_offset(L, false);
2101  beq(scratch, zero_reg, offset);
2102  }
2103  break;
2104  default:
2105  UNREACHABLE();
2106  }
2107  }
2108  // Check that offset could actually hold in an int16_t.
2109  ASSERT(is_int16(offset));
2110  // Emit a nop in the branch delay slot if required.
2111  if (bdslot == PROTECT)
2112  nop();
2113 }
2114 
2115 
2116 void MacroAssembler::BranchAndLink(int16_t offset, BranchDelaySlot bdslot) {
2117  BranchAndLinkShort(offset, bdslot);
2118 }
2119 
2120 
2121 void MacroAssembler::BranchAndLink(int16_t offset, Condition cond, Register rs,
2122  const Operand& rt,
2123  BranchDelaySlot bdslot) {
2124  BranchAndLinkShort(offset, cond, rs, rt, bdslot);
2125 }
2126 
2127 
2128 void MacroAssembler::BranchAndLink(Label* L, BranchDelaySlot bdslot) {
2129  if (L->is_bound()) {
2130  if (is_near(L)) {
2131  BranchAndLinkShort(L, bdslot);
2132  } else {
2133  Jalr(L, bdslot);
2134  }
2135  } else {
2136  if (is_trampoline_emitted()) {
2137  Jalr(L, bdslot);
2138  } else {
2139  BranchAndLinkShort(L, bdslot);
2140  }
2141  }
2142 }
2143 
2144 
2145 void MacroAssembler::BranchAndLink(Label* L, Condition cond, Register rs,
2146  const Operand& rt,
2147  BranchDelaySlot bdslot) {
2148  if (L->is_bound()) {
2149  if (is_near(L)) {
2150  BranchAndLinkShort(L, cond, rs, rt, bdslot);
2151  } else {
2152  Label skip;
2153  Condition neg_cond = NegateCondition(cond);
2154  BranchShort(&skip, neg_cond, rs, rt);
2155  Jalr(L, bdslot);
2156  bind(&skip);
2157  }
2158  } else {
2159  if (is_trampoline_emitted()) {
2160  Label skip;
2161  Condition neg_cond = NegateCondition(cond);
2162  BranchShort(&skip, neg_cond, rs, rt);
2163  Jalr(L, bdslot);
2164  bind(&skip);
2165  } else {
2166  BranchAndLinkShort(L, cond, rs, rt, bdslot);
2167  }
2168  }
2169 }
2170 
2171 
2172 // We need to use a bgezal or bltzal, but they can't be used directly with the
2173 // slt instructions. We could use sub or add instead but we would miss overflow
2174 // cases, so we keep slt and add an intermediate third instruction.
2175 void MacroAssembler::BranchAndLinkShort(int16_t offset,
2176  BranchDelaySlot bdslot) {
2177  bal(offset);
2178 
2179  // Emit a nop in the branch delay slot if required.
2180  if (bdslot == PROTECT)
2181  nop();
2182 }
2183 
2184 
2185 void MacroAssembler::BranchAndLinkShort(int16_t offset, Condition cond,
2186  Register rs, const Operand& rt,
2187  BranchDelaySlot bdslot) {
2188  BRANCH_ARGS_CHECK(cond, rs, rt);
2189  Register r2 = no_reg;
2190  Register scratch = at;
2191 
2192  if (rt.is_reg()) {
2193  r2 = rt.rm_;
2194  } else if (cond != cc_always) {
2195  r2 = scratch;
2196  li(r2, rt);
2197  }
2198 
2199  {
2200  BlockTrampolinePoolScope block_trampoline_pool(this);
2201  switch (cond) {
2202  case cc_always:
2203  bal(offset);
2204  break;
2205  case eq:
2206  bne(rs, r2, 2);
2207  nop();
2208  bal(offset);
2209  break;
2210  case ne:
2211  beq(rs, r2, 2);
2212  nop();
2213  bal(offset);
2214  break;
2215 
2216  // Signed comparison.
2217  case greater:
2218  slt(scratch, r2, rs);
2219  addiu(scratch, scratch, -1);
2220  bgezal(scratch, offset);
2221  break;
2222  case greater_equal:
2223  slt(scratch, rs, r2);
2224  addiu(scratch, scratch, -1);
2225  bltzal(scratch, offset);
2226  break;
2227  case less:
2228  slt(scratch, rs, r2);
2229  addiu(scratch, scratch, -1);
2230  bgezal(scratch, offset);
2231  break;
2232  case less_equal:
2233  slt(scratch, r2, rs);
2234  addiu(scratch, scratch, -1);
2235  bltzal(scratch, offset);
2236  break;
2237 
2238  // Unsigned comparison.
2239  case Ugreater:
2240  sltu(scratch, r2, rs);
2241  addiu(scratch, scratch, -1);
2242  bgezal(scratch, offset);
2243  break;
2244  case Ugreater_equal:
2245  sltu(scratch, rs, r2);
2246  addiu(scratch, scratch, -1);
2247  bltzal(scratch, offset);
2248  break;
2249  case Uless:
2250  sltu(scratch, rs, r2);
2251  addiu(scratch, scratch, -1);
2252  bgezal(scratch, offset);
2253  break;
2254  case Uless_equal:
2255  sltu(scratch, r2, rs);
2256  addiu(scratch, scratch, -1);
2257  bltzal(scratch, offset);
2258  break;
2259 
2260  default:
2261  UNREACHABLE();
2262  }
2263  }
2264  // Emit a nop in the branch delay slot if required.
2265  if (bdslot == PROTECT)
2266  nop();
2267 }
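
The slt/addiu/bgezal pattern emitted above can be understood in isolation. The following stand-alone sketch is illustrative only (plain integers stand in for register values) and shows why the sequence generated for the signed 'greater' case takes the linked branch exactly when rs > rt:

#include <cassert>
#include <cstdint>

// slt    scratch, rt, rs     -> scratch = (rt < rs) ? 1 : 0
// addiu  scratch, scratch, -1
// bgezal scratch, offset     -> branch-and-link taken iff scratch >= 0
bool GreaterTakesLinkedBranch(int32_t rs, int32_t rt) {
  int32_t scratch = (rt < rs) ? 1 : 0;
  scratch -= 1;
  return scratch >= 0;
}

int main() {
  assert(GreaterTakesLinkedBranch(7, 3));   // 7 > 3: branch-and-link taken
  assert(!GreaterTakesLinkedBranch(3, 7));  // condition false: falls through
  assert(!GreaterTakesLinkedBranch(5, 5));  // equal: falls through
  return 0;
}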
2268 
2269 
2270 void MacroAssembler::BranchAndLinkShort(Label* L, BranchDelaySlot bdslot) {
2271  bal(shifted_branch_offset(L, false));
2272 
2273  // Emit a nop in the branch delay slot if required.
2274  if (bdslot == PROTECT)
2275  nop();
2276 }
2277 
2278 
2279 void MacroAssembler::BranchAndLinkShort(Label* L, Condition cond, Register rs,
2280  const Operand& rt,
2281  BranchDelaySlot bdslot) {
2282  BRANCH_ARGS_CHECK(cond, rs, rt);
2283 
2284  int32_t offset = 0;
2285  Register r2 = no_reg;
2286  Register scratch = at;
2287  if (rt.is_reg()) {
2288  r2 = rt.rm_;
2289  } else if (cond != cc_always) {
2290  r2 = scratch;
2291  li(r2, rt);
2292  }
2293 
2294  {
2295  BlockTrampolinePoolScope block_trampoline_pool(this);
2296  switch (cond) {
2297  case cc_always:
2298  offset = shifted_branch_offset(L, false);
2299  bal(offset);
2300  break;
2301  case eq:
2302  bne(rs, r2, 2);
2303  nop();
2304  offset = shifted_branch_offset(L, false);
2305  bal(offset);
2306  break;
2307  case ne:
2308  beq(rs, r2, 2);
2309  nop();
2310  offset = shifted_branch_offset(L, false);
2311  bal(offset);
2312  break;
2313 
2314  // Signed comparison.
2315  case greater:
2316  slt(scratch, r2, rs);
2317  addiu(scratch, scratch, -1);
2318  offset = shifted_branch_offset(L, false);
2319  bgezal(scratch, offset);
2320  break;
2321  case greater_equal:
2322  slt(scratch, rs, r2);
2323  addiu(scratch, scratch, -1);
2324  offset = shifted_branch_offset(L, false);
2325  bltzal(scratch, offset);
2326  break;
2327  case less:
2328  slt(scratch, rs, r2);
2329  addiu(scratch, scratch, -1);
2330  offset = shifted_branch_offset(L, false);
2331  bgezal(scratch, offset);
2332  break;
2333  case less_equal:
2334  slt(scratch, r2, rs);
2335  addiu(scratch, scratch, -1);
2336  offset = shifted_branch_offset(L, false);
2337  bltzal(scratch, offset);
2338  break;
2339 
2340  // Unsigned comparison.
2341  case Ugreater:
2342  sltu(scratch, r2, rs);
2343  addiu(scratch, scratch, -1);
2344  offset = shifted_branch_offset(L, false);
2345  bgezal(scratch, offset);
2346  break;
2347  case Ugreater_equal:
2348  sltu(scratch, rs, r2);
2349  addiu(scratch, scratch, -1);
2350  offset = shifted_branch_offset(L, false);
2351  bltzal(scratch, offset);
2352  break;
2353  case Uless:
2354  sltu(scratch, rs, r2);
2355  addiu(scratch, scratch, -1);
2356  offset = shifted_branch_offset(L, false);
2357  bgezal(scratch, offset);
2358  break;
2359  case Uless_equal:
2360  sltu(scratch, r2, rs);
2361  addiu(scratch, scratch, -1);
2362  offset = shifted_branch_offset(L, false);
2363  bltzal(scratch, offset);
2364  break;
2365 
2366  default:
2367  UNREACHABLE();
2368  }
2369  }
2370  // Check that offset could actually hold in an int16_t.
2371  ASSERT(is_int16(offset));
2372 
2373  // Emit a nop in the branch delay slot if required.
2374  if (bdslot == PROTECT)
2375  nop();
2376 }
2377 
2378 
2379 void MacroAssembler::Jump(Register target,
2380  Condition cond,
2381  Register rs,
2382  const Operand& rt,
2383  BranchDelaySlot bd) {
2384  BlockTrampolinePoolScope block_trampoline_pool(this);
2385  if (cond == cc_always) {
2386  jr(target);
2387  } else {
2388  BRANCH_ARGS_CHECK(cond, rs, rt);
2389  Branch(2, NegateCondition(cond), rs, rt);
2390  jr(target);
2391  }
2392  // Emit a nop in the branch delay slot if required.
2393  if (bd == PROTECT)
2394  nop();
2395 }
2396 
2397 
2398 void MacroAssembler::Jump(intptr_t target,
2399  RelocInfo::Mode rmode,
2400  Condition cond,
2401  Register rs,
2402  const Operand& rt,
2403  BranchDelaySlot bd) {
2404  Label skip;
2405  if (cond != cc_always) {
2406  Branch(USE_DELAY_SLOT, &skip, NegateCondition(cond), rs, rt);
2407  }
2408  // The first instruction of 'li' may be placed in the delay slot.
2409  // This is not an issue: t9 is expected to be clobbered anyway.
2410  li(t9, Operand(target, rmode));
2411  Jump(t9, al, zero_reg, Operand(zero_reg), bd);
2412  bind(&skip);
2413 }
2414 
2415 
2416 void MacroAssembler::Jump(Address target,
2417  RelocInfo::Mode rmode,
2418  Condition cond,
2419  Register rs,
2420  const Operand& rt,
2421  BranchDelaySlot bd) {
2422  ASSERT(!RelocInfo::IsCodeTarget(rmode));
2423  Jump(reinterpret_cast<intptr_t>(target), rmode, cond, rs, rt, bd);
2424 }
2425 
2426 
2427 void MacroAssembler::Jump(Handle<Code> code,
2428  RelocInfo::Mode rmode,
2429  Condition cond,
2430  Register rs,
2431  const Operand& rt,
2432  BranchDelaySlot bd) {
2433  ASSERT(RelocInfo::IsCodeTarget(rmode));
2434  AllowDeferredHandleDereference embedding_raw_address;
2435  Jump(reinterpret_cast<intptr_t>(code.location()), rmode, cond, rs, rt, bd);
2436 }
2437 
2438 
2439 int MacroAssembler::CallSize(Register target,
2440  Condition cond,
2441  Register rs,
2442  const Operand& rt,
2443  BranchDelaySlot bd) {
2444  int size = 0;
2445 
2446  if (cond == cc_always) {
2447  size += 1;
2448  } else {
2449  size += 3;
2450  }
2451 
2452  if (bd == PROTECT)
2453  size += 1;
2454 
2455  return size * kInstrSize;
2456 }
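
As a worked illustration of the count above: an unconditional call is a single jalr, so cc_always contributes one instruction; any other condition emits the two-instruction branch (branch plus delay-slot nop) that skips the jalr, for three in total; a PROTECT delay slot appends one more nop. With kInstrSize of 4 bytes, a conditional call with a protected delay slot therefore occupies (3 + 1) * 4 = 16 bytes.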
2457 
2458 
2459 // Note: To call gcc-compiled C code on MIPS, you must call through t9.
2460 void MacroAssembler::Call(Register target,
2461  Condition cond,
2462  Register rs,
2463  const Operand& rt,
2464  BranchDelaySlot bd) {
2465  BlockTrampolinePoolScope block_trampoline_pool(this);
2466  Label start;
2467  bind(&start);
2468  if (cond == cc_always) {
2469  jalr(target);
2470  } else {
2471  BRANCH_ARGS_CHECK(cond, rs, rt);
2472  Branch(2, NegateCondition(cond), rs, rt);
2473  jalr(target);
2474  }
2475  // Emit a nop in the branch delay slot if required.
2476  if (bd == PROTECT)
2477  nop();
2478 
2479  ASSERT_EQ(CallSize(target, cond, rs, rt, bd),
2480  SizeOfCodeGeneratedSince(&start));
2481 }
2482 
2483 
2484 int MacroAssembler::CallSize(Address target,
2485  RelocInfo::Mode rmode,
2486  Condition cond,
2487  Register rs,
2488  const Operand& rt,
2489  BranchDelaySlot bd) {
2490  int size = CallSize(t9, cond, rs, rt, bd);
2491  return size + 2 * kInstrSize;
2492 }
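
The extra 2 * kInstrSize accounts for the fixed-size li of the target address into t9 (a lui/ori pair under CONSTANT_SIZE), so, continuing the example above, a conditional call to an Address with a protected delay slot comes to 16 + 8 = 24 bytes.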
2493 
2494 
2495 void MacroAssembler::Call(Address target,
2496  RelocInfo::Mode rmode,
2497  Condition cond,
2498  Register rs,
2499  const Operand& rt,
2500  BranchDelaySlot bd) {
2501  BlockTrampolinePoolScope block_trampoline_pool(this);
2502  Label start;
2503  bind(&start);
2504  int32_t target_int = reinterpret_cast<int32_t>(target);
2505  // Must record previous source positions before the
2506  // li() generates a new code target.
2507  positions_recorder()->WriteRecordedPositions();
2508  li(t9, Operand(target_int, rmode), CONSTANT_SIZE);
2509  Call(t9, cond, rs, rt, bd);
2510  ASSERT_EQ(CallSize(target, rmode, cond, rs, rt, bd),
2511  SizeOfCodeGeneratedSince(&start));
2512 }
2513 
2514 
2515 int MacroAssembler::CallSize(Handle<Code> code,
2516  RelocInfo::Mode rmode,
2517  TypeFeedbackId ast_id,
2518  Condition cond,
2519  Register rs,
2520  const Operand& rt,
2521  BranchDelaySlot bd) {
2522  AllowDeferredHandleDereference using_raw_address;
2523  return CallSize(reinterpret_cast<Address>(code.location()),
2524  rmode, cond, rs, rt, bd);
2525 }
2526 
2527 
2528 void MacroAssembler::Call(Handle<Code> code,
2529  RelocInfo::Mode rmode,
2530  TypeFeedbackId ast_id,
2531  Condition cond,
2532  Register rs,
2533  const Operand& rt,
2534  BranchDelaySlot bd) {
2535  BlockTrampolinePoolScope block_trampoline_pool(this);
2536  Label start;
2537  bind(&start);
2538  ASSERT(RelocInfo::IsCodeTarget(rmode));
2539  if (rmode == RelocInfo::CODE_TARGET && !ast_id.IsNone()) {
2540  SetRecordedAstId(ast_id);
2541  rmode = RelocInfo::CODE_TARGET_WITH_ID;
2542  }
2543  AllowDeferredHandleDereference embedding_raw_address;
2544  Call(reinterpret_cast<Address>(code.location()), rmode, cond, rs, rt, bd);
2545  ASSERT_EQ(CallSize(code, rmode, ast_id, cond, rs, rt, bd),
2546  SizeOfCodeGeneratedSince(&start));
2547 }
2548 
2549 
2550 void MacroAssembler::Ret(Condition cond,
2551  Register rs,
2552  const Operand& rt,
2553  BranchDelaySlot bd) {
2554  Jump(ra, cond, rs, rt, bd);
2555 }
2556 
2557 
2558 void MacroAssembler::J(Label* L, BranchDelaySlot bdslot) {
2559  BlockTrampolinePoolScope block_trampoline_pool(this);
2560 
2561  uint32_t imm28;
2562  imm28 = jump_address(L);
2563  imm28 &= kImm28Mask;
2564  { BlockGrowBufferScope block_buf_growth(this);
2565  // Buffer growth (and relocation) must be blocked for internal references
2566  // until associated instructions are emitted and available to be patched.
2567  RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
2568  j(imm28);
2569  }
2570  // Emit a nop in the branch delay slot if required.
2571  if (bdslot == PROTECT)
2572  nop();
2573 }
2574 
2575 
2576 void MacroAssembler::Jr(Label* L, BranchDelaySlot bdslot) {
2577  BlockTrampolinePoolScope block_trampoline_pool(this);
2578 
2579  uint32_t imm32;
2580  imm32 = jump_address(L);
2581  { BlockGrowBufferScope block_buf_growth(this);
2582  // Buffer growth (and relocation) must be blocked for internal references
2583  // until associated instructions are emitted and available to be patched.
2584  RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
2585  lui(at, (imm32 & kHiMask) >> kLuiShift);
2586  ori(at, at, (imm32 & kImm16Mask));
2587  }
2588  jr(at);
2589 
2590  // Emit a nop in the branch delay slot if required.
2591  if (bdslot == PROTECT)
2592  nop();
2593 }
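
The lui/ori pair above rebuilds the label's full 32-bit address in at before the register jump. A minimal sketch of that reassembly, with an arbitrary example address and the mask values spelled out locally for illustration:

#include <cassert>
#include <cstdint>

int main() {
  const uint32_t kImm16Mask = 0x0000FFFF;  // low halfword
  const uint32_t kHiMask    = 0xFFFF0000;  // high halfword
  const int kLuiShift       = 16;

  uint32_t imm32 = 0x12345678;                               // hypothetical target
  uint32_t at_reg = ((imm32 & kHiMask) >> kLuiShift) << 16;  // lui at, hi(imm32)
  at_reg |= (imm32 & kImm16Mask);                            // ori at, at, lo(imm32)
  assert(at_reg == imm32);                                   // full address restored
  return 0;
}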
2594 
2595 
2596 void MacroAssembler::Jalr(Label* L, BranchDelaySlot bdslot) {
2597  BlockTrampolinePoolScope block_trampoline_pool(this);
2598 
2599  uint32_t imm32;
2600  imm32 = jump_address(L);
2601  { BlockGrowBufferScope block_buf_growth(this);
2602  // Buffer growth (and relocation) must be blocked for internal references
2603  // until associated instructions are emitted and available to be patched.
2604  RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
2605  lui(at, (imm32 & kHiMask) >> kLuiShift);
2606  ori(at, at, (imm32 & kImm16Mask));
2607  }
2608  jalr(at);
2609 
2610  // Emit a nop in the branch delay slot if required.
2611  if (bdslot == PROTECT)
2612  nop();
2613 }
2614 
2615 
2616 void MacroAssembler::DropAndRet(int drop) {
2617  Ret(USE_DELAY_SLOT);
2618  addiu(sp, sp, drop * kPointerSize);
2619 }
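
Because Ret(USE_DELAY_SLOT) emits the return jump without a trailing nop, the addiu that pops the stack lands in the jump's delay slot and still executes before control leaves, so DropAndRet(n) costs only two instructions.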
2620 
2621 void MacroAssembler::DropAndRet(int drop,
2622  Condition cond,
2623  Register r1,
2624  const Operand& r2) {
2625  // Both Drop and Ret need to be conditional.
2626  Label skip;
2627  if (cond != cc_always) {
2628  Branch(&skip, NegateCondition(cond), r1, r2);
2629  }
2630 
2631  Drop(drop);
2632  Ret();
2633 
2634  if (cond != cc_always) {
2635  bind(&skip);
2636  }
2637 }
2638 
2639 
2640 void MacroAssembler::Drop(int count,
2641  Condition cond,
2642  Register reg,
2643  const Operand& op) {
2644  if (count <= 0) {
2645  return;
2646  }
2647 
2648  Label skip;
2649 
2650  if (cond != al) {
2651  Branch(&skip, NegateCondition(cond), reg, op);
2652  }
2653 
2654  addiu(sp, sp, count * kPointerSize);
2655 
2656  if (cond != al) {
2657  bind(&skip);
2658  }
2659 }
2660 
2661 
2662 
2663 void MacroAssembler::Swap(Register reg1,
2664  Register reg2,
2665  Register scratch) {
2666  if (scratch.is(no_reg)) {
2667  Xor(reg1, reg1, Operand(reg2));
2668  Xor(reg2, reg2, Operand(reg1));
2669  Xor(reg1, reg1, Operand(reg2));
2670  } else {
2671  mov(scratch, reg1);
2672  mov(reg1, reg2);
2673  mov(reg2, scratch);
2674  }
2675 }
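
When no scratch register is available, Swap falls back to the classic three-XOR exchange. A stand-alone sketch of that identity (illustration only; it relies on the two registers being distinct):

#include <cassert>
#include <cstdint>

int main() {
  uint32_t reg1 = 0xDEADBEEF, reg2 = 0x0BADF00D;  // hypothetical register contents
  reg1 ^= reg2;  // Xor(reg1, reg1, reg2)
  reg2 ^= reg1;  // Xor(reg2, reg2, reg1): reg2 now holds the old reg1
  reg1 ^= reg2;  // Xor(reg1, reg1, reg2): reg1 now holds the old reg2
  assert(reg1 == 0x0BADF00D && reg2 == 0xDEADBEEF);
  return 0;
}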
2676 
2677 
2678 void MacroAssembler::Call(Label* target) {
2679  BranchAndLink(target);
2680 }
2681 
2682 
2683 void MacroAssembler::Push(Handle<Object> handle) {
2684  li(at, Operand(handle));
2685  push(at);
2686 }
2687 
2688 
2689 #ifdef ENABLE_DEBUGGER_SUPPORT
2690 
2691 void MacroAssembler::DebugBreak() {
2692  PrepareCEntryArgs(0);
2693  PrepareCEntryFunction(ExternalReference(Runtime::kDebugBreak, isolate()));
2694  CEntryStub ces(1);
2695  ASSERT(AllowThisStubCall(&ces));
2696  Call(ces.GetCode(isolate()), RelocInfo::DEBUG_BREAK);
2697 }
2698 
2699 #endif // ENABLE_DEBUGGER_SUPPORT
2700 
2701 
2702 // ---------------------------------------------------------------------------
2703 // Exception handling.
2704 
2705 void MacroAssembler::PushTryHandler(StackHandler::Kind kind,
2706  int handler_index) {
2707  // Adjust this code if not the case.
2708  STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
2709  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
2710  STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
2711  STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
2712  STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
2713  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
2714 
2715  // For the JSEntry handler, we must preserve a0-a3 and s0.
2716  // t1-t3 are available. We will build up the handler from the bottom by
2717  // pushing on the stack.
2718  // Set up the code object (t1) and the state (t2) for pushing.
2719  unsigned state =
2720  StackHandler::IndexField::encode(handler_index) |
2721  StackHandler::KindField::encode(kind);
2722  li(t1, Operand(CodeObject()), CONSTANT_SIZE);
2723  li(t2, Operand(state));
2724 
2725  // Push the frame pointer, context, state, and code object.
2726  if (kind == StackHandler::JS_ENTRY) {
2727  ASSERT_EQ(Smi::FromInt(0), 0);
2728  // The second zero_reg indicates no context.
2729  // The first zero_reg is the NULL frame pointer.
2730  // The operands are reversed to match the order of MultiPush/Pop.
2731  Push(zero_reg, zero_reg, t2, t1);
2732  } else {
2733  MultiPush(t1.bit() | t2.bit() | cp.bit() | fp.bit());
2734  }
2735 
2736  // Link the current handler as the next handler.
2737  li(t2, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
2738  lw(t1, MemOperand(t2));
2739  push(t1);
2740  // Set this new handler as the current one.
2741  sw(sp, MemOperand(t2));
2742 }
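
The resulting layout matches the StackHandlerConstants asserted at the top of this function: with 4-byte pointers, sp+0 holds the next-handler link, sp+4 the code object, sp+8 the packed index/kind state, sp+12 the context and sp+16 the frame pointer (the last two are zero for JS_ENTRY handlers), and the isolate's handler-address slot is then updated to point at sp.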
2743 
2744 
2745 void MacroAssembler::PopTryHandler() {
2746  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
2747  pop(a1);
2748  Addu(sp, sp, Operand(StackHandlerConstants::kSize - kPointerSize));
2749  li(at, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
2750  sw(a1, MemOperand(at));
2751 }
2752 
2753 
2754 void MacroAssembler::JumpToHandlerEntry() {
2755  // Compute the handler entry address and jump to it. The handler table is
2756  // a fixed array of (smi-tagged) code offsets.
2757  // v0 = exception, a1 = code object, a2 = state.
2758  lw(a3, FieldMemOperand(a1, Code::kHandlerTableOffset)); // Handler table.
2759  Addu(a3, a3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
2760  srl(a2, a2, StackHandler::kKindWidth); // Handler index.
2761  sll(a2, a2, kPointerSizeLog2);
2762  Addu(a2, a3, a2);
2763  lw(a2, MemOperand(a2)); // Smi-tagged offset.
2764  Addu(a1, a1, Operand(Code::kHeaderSize - kHeapObjectTag)); // Code start.
2765  sra(t9, a2, kSmiTagSize);
2766  Addu(t9, t9, a1);
2767  Jump(t9); // Jump.
2768 }
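
In other words, the jump target is computed as entry = (a1 + Code::kHeaderSize - kHeapObjectTag) + (handler_table[state >> kKindWidth] >> kSmiTagSize): the code object's instruction start plus the un-Smi-tagged offset read from its handler table.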
2769 
2770 
2771 void MacroAssembler::Throw(Register value) {
2772  // Adjust this code if not the case.
2773  STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
2774  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
2775  STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
2776  STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
2777  STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
2778  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
2779 
2780  // The exception is expected in v0.
2781  Move(v0, value);
2782 
2783  // Drop the stack pointer to the top of the top handler.
2784  li(a3, Operand(ExternalReference(Isolate::kHandlerAddress,
2785  isolate())));
2786  lw(sp, MemOperand(a3));
2787 
2788  // Restore the next handler.
2789  pop(a2);
2790  sw(a2, MemOperand(a3));
2791 
2792  // Get the code object (a1) and state (a2). Restore the context and frame
2793  // pointer.
2794  MultiPop(a1.bit() | a2.bit() | cp.bit() | fp.bit());
2795 
2796  // If the handler is a JS frame, restore the context to the frame.
2797  // (kind == ENTRY) == (fp == 0) == (cp == 0), so we could test either fp
2798  // or cp.
2799  Label done;
2800  Branch(&done, eq, cp, Operand(zero_reg));
2801  sw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
2802  bind(&done);
2803 
2804  JumpToHandlerEntry();
2805 }
2806 
2807 
2808 void MacroAssembler::ThrowUncatchable(Register value) {
2809  // Adjust this code if not the case.
2810  STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
2811  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
2812  STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
2813  STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
2814  STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
2815  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
2816 
2817  // The exception is expected in v0.
2818  if (!value.is(v0)) {
2819  mov(v0, value);
2820  }
2821  // Drop the stack pointer to the top of the top stack handler.
2822  li(a3, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
2823  lw(sp, MemOperand(a3));
2824 
2825  // Unwind the handlers until the ENTRY handler is found.
2826  Label fetch_next, check_kind;
2827  jmp(&check_kind);
2828  bind(&fetch_next);
2829  lw(sp, MemOperand(sp, StackHandlerConstants::kNextOffset));
2830 
2831  bind(&check_kind);
2832  STATIC_ASSERT(StackHandler::JS_ENTRY == 0);
2833  lw(a2, MemOperand(sp, StackHandlerConstants::kStateOffset));
2834  And(a2, a2, Operand(StackHandler::KindField::kMask));
2835  Branch(&fetch_next, ne, a2, Operand(zero_reg));
2836 
2837  // Set the top handler address to next handler past the top ENTRY handler.
2838  pop(a2);
2839  sw(a2, MemOperand(a3));
2840 
2841  // Get the code object (a1) and state (a2). Clear the context and frame
2842  // pointer (0 was saved in the handler).
2843  MultiPop(a1.bit() | a2.bit() | cp.bit() | fp.bit());
2844 
2845  JumpToHandlerEntry();
2846 }
2847 
2848 
2849 void MacroAssembler::Allocate(int object_size,
2850  Register result,
2851  Register scratch1,
2852  Register scratch2,
2853  Label* gc_required,
2854  AllocationFlags flags) {
2855  ASSERT(object_size <= Page::kMaxRegularHeapObjectSize);
2856  if (!FLAG_inline_new) {
2857  if (emit_debug_code()) {
2858  // Trash the registers to simulate an allocation failure.
2859  li(result, 0x7091);
2860  li(scratch1, 0x7191);
2861  li(scratch2, 0x7291);
2862  }
2863  jmp(gc_required);
2864  return;
2865  }
2866 
2867  ASSERT(!result.is(scratch1));
2868  ASSERT(!result.is(scratch2));
2869  ASSERT(!scratch1.is(scratch2));
2870  ASSERT(!scratch1.is(t9));
2871  ASSERT(!scratch2.is(t9));
2872  ASSERT(!result.is(t9));
2873 
2874  // Make object size into bytes.
2875  if ((flags & SIZE_IN_WORDS) != 0) {
2876  object_size *= kPointerSize;
2877  }
2878  ASSERT_EQ(0, object_size & kObjectAlignmentMask);
2879 
2880  // Check relative positions of allocation top and limit addresses.
2881  // ARM adds additional checks to make sure the ldm instruction can be
2882  // used. On MIPS we don't have ldm, so no additional checks are needed.
2883  ExternalReference allocation_top =
2884  AllocationUtils::GetAllocationTopReference(isolate(), flags);
2885  ExternalReference allocation_limit =
2886  AllocationUtils::GetAllocationLimitReference(isolate(), flags);
2887 
2888  intptr_t top =
2889  reinterpret_cast<intptr_t>(allocation_top.address());
2890  intptr_t limit =
2891  reinterpret_cast<intptr_t>(allocation_limit.address());
2892  ASSERT((limit - top) == kPointerSize);
2893 
2894  // Set up allocation top address and object size registers.
2895  Register topaddr = scratch1;
2896  li(topaddr, Operand(allocation_top));
2897 
2898  // This code stores a temporary value in t9.
2899  if ((flags & RESULT_CONTAINS_TOP) == 0) {
2900  // Load allocation top into result and allocation limit into t9.
2901  lw(result, MemOperand(topaddr));
2902  lw(t9, MemOperand(topaddr, kPointerSize));
2903  } else {
2904  if (emit_debug_code()) {
2905  // Assert that result actually contains top on entry. t9 is used
2906  // immediately below, so this use of t9 does not cause a difference in
2907  // register content between debug and release mode.
2908  lw(t9, MemOperand(topaddr));
2909  Check(eq, kUnexpectedAllocationTop, result, Operand(t9));
2910  }
2911  // Load allocation limit into t9. Result already contains allocation top.
2912  lw(t9, MemOperand(topaddr, limit - top));
2913  }
2914 
2915  if ((flags & DOUBLE_ALIGNMENT) != 0) {
2916  // Align the next allocation. Storing the filler map without checking top is
2917  // safe in new-space because the limit of the heap is aligned there.
2918  ASSERT((flags & PRETENURE_OLD_POINTER_SPACE) == 0);
2919  ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
2920  And(scratch2, result, Operand(kDoubleAlignmentMask));
2921  Label aligned;
2922  Branch(&aligned, eq, scratch2, Operand(zero_reg));
2923  if ((flags & PRETENURE_OLD_DATA_SPACE) != 0) {
2924  Branch(gc_required, Ugreater_equal, result, Operand(t9));
2925  }
2926  li(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
2927  sw(scratch2, MemOperand(result));
2928  Addu(result, result, Operand(kDoubleSize / 2));
2929  bind(&aligned);
2930  }
2931 
2932  // Calculate new top and bail out if new space is exhausted. Use result
2933  // to calculate the new top.
2934  Addu(scratch2, result, Operand(object_size));
2935  Branch(gc_required, Ugreater, scratch2, Operand(t9));
2936  sw(scratch2, MemOperand(topaddr));
2937 
2938  // Tag object if requested.
2939  if ((flags & TAG_OBJECT) != 0) {
2940  Addu(result, result, Operand(kHeapObjectTag));
2941  }
2942 }
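
The fast path above is a bump-pointer allocation against a top/limit pair that the heap keeps one word apart. A minimal C++ model of just that fast path, a sketch under the same layout assumptions (the real code additionally handles RESULT_CONTAINS_TOP, DOUBLE_ALIGNMENT and object tagging):

#include <cstdint>

struct AllocationSpace {
  uintptr_t top;    // next free address; what 'result' is loaded with
  uintptr_t limit;  // stored one word after top; loaded into t9
};

// Returns the new object's address, or 0 when the caller must take gc_required.
uintptr_t BumpAllocate(AllocationSpace* space, uintptr_t size_in_bytes) {
  uintptr_t result = space->top;
  uintptr_t new_top = result + size_in_bytes;  // Addu(scratch2, result, object_size)
  if (new_top > space->limit) return 0;        // Branch(gc_required, Ugreater, ...)
  space->top = new_top;                        // sw(scratch2, MemOperand(topaddr))
  return result;
}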
2943 
2944 
2945 void MacroAssembler::Allocate(Register object_size,
2946  Register result,
2947  Register scratch1,
2948  Register scratch2,
2949  Label* gc_required,
2950  AllocationFlags flags) {
2951  if (!FLAG_inline_new) {
2952  if (emit_debug_code()) {
2953  // Trash the registers to simulate an allocation failure.
2954  li(result, 0x7091);
2955  li(scratch1, 0x7191);
2956  li(scratch2, 0x7291);
2957  }
2958  jmp(gc_required);
2959  return;
2960  }
2961 
2962  ASSERT(!result.is(scratch1));
2963  ASSERT(!result.is(scratch2));
2964  ASSERT(!scratch1.is(scratch2));
2965  ASSERT(!object_size.is(t9));
2966  ASSERT(!scratch1.is(t9) && !scratch2.is(t9) && !result.is(t9));
2967 
2968  // Check relative positions of allocation top and limit addresses.
2969  // ARM adds additional checks to make sure the ldm instruction can be
2970  // used. On MIPS we don't have ldm, so no additional checks are needed.
2971  ExternalReference allocation_top =
2972  AllocationUtils::GetAllocationTopReference(isolate(), flags);
2973  ExternalReference allocation_limit =
2974  AllocationUtils::GetAllocationLimitReference(isolate(), flags);
2975  intptr_t top =
2976  reinterpret_cast<intptr_t>(allocation_top.address());
2977  intptr_t limit =
2978  reinterpret_cast<intptr_t>(allocation_limit.address());
2979  ASSERT((limit - top) == kPointerSize);
2980 
2981  // Set up allocation top address and object size registers.
2982  Register topaddr = scratch1;
2983  li(topaddr, Operand(allocation_top));
2984 
2985  // This code stores a temporary value in t9.
2986  if ((flags & RESULT_CONTAINS_TOP) == 0) {
2987  // Load allocation top into result and allocation limit into t9.
2988  lw(result, MemOperand(topaddr));
2989  lw(t9, MemOperand(topaddr, kPointerSize));
2990  } else {
2991  if (emit_debug_code()) {
2992  // Assert that result actually contains top on entry. t9 is used
2993  // immediately below, so this use of t9 does not cause a difference in
2994  // register content between debug and release mode.
2995  lw(t9, MemOperand(topaddr));
2996  Check(eq, kUnexpectedAllocationTop, result, Operand(t9));
2997  }
2998  // Load allocation limit into t9. Result already contains allocation top.
2999  lw(t9, MemOperand(topaddr, limit - top));
3000  }
3001 
3002  if ((flags & DOUBLE_ALIGNMENT) != 0) {
3003  // Align the next allocation. Storing the filler map without checking top is
3004  // safe in new-space because the limit of the heap is aligned there.
3005  ASSERT((flags & PRETENURE_OLD_POINTER_SPACE) == 0);
3006  ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
3007  And(scratch2, result, Operand(kDoubleAlignmentMask));
3008  Label aligned;
3009  Branch(&aligned, eq, scratch2, Operand(zero_reg));
3010  if ((flags & PRETENURE_OLD_DATA_SPACE) != 0) {
3011  Branch(gc_required, Ugreater_equal, result, Operand(t9));
3012  }
3013  li(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
3014  sw(scratch2, MemOperand(result));
3015  Addu(result, result, Operand(kDoubleSize / 2));
3016  bind(&aligned);
3017  }
3018 
3019  // Calculate new top and bail out if new space is exhausted. Use result
3020  // to calculate the new top. Object size may be in words so a shift is
3021  // required to get the number of bytes.
3022  if ((flags & SIZE_IN_WORDS) != 0) {
3023  sll(scratch2, object_size, kPointerSizeLog2);
3024  Addu(scratch2, result, scratch2);
3025  } else {
3026  Addu(scratch2, result, Operand(object_size));
3027  }
3028  Branch(gc_required, Ugreater, scratch2, Operand(t9));
3029 
3030  // Update allocation top. scratch2 holds the new top at this point.
3031  if (emit_debug_code()) {
3032  And(t9, scratch2, Operand(kObjectAlignmentMask));
3033  Check(eq, kUnalignedAllocationInNewSpace, t9, Operand(zero_reg));
3034  }
3035  sw(scratch2, MemOperand(topaddr));
3036 
3037  // Tag object if requested.
3038  if ((flags & TAG_OBJECT) != 0) {
3039  Addu(result, result, Operand(kHeapObjectTag));
3040  }
3041 }
3042 
3043 
3044 void MacroAssembler::UndoAllocationInNewSpace(Register object,
3045  Register scratch) {
3046  ExternalReference new_space_allocation_top =
3047  ExternalReference::new_space_allocation_top_address(isolate());
3048 
3049  // Make sure the object has no tag before resetting top.
3050  And(object, object, Operand(~kHeapObjectTagMask));
3051 #ifdef DEBUG
3052  // Check that the object being un-allocated is below the current top.
3053  li(scratch, Operand(new_space_allocation_top));
3054  lw(scratch, MemOperand(scratch));
3055  Check(less, kUndoAllocationOfNonAllocatedMemory,
3056  object, Operand(scratch));
3057 #endif
3058  // Write the address of the object to un-allocate as the current top.
3059  li(scratch, Operand(new_space_allocation_top));
3060  sw(object, MemOperand(scratch));
3061 }
3062 
3063 
3064 void MacroAssembler::AllocateTwoByteString(Register result,
3065  Register length,
3066  Register scratch1,
3067  Register scratch2,
3068  Register scratch3,
3069  Label* gc_required) {
3070  // Calculate the number of bytes needed for the characters in the string while
3071  // observing object alignment.
3072  ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
3073  sll(scratch1, length, 1); // Length in bytes, not chars.
3074  addiu(scratch1, scratch1,
3075  kObjectAlignmentMask + SeqTwoByteString::kHeaderSize);
3076  And(scratch1, scratch1, Operand(~kObjectAlignmentMask));
3077 
3078  // Allocate two-byte string in new space.
3079  Allocate(scratch1,
3080  result,
3081  scratch2,
3082  scratch3,
3083  gc_required,
3084  TAG_OBJECT);
3085 
3086  // Set the map, length and hash field.
3087  InitializeNewString(result,
3088  length,
3089  Heap::kStringMapRootIndex,
3090  scratch1,
3091  scratch2);
3092 }
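
The size computation above doubles the character count, adds the string header plus the alignment mask, and masks the sum down to an object-aligned byte count. A worked sketch assuming the usual 4-byte object alignment of a 32-bit build; the header size of 12 is a stand-in for illustration, not necessarily the real SeqTwoByteString::kHeaderSize:

#include <cassert>
#include <cstdint>

int main() {
  const uint32_t kObjectAlignmentMask = 3;  // 4-byte object alignment on a 32-bit build
  const uint32_t kHeaderSize = 12;          // hypothetical two-byte-string header
  uint32_t length = 5;                      // characters

  uint32_t bytes = (length << 1)            // sll: two bytes per character
                   + kObjectAlignmentMask + kHeaderSize;
  bytes &= ~kObjectAlignmentMask;           // adding the mask first rounds the sum up
  assert(bytes == 24);                      // 10 bytes of payload + 12 header, padded to 24
  return 0;
}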
3093 
3094 
3095 void MacroAssembler::AllocateAsciiString(Register result,
3096  Register length,
3097  Register scratch1,
3098  Register scratch2,
3099  Register scratch3,
3100  Label* gc_required) {
3101  // Calculate the number of bytes needed for the characters in the string
3102  // while observing object alignment.
3103  ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
3104  ASSERT(kCharSize == 1);
3105  addiu(scratch1, length, kObjectAlignmentMask + SeqOneByteString::kHeaderSize);
3106  And(scratch1, scratch1, Operand(~kObjectAlignmentMask));
3107 
3108  // Allocate ASCII string in new space.
3109  Allocate(scratch1,
3110  result,
3111  scratch2,
3112  scratch3,
3113  gc_required,
3114  TAG_OBJECT);
3115 
3116  // Set the map, length and hash field.
3117  InitializeNewString(result,
3118  length,
3119  Heap::kAsciiStringMapRootIndex,
3120  scratch1,
3121  scratch2);
3122 }
3123 
3124 
3125 void MacroAssembler::AllocateTwoByteConsString(Register result,
3126  Register length,
3127  Register scratch1,
3128  Register scratch2,
3129  Label* gc_required) {
3130  Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
3131  TAG_OBJECT);
3132  InitializeNewString(result,
3133  length,
3134  Heap::kConsStringMapRootIndex,
3135  scratch1,
3136  scratch2);
3137 }
3138 
3139 
3140 void MacroAssembler::AllocateAsciiConsString(Register result,
3141  Register length,
3142  Register scratch1,
3143  Register scratch2,
3144  Label* gc_required) {
3145  Label allocate_new_space, install_map;
3146  AllocationFlags flags = TAG_OBJECT;
3147 
3148  ExternalReference high_promotion_mode = ExternalReference::
3149  new_space_high_promotion_mode_active_address(isolate());
3150  li(scratch1, Operand(high_promotion_mode));
3151  lw(scratch1, MemOperand(scratch1, 0));
3152  Branch(&allocate_new_space, eq, scratch1, Operand(zero_reg));
3153 
3154  Allocate(ConsString::kSize,
3155  result,
3156  scratch1,
3157  scratch2,
3158  gc_required,
3159  static_cast<AllocationFlags>(flags | PRETENURE_OLD_POINTER_SPACE));
3160 
3161  jmp(&install_map);
3162 
3163  bind(&allocate_new_space);
3164  Allocate(ConsString::kSize,
3165  result,
3166  scratch1,
3167  scratch2,
3168  gc_required,
3169  flags);
3170 
3171  bind(&install_map);
3172 
3173  InitializeNewString(result,
3174  length,
3175  Heap::kConsAsciiStringMapRootIndex,
3176  scratch1,
3177  scratch2);
3178 }
3179 
3180 
3181 void MacroAssembler::AllocateTwoByteSlicedString(Register result,
3182  Register length,
3183  Register scratch1,
3184  Register scratch2,
3185  Label* gc_required) {
3186  Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
3187  TAG_OBJECT);
3188 
3189  InitializeNewString(result,
3190  length,
3191  Heap::kSlicedStringMapRootIndex,
3192  scratch1,
3193  scratch2);
3194 }
3195 
3196 
3197 void MacroAssembler::AllocateAsciiSlicedString(Register result,
3198  Register length,
3199  Register scratch1,
3200  Register scratch2,
3201  Label* gc_required) {
3202  Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
3203  TAG_OBJECT);
3204 
3205  InitializeNewString(result,
3206  length,
3207  Heap::kSlicedAsciiStringMapRootIndex,
3208  scratch1,
3209  scratch2);
3210 }
3211 
3212 
3213 void MacroAssembler::JumpIfNotUniqueName(Register reg,
3214  Label* not_unique_name) {
3215  STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
3216  Label succeed;
3217  And(at, reg, Operand(kIsNotStringMask | kIsNotInternalizedMask));
3218  Branch(&succeed, eq, at, Operand(zero_reg));
3219  Branch(not_unique_name, ne, reg, Operand(SYMBOL_TYPE));
3220 
3221  bind(&succeed);
3222 }
3223 
3224 
3225 // Allocates a heap number or jumps to the label if the young space is full and
3226 // a scavenge is needed.
3227 void MacroAssembler::AllocateHeapNumber(Register result,
3228  Register scratch1,
3229  Register scratch2,
3230  Register heap_number_map,
3231  Label* need_gc,
3232  TaggingMode tagging_mode) {
3233  // Allocate an object in the heap for the heap number and tag it as a heap
3234  // object.
3235  Allocate(HeapNumber::kSize, result, scratch1, scratch2, need_gc,
3236  tagging_mode == TAG_RESULT ? TAG_OBJECT : NO_ALLOCATION_FLAGS);
3237 
3238  // Store heap number map in the allocated object.
3239  AssertIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
3240  if (tagging_mode == TAG_RESULT) {
3241  sw(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset));
3242  } else {
3243  sw(heap_number_map, MemOperand(result, HeapObject::kMapOffset));
3244  }
3245 }
3246 
3247 
3248 void MacroAssembler::AllocateHeapNumberWithValue(Register result,
3249  FPURegister value,
3250  Register scratch1,
3251  Register scratch2,
3252  Label* gc_required) {
3253  LoadRoot(t8, Heap::kHeapNumberMapRootIndex);
3254  AllocateHeapNumber(result, scratch1, scratch2, t8, gc_required);
3255  sdc1(value, FieldMemOperand(result, HeapNumber::kValueOffset));
3256 }
3257 
3258 
3259 // Copies a fixed number of fields of heap objects from src to dst.
3260 void MacroAssembler::CopyFields(Register dst,
3261  Register src,
3262  RegList temps,
3263  int field_count) {
3264  ASSERT((temps & dst.bit()) == 0);
3265  ASSERT((temps & src.bit()) == 0);
3266  // Primitive implementation using only one temporary register.
3267 
3268  Register tmp = no_reg;
3269  // Find a temp register in temps list.
3270  for (int i = 0; i < kNumRegisters; i++) {
3271  if ((temps & (1 << i)) != 0) {
3272  tmp.code_ = i;
3273  break;
3274  }
3275  }
3276  ASSERT(!tmp.is(no_reg));
3277 
3278  for (int i = 0; i < field_count; i++) {
3279  lw(tmp, FieldMemOperand(src, i * kPointerSize));
3280  sw(tmp, FieldMemOperand(dst, i * kPointerSize));
3281  }
3282 }
3283 
3284 
3285 void MacroAssembler::CopyBytes(Register src,
3286  Register dst,
3287  Register length,
3288  Register scratch) {
3289  Label align_loop_1, word_loop, byte_loop, byte_loop_1, done;
3290 
3291  // Align src before copying in word size chunks.
3292  Branch(&byte_loop, le, length, Operand(kPointerSize));
3293  bind(&align_loop_1);
3294  And(scratch, src, kPointerSize - 1);
3295  Branch(&word_loop, eq, scratch, Operand(zero_reg));
3296  lbu(scratch, MemOperand(src));
3297  Addu(src, src, 1);
3298  sb(scratch, MemOperand(dst));
3299  Addu(dst, dst, 1);
3300  Subu(length, length, Operand(1));
3301  Branch(&align_loop_1, ne, length, Operand(zero_reg));
3302 
3303  // Copy bytes in word size chunks.
3304  bind(&word_loop);
3305  if (emit_debug_code()) {
3306  And(scratch, src, kPointerSize - 1);
3307  Assert(eq, kExpectingAlignmentForCopyBytes,
3308  scratch, Operand(zero_reg));
3309  }
3310  Branch(&byte_loop, lt, length, Operand(kPointerSize));
3311  lw(scratch, MemOperand(src));
3312  Addu(src, src, kPointerSize);
3313 
3314  // TODO(kalmard) check if this can be optimized to use sw in most cases.
3315  // Can't use unaligned access - copy byte by byte.
3316  sb(scratch, MemOperand(dst, 0));
3317  srl(scratch, scratch, 8);
3318  sb(scratch, MemOperand(dst, 1));
3319  srl(scratch, scratch, 8);
3320  sb(scratch, MemOperand(dst, 2));
3321  srl(scratch, scratch, 8);
3322  sb(scratch, MemOperand(dst, 3));
3323  Addu(dst, dst, 4);
3324 
3325  Subu(length, length, Operand(kPointerSize));
3326  Branch(&word_loop);
3327 
3328  // Copy the last bytes if any left.
3329  bind(&byte_loop);
3330  Branch(&done, eq, length, Operand(zero_reg));
3331  bind(&byte_loop_1);
3332  lbu(scratch, MemOperand(src));
3333  Addu(src, src, 1);
3334  sb(scratch, MemOperand(dst));
3335  Addu(dst, dst, 1);
3336  Subu(length, length, Operand(1));
3337  Branch(&byte_loop_1, ne, length, Operand(zero_reg));
3338  bind(&done);
3339 }
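
CopyBytes proceeds in three phases: byte-copy until src reaches word alignment (only when more than a word remains), word-sized loads stored back byte by byte, then a byte-wise tail. A stand-alone C++ model of that control flow, a sketch that assumes a little-endian target just as the sb/srl chain in the word loop does:

#include <cstdint>
#include <cstring>

void CopyBytesModel(const uint8_t* src, uint8_t* dst, uint32_t length) {
  const uint32_t kPtr = 4;  // kPointerSize on MIPS32

  if (length > kPtr) {
    // Phase 1: byte-copy until src is word-aligned.
    while ((reinterpret_cast<uintptr_t>(src) & (kPtr - 1)) != 0 && length != 0) {
      *dst++ = *src++;
      --length;
    }
    // Phase 2: aligned word loads, written out lowest byte first (the sb/srl chain).
    while (length >= kPtr) {
      uint32_t word;
      std::memcpy(&word, src, kPtr);
      src += kPtr;
      for (uint32_t i = 0; i < kPtr; ++i) {
        *dst++ = static_cast<uint8_t>(word >> (8 * i));
      }
      length -= kPtr;
    }
  }

  // Phase 3: copy whatever bytes are left.
  for (; length > 0; --length) {
    *dst++ = *src++;
  }
}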
3340 
3341 
3342 void MacroAssembler::InitializeFieldsWithFiller(Register start_offset,
3343  Register end_offset,
3344  Register filler) {
3345  Label loop, entry;
3346  Branch(&entry);
3347  bind(&loop);
3348  sw(filler, MemOperand(start_offset));
3349  Addu(start_offset, start_offset, kPointerSize);
3350  bind(&entry);
3351  Branch(&loop, lt, start_offset, Operand(end_offset));
3352 }
3353 
3354 
3355 void MacroAssembler::CheckFastElements(Register map,
3356  Register scratch,
3357  Label* fail) {
3358  STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
3359  STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
3360  STATIC_ASSERT(FAST_ELEMENTS == 2);
3361  STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
3362  lbu(scratch, FieldMemOperand(map, Map::kBitField2Offset));
3363  Branch(fail, hi, scratch,
3364  Operand(Map::kMaximumBitField2FastHoleyElementValue));
3365 }
3366 
3367 
3368 void MacroAssembler::CheckFastObjectElements(Register map,
3369  Register scratch,
3370  Label* fail) {
3371  STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
3372  STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
3373  STATIC_ASSERT(FAST_ELEMENTS == 2);
3374  STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
3375  lbu(scratch, FieldMemOperand(map, Map::kBitField2Offset));
3376  Branch(fail, ls, scratch,
3377  Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
3378  Branch(fail, hi, scratch,
3379  Operand(Map::kMaximumBitField2FastHoleyElementValue));
3380 }
3381 
3382 
3383 void MacroAssembler::CheckFastSmiElements(Register map,
3384  Register scratch,
3385  Label* fail) {
3386  STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
3387  STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
3388  lbu(scratch, FieldMemOperand(map, Map::kBitField2Offset));
3389  Branch(fail, hi, scratch,
3390  Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
3391 }
3392 
3393 
3394 void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
3395  Register key_reg,
3396  Register elements_reg,
3397  Register scratch1,
3398  Register scratch2,
3399  Register scratch3,
3400  Label* fail,
3401  int elements_offset) {
3402  Label smi_value, maybe_nan, have_double_value, is_nan, done;
3403  Register mantissa_reg = scratch2;
3404  Register exponent_reg = scratch3;
3405 
3406  // Handle smi values specially.
3407  JumpIfSmi(value_reg, &smi_value);
3408 
3409  // Ensure that the object is a heap number.
3410  CheckMap(value_reg,
3411  scratch1,
3412  Heap::kHeapNumberMapRootIndex,
3413  fail,
3414  DONT_DO_SMI_CHECK);
3415 
3416  // Check for NaN: all NaN values have a value greater (signed) than 0x7ff00000
3417  // in the exponent.
3418  li(scratch1, Operand(kNaNOrInfinityLowerBoundUpper32));
3419  lw(exponent_reg, FieldMemOperand(value_reg, HeapNumber::kExponentOffset));
3420  Branch(&maybe_nan, ge, exponent_reg, Operand(scratch1));
3421 
3422  lw(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset));
3423 
3424  bind(&have_double_value);
3425  sll(scratch1, key_reg, kDoubleSizeLog2 - kSmiTagSize);
3426  Addu(scratch1, scratch1, elements_reg);
3427  sw(mantissa_reg, FieldMemOperand(
3428  scratch1, FixedDoubleArray::kHeaderSize - elements_offset));
3429  uint32_t offset = FixedDoubleArray::kHeaderSize - elements_offset +
3430  sizeof(kHoleNanLower32);
3431  sw(exponent_reg, FieldMemOperand(scratch1, offset));
3432  jmp(&done);
3433 
3434  bind(&maybe_nan);
3435  // Could be NaN or Infinity. If fraction is not zero, it's NaN, otherwise
3436  // it's an Infinity, and the non-NaN code path applies.
3437  Branch(&is_nan, gt, exponent_reg, Operand(scratch1));
3438  lw(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset));
3439  Branch(&have_double_value, eq, mantissa_reg, Operand(zero_reg));
3440  bind(&is_nan);
3441  // Load canonical NaN for storing into the double array.
3442  LoadRoot(at, Heap::kNanValueRootIndex);
3443  lw(mantissa_reg, FieldMemOperand(at, HeapNumber::kMantissaOffset));
3444  lw(exponent_reg, FieldMemOperand(at, HeapNumber::kExponentOffset));
3445  jmp(&have_double_value);
3446 
3447  bind(&smi_value);
3448  Addu(scratch1, elements_reg,
3449  Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag -
3450  elements_offset));
3451  sll(scratch2, key_reg, kDoubleSizeLog2 - kSmiTagSize);
3452  Addu(scratch1, scratch1, scratch2);
3453  // scratch1 now holds the effective address of the double element.
3454 
3455  Register untagged_value = elements_reg;
3456  SmiUntag(untagged_value, value_reg);
3457  mtc1(untagged_value, f2);
3458  cvt_d_w(f0, f2);
3459  sdc1(f0, MemOperand(scratch1, 0));
3460  bind(&done);
3461 }
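
As a concrete illustration of the exponent-word test: the double 1.5 has upper word 0x3FF80000, well below kNaNOrInfinityLowerBoundUpper32 (0x7FF00000), so it is stored directly; +Infinity has exactly 0x7FF00000 with a zero mantissa word and is also stored as-is; a quiet NaN such as one with upper word 0x7FF80000 compares greater and is replaced by the canonical NaN loaded from the root list.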
3462 
3463 
3464 void MacroAssembler::CompareMapAndBranch(Register obj,
3465  Register scratch,
3466  Handle<Map> map,
3467  Label* early_success,
3468  Condition cond,
3469  Label* branch_to) {
3470  lw(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
3471  CompareMapAndBranch(scratch, map, early_success, cond, branch_to);
3472 }
3473 
3474 
3475 void MacroAssembler::CompareMapAndBranch(Register obj_map,
3476  Handle<Map> map,
3477  Label* early_success,
3478  Condition cond,
3479  Label* branch_to) {
3480  Branch(branch_to, cond, obj_map, Operand(map));
3481 }
3482 
3483 
3484 void MacroAssembler::CheckMap(Register obj,
3485  Register scratch,
3486  Handle<Map> map,
3487  Label* fail,
3488  SmiCheckType smi_check_type) {
3489  if (smi_check_type == DO_SMI_CHECK) {
3490  JumpIfSmi(obj, fail);
3491  }
3492  Label success;
3493  CompareMapAndBranch(obj, scratch, map, &success, ne, fail);
3494  bind(&success);
3495 }
3496 
3497 
3498 void MacroAssembler::DispatchMap(Register obj,
3499  Register scratch,
3500  Handle<Map> map,
3501  Handle<Code> success,
3502  SmiCheckType smi_check_type) {
3503  Label fail;
3504  if (smi_check_type == DO_SMI_CHECK) {
3505  JumpIfSmi(obj, &fail);
3506  }
3507  lw(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
3508  Jump(success, RelocInfo::CODE_TARGET, eq, scratch, Operand(map));
3509  bind(&fail);
3510 }
3511 
3512 
3513 void MacroAssembler::CheckMap(Register obj,
3514  Register scratch,
3515  Heap::RootListIndex index,
3516  Label* fail,
3517  SmiCheckType smi_check_type) {
3518  if (smi_check_type == DO_SMI_CHECK) {
3519  JumpIfSmi(obj, fail);
3520  }
3521  lw(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
3522  LoadRoot(at, index);
3523  Branch(fail, ne, scratch, Operand(at));
3524 }
3525 
3526 
3527 void MacroAssembler::MovFromFloatResult(DoubleRegister dst) {
3528  if (IsMipsSoftFloatABI) {
3529  Move(dst, v0, v1);
3530  } else {
3531  Move(dst, f0); // Reg f0 is o32 ABI FP return value.
3532  }
3533 }
3534 
3535 
3536 void MacroAssembler::MovFromFloatParameter(DoubleRegister dst) {
3537  if (IsMipsSoftFloatABI) {
3538  Move(dst, a0, a1);
3539  } else {
3540  Move(dst, f12); // Reg f12 is o32 ABI FP first argument value.
3541  }
3542 }
3543 
3544 
3545 void MacroAssembler::MovToFloatParameter(DoubleRegister src) {
3546  if (!IsMipsSoftFloatABI) {
3547  Move(f12, src);
3548  } else {
3549  Move(a0, a1, src);
3550  }
3551 }
3552 
3553 
3554 void MacroAssembler::MovToFloatResult(DoubleRegister src) {
3555  if (!IsMipsSoftFloatABI) {
3556  Move(f0, src);
3557  } else {
3558  Move(v0, v1, src);
3559  }
3560 }
3561 
3562 
3563 void MacroAssembler::MovToFloatParameters(DoubleRegister src1,
3564  DoubleRegister src2) {
3565  if (!IsMipsSoftFloatABI) {
3566  if (src2.is(f12)) {
3567  ASSERT(!src1.is(f14));
3568  Move(f14, src2);
3569  Move(f12, src1);
3570  } else {
3571  Move(f12, src1);
3572  Move(f14, src2);
3573  }
3574  } else {
3575  Move(a0, a1, src1);
3576  Move(a2, a3, src2);
3577  }
3578 }
3579 
3580 
3581 // -----------------------------------------------------------------------------
3582 // JavaScript invokes.
3583 
3584 void MacroAssembler::InvokePrologue(const ParameterCount& expected,
3585  const ParameterCount& actual,
3586  Handle<Code> code_constant,
3587  Register code_reg,
3588  Label* done,
3589  bool* definitely_mismatches,
3590  InvokeFlag flag,
3591  const CallWrapper& call_wrapper) {
3592  bool definitely_matches = false;
3593  *definitely_mismatches = false;
3594  Label regular_invoke;
3595 
3596  // Check whether the expected and actual arguments count match. If not,
3597  // setup registers according to contract with ArgumentsAdaptorTrampoline:
3598  // a0: actual arguments count
3599  // a1: function (passed through to callee)
3600  // a2: expected arguments count
3601 
3602  // The code below is made a lot easier because the calling code already sets
3603  // up actual and expected registers according to the contract if values are
3604  // passed in registers.
3605  ASSERT(actual.is_immediate() || actual.reg().is(a0));
3606  ASSERT(expected.is_immediate() || expected.reg().is(a2));
3607  ASSERT((!code_constant.is_null() && code_reg.is(no_reg)) || code_reg.is(a3));
3608 
3609  if (expected.is_immediate()) {
3610  ASSERT(actual.is_immediate());
3611  if (expected.immediate() == actual.immediate()) {
3612  definitely_matches = true;
3613  } else {
3614  li(a0, Operand(actual.immediate()));
3615  const int sentinel = SharedFunctionInfo::kDontAdaptArgumentsSentinel;
3616  if (expected.immediate() == sentinel) {
3617  // Don't worry about adapting arguments for builtins that
3618  // don't want that done. Skip adaptation code by making it look
3619  // like we have a match between expected and actual number of
3620  // arguments.
3621  definitely_matches = true;
3622  } else {
3623  *definitely_mismatches = true;
3624  li(a2, Operand(expected.immediate()));
3625  }
3626  }
3627  } else if (actual.is_immediate()) {
3628  Branch(&regular_invoke, eq, expected.reg(), Operand(actual.immediate()));
3629  li(a0, Operand(actual.immediate()));
3630  } else {
3631  Branch(&regular_invoke, eq, expected.reg(), Operand(actual.reg()));
3632  }
3633 
3634  if (!definitely_matches) {
3635  if (!code_constant.is_null()) {
3636  li(a3, Operand(code_constant));
3637  addiu(a3, a3, Code::kHeaderSize - kHeapObjectTag);
3638  }
3639 
3640  Handle<Code> adaptor =
3641  isolate()->builtins()->ArgumentsAdaptorTrampoline();
3642  if (flag == CALL_FUNCTION) {
3643  call_wrapper.BeforeCall(CallSize(adaptor));
3644  Call(adaptor);
3645  call_wrapper.AfterCall();
3646  if (!*definitely_mismatches) {
3647  Branch(done);
3648  }
3649  } else {
3650  Jump(adaptor, RelocInfo::CODE_TARGET);
3651  }
3652  bind(&regular_invoke);
3653  }
3654 }
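
For example, if a call site passes three arguments to a function whose SharedFunctionInfo expects two, a0 ends up holding 3 and a2 holding 2, and control goes through the ArgumentsAdaptorTrampoline, which builds an adapted frame before entering the callee; when the counts match, or the callee was compiled with kDontAdaptArgumentsSentinel, the adaptor is skipped and the regular invoke path is taken.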
3655 
3656 
3657 void MacroAssembler::InvokeCode(Register code,
3658  const ParameterCount& expected,
3659  const ParameterCount& actual,
3660  InvokeFlag flag,
3661  const CallWrapper& call_wrapper) {
3662  // You can't call a function without a valid frame.
3663  ASSERT(flag == JUMP_FUNCTION || has_frame());
3664 
3665  Label done;
3666 
3667  bool definitely_mismatches = false;
3668  InvokePrologue(expected, actual, Handle<Code>::null(), code,
3669  &done, &definitely_mismatches, flag,
3670  call_wrapper);
3671  if (!definitely_mismatches) {
3672  if (flag == CALL_FUNCTION) {
3673  call_wrapper.BeforeCall(CallSize(code));
3674  Call(code);
3675  call_wrapper.AfterCall();
3676  } else {
3677  ASSERT(flag == JUMP_FUNCTION);
3678  Jump(code);
3679  }
3680  // Continue here if InvokePrologue already handled the invocation due to
3681  // mismatched parameter counts.
3682  bind(&done);
3683  }
3684 }
3685 
3686 
3687 void MacroAssembler::InvokeFunction(Register function,
3688  const ParameterCount& actual,
3689  InvokeFlag flag,
3690  const CallWrapper& call_wrapper) {
3691  // You can't call a function without a valid frame.
3692  ASSERT(flag == JUMP_FUNCTION || has_frame());
3693 
3694  // Contract with called JS functions requires that function is passed in a1.
3695  ASSERT(function.is(a1));
3696  Register expected_reg = a2;
3697  Register code_reg = a3;
3698 
3699  lw(code_reg, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
3700  lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
3701  lw(expected_reg,
3702  FieldMemOperand(code_reg,
3703  SharedFunctionInfo::kFormalParameterCountOffset));
3704  sra(expected_reg, expected_reg, kSmiTagSize);
3705  lw(code_reg, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
3706 
3707  ParameterCount expected(expected_reg);
3708  InvokeCode(code_reg, expected, actual, flag, call_wrapper);
3709 }
3710 
3711 
3712 void MacroAssembler::InvokeFunction(Register function,
3713  const ParameterCount& expected,
3714  const ParameterCount& actual,
3715  InvokeFlag flag,
3716  const CallWrapper& call_wrapper) {
3717  // You can't call a function without a valid frame.
3718  ASSERT(flag == JUMP_FUNCTION || has_frame());
3719 
3720  // Contract with called JS functions requires that function is passed in a1.
3721  ASSERT(function.is(a1));
3722 
3723  // Get the function and setup the context.
3724  lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
3725 
3726  // We call indirectly through the code field in the function to
3727  // allow recompilation to take effect without changing any of the
3728  // call sites.
3729  lw(a3, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
3730  InvokeCode(a3, expected, actual, flag, call_wrapper);
3731 }
3732 
3733 
3734 void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
3735  const ParameterCount& expected,
3736  const ParameterCount& actual,
3737  InvokeFlag flag,
3738  const CallWrapper& call_wrapper) {
3739  li(a1, function);
3740  InvokeFunction(a1, expected, actual, flag, call_wrapper);
3741 }
3742 
3743 
3744 void MacroAssembler::IsObjectJSObjectType(Register heap_object,
3745  Register map,
3746  Register scratch,
3747  Label* fail) {
3748  lw(map, FieldMemOperand(heap_object, HeapObject::kMapOffset));
3749  IsInstanceJSObjectType(map, scratch, fail);
3750 }
3751 
3752 
3753 void MacroAssembler::IsInstanceJSObjectType(Register map,
3754  Register scratch,
3755  Label* fail) {
3756  lbu(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
3757  Branch(fail, lt, scratch, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
3758  Branch(fail, gt, scratch, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
3759 }
3760 
3761 
3762 void MacroAssembler::IsObjectJSStringType(Register object,
3763  Register scratch,
3764  Label* fail) {
3765  ASSERT(kNotStringTag != 0);
3766 
3767  lw(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
3768  lbu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
3769  And(scratch, scratch, Operand(kIsNotStringMask));
3770  Branch(fail, ne, scratch, Operand(zero_reg));
3771 }
3772 
3773 
3774 void MacroAssembler::IsObjectNameType(Register object,
3775  Register scratch,
3776  Label* fail) {
3777  lw(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
3778  lbu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
3779  Branch(fail, hi, scratch, Operand(LAST_NAME_TYPE));
3780 }
3781 
3782 
3783 // ---------------------------------------------------------------------------
3784 // Support functions.
3785 
3786 
3787 void MacroAssembler::TryGetFunctionPrototype(Register function,
3788  Register result,
3789  Register scratch,
3790  Label* miss,
3791  bool miss_on_bound_function) {
3792  // Check that the receiver isn't a smi.
3793  JumpIfSmi(function, miss);
3794 
3795  // Check that the function really is a function. Load map into result reg.
3796  GetObjectType(function, result, scratch);
3797  Branch(miss, ne, scratch, Operand(JS_FUNCTION_TYPE));
3798 
3799  if (miss_on_bound_function) {
3800  lw(scratch,
3801  FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
3802  lw(scratch,
3803  FieldMemOperand(scratch, SharedFunctionInfo::kCompilerHintsOffset));
3804  And(scratch, scratch,
3805  Operand(Smi::FromInt(1 << SharedFunctionInfo::kBoundFunction)));
3806  Branch(miss, ne, scratch, Operand(zero_reg));
3807  }
3808 
3809  // Make sure that the function has an instance prototype.
3810  Label non_instance;
3811  lbu(scratch, FieldMemOperand(result, Map::kBitFieldOffset));
3812  And(scratch, scratch, Operand(1 << Map::kHasNonInstancePrototype));
3813  Branch(&non_instance, ne, scratch, Operand(zero_reg));
3814 
3815  // Get the prototype or initial map from the function.
3816  lw(result,
3817  FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
3818 
3819  // If the prototype or initial map is the hole, don't return it and
3820  // simply miss the cache instead. This will allow us to allocate a
3821  // prototype object on-demand in the runtime system.
3822  LoadRoot(t8, Heap::kTheHoleValueRootIndex);
3823  Branch(miss, eq, result, Operand(t8));
3824 
3825  // If the function does not have an initial map, we're done.
3826  Label done;
3827  GetObjectType(result, scratch, scratch);
3828  Branch(&done, ne, scratch, Operand(MAP_TYPE));
3829 
3830  // Get the prototype from the initial map.
3831  lw(result, FieldMemOperand(result, Map::kPrototypeOffset));
3832  jmp(&done);
3833 
3834  // Non-instance prototype: Fetch prototype from constructor field
3835  // in initial map.
3836  bind(&non_instance);
3837  lw(result, FieldMemOperand(result, Map::kConstructorOffset));
3838 
3839  // All done.
3840  bind(&done);
3841 }
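
// Illustrative sketch, not from the V8 sources: the miss_on_bound_function
// path tests one bit of the smi-encoded compiler hints. Because the hints
// word is a smi, the mask is the hint bit shifted up by the smi tag, which is
// what Operand(Smi::FromInt(1 << kBoundFunction)) expresses above. The helper
// below is hypothetical and assumes a one-bit smi tag.
static inline bool BoundFunctionHintSetSketch(uint32_t smi_compiler_hints,
                                              int bound_function_bit) {
  const int kSmiShift = 1;  // kSmiTagSize.
  uint32_t mask = (1u << bound_function_bit) << kSmiShift;
  return (smi_compiler_hints & mask) != 0;
}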
3842 
3843 
3844 void MacroAssembler::GetObjectType(Register object,
3845  Register map,
3846  Register type_reg) {
3847  lw(map, FieldMemOperand(object, HeapObject::kMapOffset));
3848  lbu(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
3849 }
3850 
3851 
3852 // -----------------------------------------------------------------------------
3853 // Runtime calls.
3854 
3855 void MacroAssembler::CallStub(CodeStub* stub,
3856  TypeFeedbackId ast_id,
3857  Condition cond,
3858  Register r1,
3859  const Operand& r2,
3860  BranchDelaySlot bd) {
3861  ASSERT(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs.
3862  Call(stub->GetCode(isolate()), RelocInfo::CODE_TARGET, ast_id,
3863  cond, r1, r2, bd);
3864 }
3865 
3866 
3867 void MacroAssembler::TailCallStub(CodeStub* stub,
3868  Condition cond,
3869  Register r1,
3870  const Operand& r2,
3871  BranchDelaySlot bd) {
3872  Jump(stub->GetCode(isolate()), RelocInfo::CODE_TARGET, cond, r1, r2, bd);
3873 }
3874 
3875 
3876 static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
3877  return ref0.address() - ref1.address();
3878 }
3879 
3880 
3881 void MacroAssembler::CallApiFunctionAndReturn(
3882  Register function_address,
3883  ExternalReference thunk_ref,
3884  int stack_space,
3885  MemOperand return_value_operand,
3886  MemOperand* context_restore_operand) {
3887  ExternalReference next_address =
3888  ExternalReference::handle_scope_next_address(isolate());
3889  const int kNextOffset = 0;
3890  const int kLimitOffset = AddressOffset(
3891  ExternalReference::handle_scope_limit_address(isolate()),
3892  next_address);
3893  const int kLevelOffset = AddressOffset(
3894  ExternalReference::handle_scope_level_address(isolate()),
3895  next_address);
3896 
3897  ASSERT(function_address.is(a1) || function_address.is(a2));
3898 
3899  Label profiler_disabled;
3900  Label end_profiler_check;
3901  bool* is_profiling_flag =
3902  isolate()->cpu_profiler()->is_profiling_address();
3903  STATIC_ASSERT(sizeof(*is_profiling_flag) == 1);
3904  li(t9, reinterpret_cast<int32_t>(is_profiling_flag));
3905  lb(t9, MemOperand(t9, 0));
3906  Branch(&profiler_disabled, eq, t9, Operand(zero_reg));
3907 
3908  // Additional parameter is the address of the actual callback.
3909  li(t9, Operand(thunk_ref));
3910  jmp(&end_profiler_check);
3911 
3912  bind(&profiler_disabled);
3913  mov(t9, function_address);
3914  bind(&end_profiler_check);
3915 
3916  // Allocate HandleScope in callee-save registers.
3917  li(s3, Operand(next_address));
3918  lw(s0, MemOperand(s3, kNextOffset));
3919  lw(s1, MemOperand(s3, kLimitOffset));
3920  lw(s2, MemOperand(s3, kLevelOffset));
3921  Addu(s2, s2, Operand(1));
3922  sw(s2, MemOperand(s3, kLevelOffset));
3923 
3924  if (FLAG_log_timer_events) {
3925  FrameScope frame(this, StackFrame::MANUAL);
3926  PushSafepointRegisters();
3927  PrepareCallCFunction(1, a0);
3928  li(a0, Operand(ExternalReference::isolate_address(isolate())));
3929  CallCFunction(ExternalReference::log_enter_external_function(isolate()), 1);
3930  PopSafepointRegisters();
3931  }
3932 
3933  // Native call returns to the DirectCEntry stub which redirects to the
3934  // return address pushed on stack (could have moved after GC).
3935  // DirectCEntry stub itself is generated early and never moves.
3936  DirectCEntryStub stub;
3937  stub.GenerateCall(this, t9);
3938 
3939  if (FLAG_log_timer_events) {
3940  FrameScope frame(this, StackFrame::MANUAL);
3941  PushSafepointRegisters();
3942  PrepareCallCFunction(1, a0);
3943  li(a0, Operand(ExternalReference::isolate_address(isolate())));
3944  CallCFunction(ExternalReference::log_leave_external_function(isolate()), 1);
3945  PopSafepointRegisters();
3946  }
3947 
3948  Label promote_scheduled_exception;
3949  Label exception_handled;
3950  Label delete_allocated_handles;
3951  Label leave_exit_frame;
3952  Label return_value_loaded;
3953 
3954  // Load value from ReturnValue.
3955  lw(v0, return_value_operand);
3956  bind(&return_value_loaded);
3957 
3958  // No more valid handles (the result handle was the last one). Restore
3959  // previous handle scope.
3960  sw(s0, MemOperand(s3, kNextOffset));
3961  if (emit_debug_code()) {
3962  lw(a1, MemOperand(s3, kLevelOffset));
3963  Check(eq, kUnexpectedLevelAfterReturnFromApiCall, a1, Operand(s2));
3964  }
3965  Subu(s2, s2, Operand(1));
3966  sw(s2, MemOperand(s3, kLevelOffset));
3967  lw(at, MemOperand(s3, kLimitOffset));
3968  Branch(&delete_allocated_handles, ne, s1, Operand(at));
3969 
3970  // Check if the function scheduled an exception.
3971  bind(&leave_exit_frame);
3972  LoadRoot(t0, Heap::kTheHoleValueRootIndex);
3973  li(at, Operand(ExternalReference::scheduled_exception_address(isolate())));
3974  lw(t1, MemOperand(at));
3975  Branch(&promote_scheduled_exception, ne, t0, Operand(t1));
3976  bind(&exception_handled);
3977 
3978  bool restore_context = context_restore_operand != NULL;
3979  if (restore_context) {
3980  lw(cp, *context_restore_operand);
3981  }
3982  li(s0, Operand(stack_space));
3983  LeaveExitFrame(false, s0, !restore_context, EMIT_RETURN);
3984 
3985  bind(&promote_scheduled_exception);
3986  {
3987  FrameScope frame(this, StackFrame::INTERNAL);
3988  CallExternalReference(
3989  ExternalReference(Runtime::kHiddenPromoteScheduledException, isolate()),
3990  0);
3991  }
3992  jmp(&exception_handled);
3993 
3994  // HandleScope limit has changed. Delete allocated extensions.
3995  bind(&delete_allocated_handles);
3996  sw(s1, MemOperand(s3, kLimitOffset));
3997  mov(s0, v0);
3998  mov(a0, v0);
3999  PrepareCallCFunction(1, s1);
4000  li(a0, Operand(ExternalReference::isolate_address(isolate())));
4001  CallCFunction(ExternalReference::delete_handle_scope_extensions(isolate()),
4002  1);
4003  mov(v0, s0);
4004  jmp(&leave_exit_frame);
4005 }
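
// Illustrative sketch, not from the V8 sources: the HandleScope bookkeeping
// that s0..s2 shadow across the API call. next/limit/level are saved, the
// level is bumped for the call, and afterwards next and level are restored;
// if the limit moved, extensions allocated during the call must be deleted
// (the delete_allocated_handles path above). The struct and helper are
// hypothetical simplifications of the real HandleScope data.
struct HandleScopeDataSketch {
  void* next;
  void* limit;
  int level;
};
static inline bool RestoreHandleScopeSketch(HandleScopeDataSketch* data,
                                            void* saved_next,
                                            void* saved_limit,
                                            int saved_level) {
  data->next = saved_next;    // sw(s0, MemOperand(s3, kNextOffset)).
  data->level = saved_level;  // Subu(s2, s2, 1) followed by the store above.
  return data->limit != saved_limit;  // True => delete allocated extensions.
}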
4006 
4007 
4008 bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
4009  return has_frame_ || !stub->SometimesSetsUpAFrame();
4010 }
4011 
4012 
4013 void MacroAssembler::IllegalOperation(int num_arguments) {
4014  if (num_arguments > 0) {
4015  addiu(sp, sp, num_arguments * kPointerSize);
4016  }
4017  LoadRoot(v0, Heap::kUndefinedValueRootIndex);
4018 }
4019 
4020 
4021 void MacroAssembler::IndexFromHash(Register hash,
4022  Register index) {
4023  // If the hash field contains an array index pick it out. The assert checks
4024  // that the constants for the maximum number of digits for an array index
4025  // cached in the hash field and the number of bits reserved for it does not
4026  // conflict.
4027  ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) <
4028  (1 << String::kArrayIndexValueBits));
4029  // We want the smi-tagged index in key. kArrayIndexValueMask has zeros in
4030  // the low kHashShift bits.
4031  STATIC_ASSERT(kSmiTag == 0);
4032  Ext(hash, hash, String::kHashShift, String::kArrayIndexValueBits);
4033  sll(index, hash, kSmiTagSize);
4034 }
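
// Illustrative sketch, not from the V8 sources: what the Ext + sll pair above
// computes. The cached array index sits in a bit field of the hash word; it
// is extracted and then smi-tagged by a one-bit shift. The helper and its
// parameters are hypothetical stand-ins for String::kHashShift,
// String::kArrayIndexValueBits and kSmiTagSize.
static inline uint32_t ArrayIndexSmiFromHashSketch(uint32_t hash_field,
                                                   int hash_shift,
                                                   int value_bits,
                                                   int smi_tag_size) {
  // Ext(dst, src, pos, size) extracts 'size' bits starting at bit 'pos'.
  uint32_t index = (hash_field >> hash_shift) & ((1u << value_bits) - 1);
  return index << smi_tag_size;  // sll(index, hash, kSmiTagSize).
}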
4035 
4036 
4037 void MacroAssembler::ObjectToDoubleFPURegister(Register object,
4038  FPURegister result,
4039  Register scratch1,
4040  Register scratch2,
4041  Register heap_number_map,
4042  Label* not_number,
4043  ObjectToDoubleFlags flags) {
4044  Label done;
4045  if ((flags & OBJECT_NOT_SMI) == 0) {
4046  Label not_smi;
4047  JumpIfNotSmi(object, &not_smi);
4048  // Remove smi tag and convert to double.
4049  sra(scratch1, object, kSmiTagSize);
4050  mtc1(scratch1, result);
4051  cvt_d_w(result, result);
4052  Branch(&done);
4053  bind(&not_smi);
4054  }
4055  // Check for heap number and load double value from it.
4056  lw(scratch1, FieldMemOperand(object, HeapObject::kMapOffset));
4057  Branch(not_number, ne, scratch1, Operand(heap_number_map));
4058 
4059  if ((flags & AVOID_NANS_AND_INFINITIES) != 0) {
4060  // If exponent is all ones the number is either a NaN or +/-Infinity.
4061  Register exponent = scratch1;
4062  Register mask_reg = scratch2;
4063  lw(exponent, FieldMemOperand(object, HeapNumber::kExponentOffset));
4064  li(mask_reg, HeapNumber::kExponentMask);
4065 
4066  And(exponent, exponent, mask_reg);
4067  Branch(not_number, eq, exponent, Operand(mask_reg));
4068  }
4069  ldc1(result, FieldMemOperand(object, HeapNumber::kValueOffset));
4070  bind(&done);
4071 }
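
// Illustrative sketch, not from the V8 sources: the AVOID_NANS_AND_INFINITIES
// check above. An IEEE 754 double is NaN or +/-Infinity exactly when all
// eleven exponent bits are set, which is tested by masking the upper word
// with HeapNumber::kExponentMask. The helper name is hypothetical; 0x7FF00000
// is the standard exponent mask for the high 32 bits of a double.
static inline bool IsNaNOrInfinityUpperWordSketch(uint32_t upper_word) {
  const uint32_t kExponentMaskSketch = 0x7FF00000u;
  return (upper_word & kExponentMaskSketch) == kExponentMaskSketch;
}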
4072 
4073 
4074 void MacroAssembler::SmiToDoubleFPURegister(Register smi,
4075  FPURegister value,
4076  Register scratch1) {
4077  sra(scratch1, smi, kSmiTagSize);
4078  mtc1(scratch1, value);
4079  cvt_d_w(value, value);
4080 }
4081 
4082 
4083 void MacroAssembler::AdduAndCheckForOverflow(Register dst,
4084  Register left,
4085  Register right,
4086  Register overflow_dst,
4087  Register scratch) {
4088  ASSERT(!dst.is(overflow_dst));
4089  ASSERT(!dst.is(scratch));
4090  ASSERT(!overflow_dst.is(scratch));
4091  ASSERT(!overflow_dst.is(left));
4092  ASSERT(!overflow_dst.is(right));
4093 
4094  if (left.is(right) && dst.is(left)) {
4095  ASSERT(!dst.is(t9));
4096  ASSERT(!scratch.is(t9));
4097  ASSERT(!left.is(t9));
4098  ASSERT(!right.is(t9));
4099  ASSERT(!overflow_dst.is(t9));
4100  mov(t9, right);
4101  right = t9;
4102  }
4103 
4104  if (dst.is(left)) {
4105  mov(scratch, left); // Preserve left.
4106  addu(dst, left, right); // Left is overwritten.
4107  xor_(scratch, dst, scratch); // Original left.
4108  xor_(overflow_dst, dst, right);
4109  and_(overflow_dst, overflow_dst, scratch);
4110  } else if (dst.is(right)) {
4111  mov(scratch, right); // Preserve right.
4112  addu(dst, left, right); // Right is overwritten.
4113  xor_(scratch, dst, scratch); // Original right.
4114  xor_(overflow_dst, dst, left);
4115  and_(overflow_dst, overflow_dst, scratch);
4116  } else {
4117  addu(dst, left, right);
4118  xor_(overflow_dst, dst, left);
4119  xor_(scratch, dst, right);
4120  and_(overflow_dst, scratch, overflow_dst);
4121  }
4122 }
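
// Illustrative sketch, not from the V8 sources: the signed-overflow test that
// the xor_/and_ sequence above implements. Addition overflows exactly when
// the sign of the wrapped sum differs from the signs of both operands, so
// (sum ^ left) & (sum ^ right) has its sign bit set. The helper is a
// hypothetical, 32-bit two's-complement sketch.
static inline bool AddOverflows32Sketch(int32_t left, int32_t right) {
  // addu wraps modulo 2^32; do the wrap in unsigned arithmetic to keep the
  // sketch free of C++ signed-overflow undefined behavior.
  int32_t sum = static_cast<int32_t>(
      static_cast<uint32_t>(left) + static_cast<uint32_t>(right));
  return ((sum ^ left) & (sum ^ right)) < 0;
}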
4123 
4124 
4125 void MacroAssembler::SubuAndCheckForOverflow(Register dst,
4126  Register left,
4127  Register right,
4128  Register overflow_dst,
4129  Register scratch) {
4130  ASSERT(!dst.is(overflow_dst));
4131  ASSERT(!dst.is(scratch));
4132  ASSERT(!overflow_dst.is(scratch));
4133  ASSERT(!overflow_dst.is(left));
4134  ASSERT(!overflow_dst.is(right));
4135  ASSERT(!scratch.is(left));
4136  ASSERT(!scratch.is(right));
4137 
4138  // This happens with some crankshaft code. Since Subu works fine if
4139  // left == right, let's not make that restriction here.
4140  if (left.is(right)) {
4141  mov(dst, zero_reg);
4142  mov(overflow_dst, zero_reg);
4143  return;
4144  }
4145 
4146  if (dst.is(left)) {
4147  mov(scratch, left); // Preserve left.
4148  subu(dst, left, right); // Left is overwritten.
4149  xor_(overflow_dst, dst, scratch); // scratch is original left.
4150  xor_(scratch, scratch, right); // scratch is now original left XOR right.
4151  and_(overflow_dst, scratch, overflow_dst);
4152  } else if (dst.is(right)) {
4153  mov(scratch, right); // Preserve right.
4154  subu(dst, left, right); // Right is overwritten.
4155  xor_(overflow_dst, dst, left);
4156  xor_(scratch, left, scratch); // Original right.
4157  and_(overflow_dst, scratch, overflow_dst);
4158  } else {
4159  subu(dst, left, right);
4160  xor_(overflow_dst, dst, left);
4161  xor_(scratch, left, right);
4162  and_(overflow_dst, scratch, overflow_dst);
4163  }
4164 }
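
// Illustrative sketch, not from the V8 sources: the subtraction variant of
// the overflow test above. Subtraction can only overflow when the operands
// have different signs, and then only if the result's sign differs from the
// minuend's, hence (diff ^ left) & (left ^ right). Hypothetical 32-bit
// two's-complement sketch.
static inline bool SubOverflows32Sketch(int32_t left, int32_t right) {
  int32_t diff = static_cast<int32_t>(
      static_cast<uint32_t>(left) - static_cast<uint32_t>(right));
  return ((diff ^ left) & (left ^ right)) < 0;
}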
4165 
4166 
4167 void MacroAssembler::CallRuntime(const Runtime::Function* f,
4168  int num_arguments,
4169  SaveFPRegsMode save_doubles) {
4170  // All parameters are on the stack. v0 has the return value after call.
4171 
4172  // If the expected number of arguments of the runtime function is
4173  // constant, we check that the actual number of arguments match the
4174  // expectation.
4175  if (f->nargs >= 0 && f->nargs != num_arguments) {
4176  IllegalOperation(num_arguments);
4177  return;
4178  }
4179 
4180  // TODO(1236192): Most runtime routines don't need the number of
4181  // arguments passed in because it is constant. At some point we
4182  // should remove this need and make the runtime routine entry code
4183  // smarter.
4184  PrepareCEntryArgs(num_arguments);
4185  PrepareCEntryFunction(ExternalReference(f, isolate()));
4186  CEntryStub stub(1, save_doubles);
4187  CallStub(&stub);
4188 }
4189 
4190 
4191 void MacroAssembler::CallExternalReference(const ExternalReference& ext,
4192  int num_arguments,
4193  BranchDelaySlot bd) {
4194  PrepareCEntryArgs(num_arguments);
4195  PrepareCEntryFunction(ext);
4196 
4197  CEntryStub stub(1);
4198  CallStub(&stub, TypeFeedbackId::None(), al, zero_reg, Operand(zero_reg), bd);
4199 }
4200 
4201 
4202 void MacroAssembler::TailCallExternalReference(const ExternalReference& ext,
4203  int num_arguments,
4204  int result_size) {
4205  // TODO(1236192): Most runtime routines don't need the number of
4206  // arguments passed in because it is constant. At some point we
4207  // should remove this need and make the runtime routine entry code
4208  // smarter.
4209  PrepareCEntryArgs(num_arguments);
4210  JumpToExternalReference(ext);
4211 }
4212 
4213 
4214 void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
4215  int num_arguments,
4216  int result_size) {
4217  TailCallExternalReference(ExternalReference(fid, isolate()),
4218  num_arguments,
4219  result_size);
4220 }
4221 
4222 
4223 void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
4224  BranchDelaySlot bd) {
4225  PrepareCEntryFunction(builtin);
4226  CEntryStub stub(1);
4227  Jump(stub.GetCode(isolate()),
4228  RelocInfo::CODE_TARGET,
4229  al,
4230  zero_reg,
4231  Operand(zero_reg),
4232  bd);
4233 }
4234 
4235 
4236 void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
4237  InvokeFlag flag,
4238  const CallWrapper& call_wrapper) {
4239  // You can't call a builtin without a valid frame.
4240  ASSERT(flag == JUMP_FUNCTION || has_frame());
4241 
4242  GetBuiltinEntry(t9, id);
4243  if (flag == CALL_FUNCTION) {
4244  call_wrapper.BeforeCall(CallSize(t9));
4245  Call(t9);
4246  call_wrapper.AfterCall();
4247  } else {
4248  ASSERT(flag == JUMP_FUNCTION);
4249  Jump(t9);
4250  }
4251 }
4252 
4253 
4254 void MacroAssembler::GetBuiltinFunction(Register target,
4255  Builtins::JavaScript id) {
4256  // Load the builtins object into target register.
4257  lw(target, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
4258  lw(target, FieldMemOperand(target, GlobalObject::kBuiltinsOffset));
4259  // Load the JavaScript builtin function from the builtins object.
4260  lw(target, FieldMemOperand(target,
4261  JSBuiltinsObject::OffsetOfFunctionWithId(id)));
4262 }
4263 
4264 
4265 void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
4266  ASSERT(!target.is(a1));
4267  GetBuiltinFunction(a1, id);
4268  // Load the code entry point from the builtins object.
4269  lw(target, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
4270 }
4271 
4272 
4273 void MacroAssembler::SetCounter(StatsCounter* counter, int value,
4274  Register scratch1, Register scratch2) {
4275  if (FLAG_native_code_counters && counter->Enabled()) {
4276  li(scratch1, Operand(value));
4277  li(scratch2, Operand(ExternalReference(counter)));
4278  sw(scratch1, MemOperand(scratch2));
4279  }
4280 }
4281 
4282 
4283 void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
4284  Register scratch1, Register scratch2) {
4285  ASSERT(value > 0);
4286  if (FLAG_native_code_counters && counter->Enabled()) {
4287  li(scratch2, Operand(ExternalReference(counter)));
4288  lw(scratch1, MemOperand(scratch2));
4289  Addu(scratch1, scratch1, Operand(value));
4290  sw(scratch1, MemOperand(scratch2));
4291  }
4292 }
4293 
4294 
4295 void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
4296  Register scratch1, Register scratch2) {
4297  ASSERT(value > 0);
4298  if (FLAG_native_code_counters && counter->Enabled()) {
4299  li(scratch2, Operand(ExternalReference(counter)));
4300  lw(scratch1, MemOperand(scratch2));
4301  Subu(scratch1, scratch1, Operand(value));
4302  sw(scratch1, MemOperand(scratch2));
4303  }
4304 }
4305 
4306 
4307 // -----------------------------------------------------------------------------
4308 // Debugging.
4309 
4310 void MacroAssembler::Assert(Condition cc, BailoutReason reason,
4311  Register rs, Operand rt) {
4312  if (emit_debug_code())
4313  Check(cc, reason, rs, rt);
4314 }
4315 
4316 
4317 void MacroAssembler::AssertFastElements(Register elements) {
4318  if (emit_debug_code()) {
4319  ASSERT(!elements.is(at));
4320  Label ok;
4321  push(elements);
4322  lw(elements, FieldMemOperand(elements, HeapObject::kMapOffset));
4323  LoadRoot(at, Heap::kFixedArrayMapRootIndex);
4324  Branch(&ok, eq, elements, Operand(at));
4325  LoadRoot(at, Heap::kFixedDoubleArrayMapRootIndex);
4326  Branch(&ok, eq, elements, Operand(at));
4327  LoadRoot(at, Heap::kFixedCOWArrayMapRootIndex);
4328  Branch(&ok, eq, elements, Operand(at));
4329  Abort(kJSObjectWithFastElementsMapHasSlowElements);
4330  bind(&ok);
4331  pop(elements);
4332  }
4333 }
4334 
4335 
4336 void MacroAssembler::Check(Condition cc, BailoutReason reason,
4337  Register rs, Operand rt) {
4338  Label L;
4339  Branch(&L, cc, rs, rt);
4340  Abort(reason);
4341  // Will not return here.
4342  bind(&L);
4343 }
4344 
4345 
4346 void MacroAssembler::Abort(BailoutReason reason) {
4347  Label abort_start;
4348  bind(&abort_start);
4349 #ifdef DEBUG
4350  const char* msg = GetBailoutReason(reason);
4351  if (msg != NULL) {
4352  RecordComment("Abort message: ");
4353  RecordComment(msg);
4354  }
4355 
4356  if (FLAG_trap_on_abort) {
4357  stop(msg);
4358  return;
4359  }
4360 #endif
4361 
4362  li(a0, Operand(Smi::FromInt(reason)));
4363  push(a0);
4364  // Disable stub call restrictions to always allow calls to abort.
4365  if (!has_frame_) {
4366  // We don't actually want to generate a pile of code for this, so just
4367  // claim there is a stack frame, without generating one.
4368  FrameScope scope(this, StackFrame::NONE);
4369  CallRuntime(Runtime::kAbort, 1);
4370  } else {
4371  CallRuntime(Runtime::kAbort, 1);
4372  }
4373  // Will not return here.
4374  if (is_trampoline_pool_blocked()) {
4375  // If the calling code cares about the exact number of
4376  // instructions generated, we insert padding here to keep the size
4377  // of the Abort macro constant.
4378  // Currently in debug mode with debug_code enabled the number of
4379  // generated instructions is 10, so we use this as a maximum value.
4380  static const int kExpectedAbortInstructions = 10;
4381  int abort_instructions = InstructionsGeneratedSince(&abort_start);
4382  ASSERT(abort_instructions <= kExpectedAbortInstructions);
4383  while (abort_instructions++ < kExpectedAbortInstructions) {
4384  nop();
4385  }
4386  }
4387 }
4388 
4389 
4390 void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
4391  if (context_chain_length > 0) {
4392  // Move up the chain of contexts to the context containing the slot.
4393  lw(dst, MemOperand(cp, Context::SlotOffset(Context::PREVIOUS_INDEX)));
4394  for (int i = 1; i < context_chain_length; i++) {
4395  lw(dst, MemOperand(dst, Context::SlotOffset(Context::PREVIOUS_INDEX)));
4396  }
4397  } else {
4398  // Slot is in the current function context. Move it into the
4399  // destination register in case we store into it (the write barrier
4400  // cannot be allowed to destroy the context in cp).
4401  Move(dst, cp);
4402  }
4403 }
4404 
4405 
4406 void MacroAssembler::LoadTransitionedArrayMapConditional(
4407  ElementsKind expected_kind,
4408  ElementsKind transitioned_kind,
4409  Register map_in_out,
4410  Register scratch,
4411  Label* no_map_match) {
4412  // Load the global or builtins object from the current context.
4413  lw(scratch,
4414  MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
4415  lw(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset));
4416 
4417  // Check that the function's map is the same as the expected cached map.
4418  lw(scratch,
4419  MemOperand(scratch,
4420  Context::SlotOffset(Context::JS_ARRAY_MAPS_INDEX)));
4421  size_t offset = expected_kind * kPointerSize +
4422  FixedArrayBase::kHeaderSize;
4423  lw(at, FieldMemOperand(scratch, offset));
4424  Branch(no_map_match, ne, map_in_out, Operand(at));
4425 
4426  // Use the transitioned cached map.
4427  offset = transitioned_kind * kPointerSize +
4428  FixedArrayBase::kHeaderSize;
4429  lw(map_in_out, FieldMemOperand(scratch, offset));
4430 }
4431 
4432 
4433 void MacroAssembler::LoadGlobalFunction(int index, Register function) {
4434  // Load the global or builtins object from the current context.
4435  lw(function,
4436  MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
4437  // Load the native context from the global or builtins object.
4438  lw(function, FieldMemOperand(function,
4439  GlobalObject::kNativeContextOffset));
4440  // Load the function from the native context.
4441  lw(function, MemOperand(function, Context::SlotOffset(index)));
4442 }
4443 
4444 
4445 void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
4446  Register map,
4447  Register scratch) {
4448  // Load the initial map. The global functions all have initial maps.
4449  lw(map, FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
4450  if (emit_debug_code()) {
4451  Label ok, fail;
4452  CheckMap(map, scratch, Heap::kMetaMapRootIndex, &fail, DO_SMI_CHECK);
4453  Branch(&ok);
4454  bind(&fail);
4455  Abort(kGlobalFunctionsMustHaveInitialMap);
4456  bind(&ok);
4457  }
4458 }
4459 
4460 
4461 void MacroAssembler::Prologue(PrologueFrameMode frame_mode) {
4462  if (frame_mode == BUILD_STUB_FRAME) {
4463  Push(ra, fp, cp);
4464  Push(Smi::FromInt(StackFrame::STUB));
4465  // Adjust FP to point to saved FP.
4466  Addu(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
4467  } else {
4468  PredictableCodeSizeScope predictible_code_size_scope(
4469  this, kNoCodeAgeSequenceLength * Assembler::kInstrSize);
4470  // The following three instructions must remain together and unmodified
4471  // for code aging to work properly.
4472  if (isolate()->IsCodePreAgingActive()) {
4473  // Pre-age the code.
4474  Code* stub = Code::GetPreAgedCodeAgeStub(isolate());
4475  nop(Assembler::CODE_AGE_MARKER_NOP);
4476  // Load the stub address to t9 and call it,
4477  // GetCodeAgeAndParity() extracts the stub address from this instruction.
4478  li(t9,
4479  Operand(reinterpret_cast<uint32_t>(stub->instruction_start())),
4480  CONSTANT_SIZE);
4481  nop(); // Prevent jalr to jal optimization.
4482  jalr(t9, a0);
4483  nop(); // Branch delay slot nop.
4484  nop(); // Pad the empty space.
4485  } else {
4486  Push(ra, fp, cp, a1);
4487  nop(Assembler::CODE_AGE_SEQUENCE_NOP);
4488  // Adjust fp to point to caller's fp.
4489  Addu(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
4490  }
4491  }
4492 }
4493 
4494 
4495 void MacroAssembler::EnterFrame(StackFrame::Type type) {
4496  addiu(sp, sp, -5 * kPointerSize);
4497  li(t8, Operand(Smi::FromInt(type)));
4498  li(t9, Operand(CodeObject()), CONSTANT_SIZE);
4499  sw(ra, MemOperand(sp, 4 * kPointerSize));
4500  sw(fp, MemOperand(sp, 3 * kPointerSize));
4501  sw(cp, MemOperand(sp, 2 * kPointerSize));
4502  sw(t8, MemOperand(sp, 1 * kPointerSize));
4503  sw(t9, MemOperand(sp, 0 * kPointerSize));
4504  // Adjust FP to point to saved FP.
4505  Addu(fp, sp,
4506  Operand(StandardFrameConstants::kFixedFrameSizeFromFp + kPointerSize));
4507 }
4508 
4509 
4510 void MacroAssembler::LeaveFrame(StackFrame::Type type) {
4511  mov(sp, fp);
4512  lw(fp, MemOperand(sp, 0 * kPointerSize));
4513  lw(ra, MemOperand(sp, 1 * kPointerSize));
4514  addiu(sp, sp, 2 * kPointerSize);
4515 }
4516 
4517 
4518 void MacroAssembler::EnterExitFrame(bool save_doubles,
4519  int stack_space) {
4520  // Set up the frame structure on the stack.
4521  STATIC_ASSERT(2 * kPointerSize == ExitFrameConstants::kCallerSPDisplacement);
4522  STATIC_ASSERT(1 * kPointerSize == ExitFrameConstants::kCallerPCOffset);
4523  STATIC_ASSERT(0 * kPointerSize == ExitFrameConstants::kCallerFPOffset);
4524 
4525  // This is how the stack will look:
4526  // fp + 2 (==kCallerSPDisplacement) - old stack's end
4527  // [fp + 1 (==kCallerPCOffset)] - saved old ra
4528  // [fp + 0 (==kCallerFPOffset)] - saved old fp
4529  // [fp - 1 (==kSPOffset)] - sp of the called function
4530  // [fp - 2 (==kCodeOffset)] - CodeObject
4531  // fp - (2 + stack_space + alignment) == sp == [fp - kSPOffset] - top of the
4532  // new stack (will contain saved ra)
4533 
4534  // Save registers.
4535  addiu(sp, sp, -4 * kPointerSize);
4536  sw(ra, MemOperand(sp, 3 * kPointerSize));
4537  sw(fp, MemOperand(sp, 2 * kPointerSize));
4538  addiu(fp, sp, 2 * kPointerSize); // Set up new frame pointer.
4539 
4540  if (emit_debug_code()) {
4541  sw(zero_reg, MemOperand(fp, ExitFrameConstants::kSPOffset));
4542  }
4543 
4544  // Accessed from ExitFrame::code_slot.
4545  li(t8, Operand(CodeObject()), CONSTANT_SIZE);
4546  sw(t8, MemOperand(fp, ExitFrameConstants::kCodeOffset));
4547 
4548  // Save the frame pointer and the context in top.
4549  li(t8, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
4550  sw(fp, MemOperand(t8));
4551  li(t8, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
4552  sw(cp, MemOperand(t8));
4553 
4554  const int frame_alignment = MacroAssembler::ActivationFrameAlignment();
4555  if (save_doubles) {
4556  // The stack must be aligned to 0 modulo 8 for stores with sdc1.
4557  ASSERT(kDoubleSize == frame_alignment);
4558  if (frame_alignment > 0) {
4559  ASSERT(IsPowerOf2(frame_alignment));
4560  And(sp, sp, Operand(-frame_alignment)); // Align stack.
4561  }
4562  int space = FPURegister::kMaxNumRegisters * kDoubleSize;
4563  Subu(sp, sp, Operand(space));
4564  // Remember: we only need to save every 2nd double FPU value.
4565  for (int i = 0; i < FPURegister::kMaxNumRegisters; i+=2) {
4566  FPURegister reg = FPURegister::from_code(i);
4567  sdc1(reg, MemOperand(sp, i * kDoubleSize));
4568  }
4569  }
4570 
4571  // Reserve place for the return address, stack space and an optional slot
4572  // (used by the DirectCEntryStub to hold the return value if a struct is
4573  // returned) and align the frame preparing for calling the runtime function.
4574  ASSERT(stack_space >= 0);
4575  Subu(sp, sp, Operand((stack_space + 2) * kPointerSize));
4576  if (frame_alignment > 0) {
4577  ASSERT(IsPowerOf2(frame_alignment));
4578  And(sp, sp, Operand(-frame_alignment)); // Align stack.
4579  }
4580 
4581  // Set the exit frame sp value to point just before the return address
4582  // location.
4583  addiu(at, sp, kPointerSize);
4584  sw(at, MemOperand(fp, ExitFrameConstants::kSPOffset));
4585 }
4586 
4587 
4588 void MacroAssembler::LeaveExitFrame(bool save_doubles,
4589  Register argument_count,
4590  bool restore_context,
4591  bool do_return) {
4592  // Optionally restore all double registers.
4593  if (save_doubles) {
4594  // Remember: we only need to restore every 2nd double FPU value.
4595  lw(t8, MemOperand(fp, ExitFrameConstants::kSPOffset));
4596  for (int i = 0; i < FPURegister::kMaxNumRegisters; i+=2) {
4597  FPURegister reg = FPURegister::from_code(i);
4598  ldc1(reg, MemOperand(t8, i * kDoubleSize + kPointerSize));
4599  }
4600  }
4601 
4602  // Clear top frame.
4603  li(t8, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
4604  sw(zero_reg, MemOperand(t8));
4605 
4606  // Restore current context from top and clear it in debug mode.
4607  if (restore_context) {
4608  li(t8, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
4609  lw(cp, MemOperand(t8));
4610  }
4611 #ifdef DEBUG
4612  li(t8, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
4613  sw(a3, MemOperand(t8));
4614 #endif
4615 
4616  // Pop the arguments, restore registers, and return.
4617  mov(sp, fp); // Respect ABI stack constraint.
4618  lw(fp, MemOperand(sp, ExitFrameConstants::kCallerFPOffset));
4619  lw(ra, MemOperand(sp, ExitFrameConstants::kCallerPCOffset));
4620 
4621  if (argument_count.is_valid()) {
4622  sll(t8, argument_count, kPointerSizeLog2);
4623  addu(sp, sp, t8);
4624  }
4625 
4626  if (do_return) {
4627  Ret(USE_DELAY_SLOT);
4628  // If returning, the instruction in the delay slot will be the addiu below.
4629  }
4630  addiu(sp, sp, 8);
4631 }
4632 
4633 
4634 void MacroAssembler::InitializeNewString(Register string,
4635  Register length,
4636  Heap::RootListIndex map_index,
4637  Register scratch1,
4638  Register scratch2) {
4639  sll(scratch1, length, kSmiTagSize);
4640  LoadRoot(scratch2, map_index);
4641  sw(scratch1, FieldMemOperand(string, String::kLengthOffset));
4642  li(scratch1, Operand(String::kEmptyHashField));
4643  sw(scratch2, FieldMemOperand(string, HeapObject::kMapOffset));
4644  sw(scratch1, FieldMemOperand(string, String::kHashFieldOffset));
4645 }
4646 
4647 
4648 int MacroAssembler::ActivationFrameAlignment() {
4649 #if V8_HOST_ARCH_MIPS
4650  // Running on the real platform. Use the alignment as mandated by the local
4651  // environment.
4652  // Note: This will break if we ever start generating snapshots on one Mips
4653  // platform for another Mips platform with a different alignment.
4654  return OS::ActivationFrameAlignment();
4655 #else // V8_HOST_ARCH_MIPS
4656  // If we are using the simulator then we should always align to the expected
4657  // alignment. As the simulator is used to generate snapshots we do not know
4658  // if the target platform will need alignment, so this is controlled from a
4659  // flag.
4660  return FLAG_sim_stack_alignment;
4661 #endif // V8_HOST_ARCH_MIPS
4662 }
4663 
4664 
4665 void MacroAssembler::AssertStackIsAligned() {
4666  if (emit_debug_code()) {
4667  const int frame_alignment = ActivationFrameAlignment();
4668  const int frame_alignment_mask = frame_alignment - 1;
4669 
4670  if (frame_alignment > kPointerSize) {
4671  Label alignment_as_expected;
4672  ASSERT(IsPowerOf2(frame_alignment));
4673  andi(at, sp, frame_alignment_mask);
4674  Branch(&alignment_as_expected, eq, at, Operand(zero_reg));
4675  // Don't use Check here, as it will call Runtime_Abort re-entering here.
4676  stop("Unexpected stack alignment");
4677  bind(&alignment_as_expected);
4678  }
4679  }
4680 }
4681 
4682 
4683 void MacroAssembler::JumpIfNotPowerOfTwoOrZero(
4684  Register reg,
4685  Register scratch,
4686  Label* not_power_of_two_or_zero) {
4687  Subu(scratch, reg, Operand(1));
4688  Branch(USE_DELAY_SLOT, not_power_of_two_or_zero, lt,
4689  scratch, Operand(zero_reg));
4690  and_(at, scratch, reg); // In the delay slot.
4691  Branch(not_power_of_two_or_zero, ne, at, Operand(zero_reg));
4692 }
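
// Illustrative sketch, not from the V8 sources: the x & (x - 1) test emitted
// above, written with the same wrapping subtraction the hardware performs.
// A positive power of two clears to zero when ANDed with itself minus one;
// zero is filtered out by the first branch on (reg - 1) < 0. The helper name
// is hypothetical.
static inline bool IsPowerOfTwo32Sketch(int32_t reg) {
  int32_t reg_minus_one =
      static_cast<int32_t>(static_cast<uint32_t>(reg) - 1u);  // Subu wraps.
  if (reg_minus_one < 0) return false;  // Branch(..., lt, scratch, zero_reg).
  return (reg & reg_minus_one) == 0;    // and_(at, scratch, reg).
}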
4693 
4694 
4695 void MacroAssembler::SmiTagCheckOverflow(Register reg, Register overflow) {
4696  ASSERT(!reg.is(overflow));
4697  mov(overflow, reg); // Save original value.
4698  SmiTag(reg);
4699  xor_(overflow, overflow, reg); // Overflow if (value ^ 2 * value) < 0.
4700 }
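
// Illustrative sketch, not from the V8 sources: smi tagging on 32-bit V8
// shifts the value left by one, so it overflows exactly when the top two bits
// of the untagged value differ, which is what the xor_ above detects. The
// helper is hypothetical and assumes kSmiTagSize == 1.
static inline bool SmiTagOverflowsSketch(int32_t value) {
  int32_t tagged = static_cast<int32_t>(static_cast<uint32_t>(value) << 1);
  return (value ^ tagged) < 0;  // Sign bit set iff bit 31 != bit 30 of value.
}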
4701 
4702 
4703 void MacroAssembler::SmiTagCheckOverflow(Register dst,
4704  Register src,
4705  Register overflow) {
4706  if (dst.is(src)) {
4707  // Fall back to slower case.
4708  SmiTagCheckOverflow(dst, overflow);
4709  } else {
4710  ASSERT(!dst.is(src));
4711  ASSERT(!dst.is(overflow));
4712  ASSERT(!src.is(overflow));
4713  SmiTag(dst, src);
4714  xor_(overflow, dst, src); // Overflow if (value ^ 2 * value) < 0.
4715  }
4716 }
4717 
4718 
4719 void MacroAssembler::UntagAndJumpIfSmi(Register dst,
4720  Register src,
4721  Label* smi_case) {
4722  JumpIfSmi(src, smi_case, at, USE_DELAY_SLOT);
4723  SmiUntag(dst, src);
4724 }
4725 
4726 
4727 void MacroAssembler::UntagAndJumpIfNotSmi(Register dst,
4728  Register src,
4729  Label* non_smi_case) {
4730  JumpIfNotSmi(src, non_smi_case, at, USE_DELAY_SLOT);
4731  SmiUntag(dst, src);
4732 }
4733 
4734 void MacroAssembler::JumpIfSmi(Register value,
4735  Label* smi_label,
4736  Register scratch,
4737  BranchDelaySlot bd) {
4738  ASSERT_EQ(0, kSmiTag);
4739  andi(scratch, value, kSmiTagMask);
4740  Branch(bd, smi_label, eq, scratch, Operand(zero_reg));
4741 }
4742 
4743 void MacroAssembler::JumpIfNotSmi(Register value,
4744  Label* not_smi_label,
4745  Register scratch,
4746  BranchDelaySlot bd) {
4747  ASSERT_EQ(0, kSmiTag);
4748  andi(scratch, value, kSmiTagMask);
4749  Branch(bd, not_smi_label, ne, scratch, Operand(zero_reg));
4750 }
4751 
4752 
4753 void MacroAssembler::JumpIfNotBothSmi(Register reg1,
4754  Register reg2,
4755  Label* on_not_both_smi) {
4756  STATIC_ASSERT(kSmiTag == 0);
4757  ASSERT_EQ(1, kSmiTagMask);
4758  or_(at, reg1, reg2);
4759  JumpIfNotSmi(at, on_not_both_smi);
4760 }
4761 
4762 
4763 void MacroAssembler::JumpIfEitherSmi(Register reg1,
4764  Register reg2,
4765  Label* on_either_smi) {
4766  STATIC_ASSERT(kSmiTag == 0);
4767  ASSERT_EQ(1, kSmiTagMask);
4768  // Both Smi tags must be 1 (not Smi).
4769  and_(at, reg1, reg2);
4770  JumpIfSmi(at, on_either_smi);
4771 }
4772 
4773 
4774 void MacroAssembler::AssertNotSmi(Register object) {
4775  if (emit_debug_code()) {
4776  STATIC_ASSERT(kSmiTag == 0);
4777  andi(at, object, kSmiTagMask);
4778  Check(ne, kOperandIsASmi, at, Operand(zero_reg));
4779  }
4780 }
4781 
4782 
4783 void MacroAssembler::AssertSmi(Register object) {
4784  if (emit_debug_code()) {
4785  STATIC_ASSERT(kSmiTag == 0);
4786  andi(at, object, kSmiTagMask);
4787  Check(eq, kOperandIsASmi, at, Operand(zero_reg));
4788  }
4789 }
4790 
4791 
4792 void MacroAssembler::AssertString(Register object) {
4793  if (emit_debug_code()) {
4794  STATIC_ASSERT(kSmiTag == 0);
4795  SmiTst(object, t0);
4796  Check(ne, kOperandIsASmiAndNotAString, t0, Operand(zero_reg));
4797  push(object);
4798  lw(object, FieldMemOperand(object, HeapObject::kMapOffset));
4799  lbu(object, FieldMemOperand(object, Map::kInstanceTypeOffset));
4800  Check(lo, kOperandIsNotAString, object, Operand(FIRST_NONSTRING_TYPE));
4801  pop(object);
4802  }
4803 }
4804 
4805 
4806 void MacroAssembler::AssertName(Register object) {
4807  if (emit_debug_code()) {
4808  STATIC_ASSERT(kSmiTag == 0);
4809  SmiTst(object, t0);
4810  Check(ne, kOperandIsASmiAndNotAName, t0, Operand(zero_reg));
4811  push(object);
4812  lw(object, FieldMemOperand(object, HeapObject::kMapOffset));
4813  lbu(object, FieldMemOperand(object, Map::kInstanceTypeOffset));
4814  Check(le, kOperandIsNotAName, object, Operand(LAST_NAME_TYPE));
4815  pop(object);
4816  }
4817 }
4818 
4819 
4820 void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
4821  Register scratch) {
4822  if (emit_debug_code()) {
4823  Label done_checking;
4824  AssertNotSmi(object);
4825  LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
4826  Branch(&done_checking, eq, object, Operand(scratch));
4827  push(object);
4828  lw(object, FieldMemOperand(object, HeapObject::kMapOffset));
4829  LoadRoot(scratch, Heap::kAllocationSiteMapRootIndex);
4830  Assert(eq, kExpectedUndefinedOrCell, object, Operand(scratch));
4831  pop(object);
4832  bind(&done_checking);
4833  }
4834 }
4835 
4836 
4837 void MacroAssembler::AssertIsRoot(Register reg, Heap::RootListIndex index) {
4838  if (emit_debug_code()) {
4839  ASSERT(!reg.is(at));
4840  LoadRoot(at, index);
4841  Check(eq, kHeapNumberMapRegisterClobbered, reg, Operand(at));
4842  }
4843 }
4844 
4845 
4846 void MacroAssembler::JumpIfNotHeapNumber(Register object,
4847  Register heap_number_map,
4848  Register scratch,
4849  Label* on_not_heap_number) {
4850  lw(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
4851  AssertIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
4852  Branch(on_not_heap_number, ne, scratch, Operand(heap_number_map));
4853 }
4854 
4855 
4856 void MacroAssembler::LookupNumberStringCache(Register object,
4857  Register result,
4858  Register scratch1,
4859  Register scratch2,
4860  Register scratch3,
4861  Label* not_found) {
4862  // Use of registers. Register result is used as a temporary.
4863  Register number_string_cache = result;
4864  Register mask = scratch3;
4865 
4866  // Load the number string cache.
4867  LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);
4868 
4869  // Make the hash mask from the length of the number string cache. It
4870  // contains two elements (number and string) for each cache entry.
4871  lw(mask, FieldMemOperand(number_string_cache, FixedArray::kLengthOffset));
4872  // Divide length by two (length is a smi).
4873  sra(mask, mask, kSmiTagSize + 1);
4874  Addu(mask, mask, -1); // Make mask.
4875 
4876  // Calculate the entry in the number string cache. The hash value in the
4877  // number string cache for smis is just the smi value, and the hash for
4878  // doubles is the xor of the upper and lower words. See
4879  // Heap::GetNumberStringCache.
4880  Label is_smi;
4881  Label load_result_from_cache;
4882  JumpIfSmi(object, &is_smi);
4883  CheckMap(object,
4884  scratch1,
4885  Heap::kHeapNumberMapRootIndex,
4886  not_found,
4887  DONT_DO_SMI_CHECK);
4888 
4889  STATIC_ASSERT(8 == kDoubleSize);
4890  Addu(scratch1,
4891  object,
4892  Operand(HeapNumber::kValueOffset - kHeapObjectTag));
4893  lw(scratch2, MemOperand(scratch1, kPointerSize));
4894  lw(scratch1, MemOperand(scratch1, 0));
4895  Xor(scratch1, scratch1, Operand(scratch2));
4896  And(scratch1, scratch1, Operand(mask));
4897 
4898  // Calculate address of entry in string cache: each entry consists
4899  // of two pointer sized fields.
4900  sll(scratch1, scratch1, kPointerSizeLog2 + 1);
4901  Addu(scratch1, number_string_cache, scratch1);
4902 
4903  Register probe = mask;
4904  lw(probe, FieldMemOperand(scratch1, FixedArray::kHeaderSize));
4905  JumpIfSmi(probe, not_found);
4906  ldc1(f12, FieldMemOperand(object, HeapNumber::kValueOffset));
4907  ldc1(f14, FieldMemOperand(probe, HeapNumber::kValueOffset));
4908  BranchF(&load_result_from_cache, NULL, eq, f12, f14);
4909  Branch(not_found);
4910 
4911  bind(&is_smi);
4912  Register scratch = scratch1;
4913  sra(scratch, object, 1); // Shift away the tag.
4914  And(scratch, mask, Operand(scratch));
4915 
4916  // Calculate address of entry in string cache: each entry consists
4917  // of two pointer sized fields.
4918  sll(scratch, scratch, kPointerSizeLog2 + 1);
4919  Addu(scratch, number_string_cache, scratch);
4920 
4921  // Check if the entry is the smi we are looking for.
4922  lw(probe, FieldMemOperand(scratch, FixedArray::kHeaderSize));
4923  Branch(not_found, ne, object, Operand(probe));
4924 
4925  // Get the result from the cache.
4926  bind(&load_result_from_cache);
4927  lw(result, FieldMemOperand(scratch, FixedArray::kHeaderSize + kPointerSize));
4928 
4929  IncrementCounter(isolate()->counters()->number_to_string_native(),
4930  1,
4931  scratch1,
4932  scratch2);
4933 }
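
// Illustrative sketch, not from the V8 sources: the cache-slot arithmetic
// above. Smis hash to their untagged value and heap numbers to the xor of the
// two 32-bit halves of their double; the mask is (number of cache entries - 1)
// and each entry is a (number, string) pair, hence the extra doubling. Both
// helpers are hypothetical.
static inline uint32_t NumberCacheDoubleHashSketch(uint32_t lower_word,
                                                   uint32_t upper_word) {
  return lower_word ^ upper_word;  // See Heap::GetNumberStringCache.
}
static inline uint32_t NumberCacheKeyElementSketch(uint32_t hash,
                                                   uint32_t cache_elements) {
  uint32_t mask = (cache_elements >> 1) - 1;  // Entries are two elements wide.
  return (hash & mask) * 2;  // FixedArray element index of the key slot.
}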
4934 
4935 
4936 void MacroAssembler::JumpIfNonSmisNotBothSequentialAsciiStrings(
4937  Register first,
4938  Register second,
4939  Register scratch1,
4940  Register scratch2,
4941  Label* failure) {
4942  // Test that both first and second are sequential ASCII strings.
4943  // Assume that they are non-smis.
4944  lw(scratch1, FieldMemOperand(first, HeapObject::kMapOffset));
4945  lw(scratch2, FieldMemOperand(second, HeapObject::kMapOffset));
4946  lbu(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
4947  lbu(scratch2, FieldMemOperand(scratch2, Map::kInstanceTypeOffset));
4948 
4949  JumpIfBothInstanceTypesAreNotSequentialAscii(scratch1,
4950  scratch2,
4951  scratch1,
4952  scratch2,
4953  failure);
4954 }
4955 
4956 
4957 void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(Register first,
4958  Register second,
4959  Register scratch1,
4960  Register scratch2,
4961  Label* failure) {
4962  // Check that neither is a smi.
4963  STATIC_ASSERT(kSmiTag == 0);
4964  And(scratch1, first, Operand(second));
4965  JumpIfSmi(scratch1, failure);
4966  JumpIfNonSmisNotBothSequentialAsciiStrings(first,
4967  second,
4968  scratch1,
4969  scratch2,
4970  failure);
4971 }
4972 
4973 
4974 void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialAscii(
4975  Register first,
4976  Register second,
4977  Register scratch1,
4978  Register scratch2,
4979  Label* failure) {
4980  const int kFlatAsciiStringMask =
4981  kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
4982  const int kFlatAsciiStringTag =
4983  kStringTag | kOneByteStringTag | kSeqStringTag;
4984  ASSERT(kFlatAsciiStringTag <= 0xffff); // Ensure this fits 16-bit immed.
4985  andi(scratch1, first, kFlatAsciiStringMask);
4986  Branch(failure, ne, scratch1, Operand(kFlatAsciiStringTag));
4987  andi(scratch2, second, kFlatAsciiStringMask);
4988  Branch(failure, ne, scratch2, Operand(kFlatAsciiStringTag));
4989 }
4990 
4991 
4992 void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii(Register type,
4993  Register scratch,
4994  Label* failure) {
4995  const int kFlatAsciiStringMask =
4996  kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
4997  const int kFlatAsciiStringTag =
4998  kStringTag | kOneByteStringTag | kSeqStringTag;
4999  And(scratch, type, Operand(kFlatAsciiStringMask));
5000  Branch(failure, ne, scratch, Operand(kFlatAsciiStringTag));
5001 }
5002 
5003 
5004 static const int kRegisterPassedArguments = 4;
5005 
5006 int MacroAssembler::CalculateStackPassedWords(int num_reg_arguments,
5007  int num_double_arguments) {
5008  int stack_passed_words = 0;
5009  num_reg_arguments += 2 * num_double_arguments;
5010 
5011  // Up to four simple arguments are passed in registers a0..a3.
5012  if (num_reg_arguments > kRegisterPassedArguments) {
5013  stack_passed_words += num_reg_arguments - kRegisterPassedArguments;
5014  }
5015  stack_passed_words += kCArgSlotCount;
5016  return stack_passed_words;
5017 }
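
// Illustrative sketch, not from the V8 sources: the same O32 argument-slot
// accounting as a pure function. A double occupies two argument words, only
// the first four words travel in a0..a3, and kCArgSlotCount home slots are
// always reserved. The helper and its parameters are hypothetical stand-ins
// for the constants used above.
static inline int StackWordsForCallSketch(int num_reg_arguments,
                                          int num_double_arguments,
                                          int register_passed_arguments,
                                          int c_arg_slot_count) {
  int total_words = num_reg_arguments + 2 * num_double_arguments;
  int stack_words = 0;
  if (total_words > register_passed_arguments) {
    stack_words = total_words - register_passed_arguments;  // Spills past a3.
  }
  return stack_words + c_arg_slot_count;  // Plus the reserved home slots.
}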
5018 
5019 
5020 void MacroAssembler::EmitSeqStringSetCharCheck(Register string,
5021  Register index,
5022  Register value,
5023  Register scratch,
5024  uint32_t encoding_mask) {
5025  Label is_object;
5026  SmiTst(string, at);
5027  Check(ne, kNonObject, at, Operand(zero_reg));
5028 
5029  lw(at, FieldMemOperand(string, HeapObject::kMapOffset));
5030  lbu(at, FieldMemOperand(at, Map::kInstanceTypeOffset));
5031 
5032  andi(at, at, kStringRepresentationMask | kStringEncodingMask);
5033  li(scratch, Operand(encoding_mask));
5034  Check(eq, kUnexpectedStringType, at, Operand(scratch));
5035 
5036  // The index is assumed to be untagged coming in, tag it to compare with the
5037  // string length without using a temp register; it is restored at the end of
5038  // this function.
5039  Label index_tag_ok, index_tag_bad;
5040  TrySmiTag(index, scratch, &index_tag_bad);
5041  Branch(&index_tag_ok);
5042  bind(&index_tag_bad);
5043  Abort(kIndexIsTooLarge);
5044  bind(&index_tag_ok);
5045 
5046  lw(at, FieldMemOperand(string, String::kLengthOffset));
5047  Check(lt, kIndexIsTooLarge, index, Operand(at));
5048 
5049  ASSERT(Smi::FromInt(0) == 0);
5050  Check(ge, kIndexIsNegative, index, Operand(zero_reg));
5051 
5052  SmiUntag(index, index);
5053 }
5054 
5055 
5056 void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
5057  int num_double_arguments,
5058  Register scratch) {
5059  int frame_alignment = ActivationFrameAlignment();
5060 
5061  // Up to four simple arguments are passed in registers a0..a3.
5062  // Those four arguments must have reserved argument slots on the stack for
5063  // mips, even though those argument slots are not normally used.
5064  // Remaining arguments are pushed on the stack, above (higher address than)
5065  // the argument slots.
5066  int stack_passed_arguments = CalculateStackPassedWords(
5067  num_reg_arguments, num_double_arguments);
5068  if (frame_alignment > kPointerSize) {
5069  // Make stack end at alignment and make room for num_arguments - 4 words
5070  // and the original value of sp.
5071  mov(scratch, sp);
5072  Subu(sp, sp, Operand((stack_passed_arguments + 1) * kPointerSize));
5073  ASSERT(IsPowerOf2(frame_alignment));
5074  And(sp, sp, Operand(-frame_alignment));
5075  sw(scratch, MemOperand(sp, stack_passed_arguments * kPointerSize));
5076  } else {
5077  Subu(sp, sp, Operand(stack_passed_arguments * kPointerSize));
5078  }
5079 }
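
// Illustrative sketch, not from the V8 sources: the pointer arithmetic behind
// the aligned case above. One extra slot is reserved so the original sp can
// be parked in the highest word of the new area and reloaded after the call;
// rounding down to a power-of-two alignment is the And with -frame_alignment.
// The helper is hypothetical.
static inline uintptr_t AlignedOutgoingSpSketch(uintptr_t sp,
                                                int stack_passed_words,
                                                uintptr_t frame_alignment,
                                                int pointer_size) {
  uintptr_t new_sp = sp - (stack_passed_words + 1) * pointer_size;
  new_sp &= ~(frame_alignment - 1);  // And(sp, sp, Operand(-frame_alignment)).
  // The caller stores the old sp at new_sp + stack_passed_words * pointer_size.
  return new_sp;
}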
5080 
5081 
5082 void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
5083  Register scratch) {
5084  PrepareCallCFunction(num_reg_arguments, 0, scratch);
5085 }
5086 
5087 
5088 void MacroAssembler::CallCFunction(ExternalReference function,
5089  int num_reg_arguments,
5090  int num_double_arguments) {
5091  li(t8, Operand(function));
5092  CallCFunctionHelper(t8, num_reg_arguments, num_double_arguments);
5093 }
5094 
5095 
5096 void MacroAssembler::CallCFunction(Register function,
5097  int num_reg_arguments,
5098  int num_double_arguments) {
5099  CallCFunctionHelper(function, num_reg_arguments, num_double_arguments);
5100 }
5101 
5102 
5103 void MacroAssembler::CallCFunction(ExternalReference function,
5104  int num_arguments) {
5105  CallCFunction(function, num_arguments, 0);
5106 }
5107 
5108 
5109 void MacroAssembler::CallCFunction(Register function,
5110  int num_arguments) {
5111  CallCFunction(function, num_arguments, 0);
5112 }
5113 
5114 
5115 void MacroAssembler::CallCFunctionHelper(Register function,
5116  int num_reg_arguments,
5117  int num_double_arguments) {
5118  ASSERT(has_frame());
5119  // Make sure that the stack is aligned before calling a C function unless
5120  // running in the simulator. The simulator has its own alignment check which
5121  // provides more information.
5122  // The argument slots are presumed to have been set up by
5123  // PrepareCallCFunction. The C function must be called via t9, for mips ABI.
5124 
5125 #if V8_HOST_ARCH_MIPS
5126  if (emit_debug_code()) {
5127  int frame_alignment = OS::ActivationFrameAlignment();
5128  int frame_alignment_mask = frame_alignment - 1;
5129  if (frame_alignment > kPointerSize) {
5130  ASSERT(IsPowerOf2(frame_alignment));
5131  Label alignment_as_expected;
5132  And(at, sp, Operand(frame_alignment_mask));
5133  Branch(&alignment_as_expected, eq, at, Operand(zero_reg));
5134  // Don't use Check here, as it will call Runtime_Abort possibly
5135  // re-entering here.
5136  stop("Unexpected alignment in CallCFunction");
5137  bind(&alignment_as_expected);
5138  }
5139  }
5140 #endif // V8_HOST_ARCH_MIPS
5141 
5142  // Just call directly. The function called cannot cause a GC, or
5143  // allow preemption, so the return address in the link register
5144  // stays correct.
5145 
5146  if (!function.is(t9)) {
5147  mov(t9, function);
5148  function = t9;
5149  }
5150 
5151  Call(function);
5152 
5153  int stack_passed_arguments = CalculateStackPassedWords(
5154  num_reg_arguments, num_double_arguments);
5155 
5156  if (OS::ActivationFrameAlignment() > kPointerSize) {
5157  lw(sp, MemOperand(sp, stack_passed_arguments * kPointerSize));
5158  } else {
5159  Addu(sp, sp, Operand(stack_passed_arguments * sizeof(kPointerSize)));
5160  }
5161 }
5162 
5163 
5164 #undef BRANCH_ARGS_CHECK
5165 
5166 
5167 void MacroAssembler::PatchRelocatedValue(Register li_location,
5168  Register scratch,
5169  Register new_value) {
5170  lw(scratch, MemOperand(li_location));
5171  // At this point scratch is a lui(at, ...) instruction.
5172  if (emit_debug_code()) {
5173  And(scratch, scratch, kOpcodeMask);
5174  Check(eq, kTheInstructionToPatchShouldBeALui,
5175  scratch, Operand(LUI));
5176  lw(scratch, MemOperand(li_location));
5177  }
5178  srl(t9, new_value, kImm16Bits);
5179  Ins(scratch, t9, 0, kImm16Bits);
5180  sw(scratch, MemOperand(li_location));
5181 
5182  lw(scratch, MemOperand(li_location, kInstrSize));
5183  // scratch is now ori(at, ...).
5184  if (emit_debug_code()) {
5185  And(scratch, scratch, kOpcodeMask);
5186  Check(eq, kTheInstructionToPatchShouldBeAnOri,
5187  scratch, Operand(ORI));
5188  lw(scratch, MemOperand(li_location, kInstrSize));
5189  }
5190  Ins(scratch, new_value, 0, kImm16Bits);
5191  sw(scratch, MemOperand(li_location, kInstrSize));
5192 
5193  // Update the I-cache so the new lui and ori can be executed.
5194  FlushICache(li_location, 2);
5195 }
5196 
5197 void MacroAssembler::GetRelocatedValue(Register li_location,
5198  Register value,
5199  Register scratch) {
5200  lw(value, MemOperand(li_location));
5201  if (emit_debug_code()) {
5202  And(value, value, kOpcodeMask);
5203  Check(eq, kTheInstructionShouldBeALui,
5204  value, Operand(LUI));
5205  lw(value, MemOperand(li_location));
5206  }
5207 
5208  // value now holds a lui instruction. Extract the immediate.
5209  sll(value, value, kImm16Bits);
5210 
5211  lw(scratch, MemOperand(li_location, kInstrSize));
5212  if (emit_debug_code()) {
5213  And(scratch, scratch, kOpcodeMask);
5214  Check(eq, kTheInstructionShouldBeAnOri,
5215  scratch, Operand(ORI));
5216  lw(scratch, MemOperand(li_location, kInstrSize));
5217  }
5218  // "scratch" now holds an ori instruction. Extract the immediate.
5219  andi(scratch, scratch, kImm16Mask);
5220 
5221  // Merge the results.
5222  or_(value, value, scratch);
5223 }
5224 
5225 
5226 void MacroAssembler::CheckPageFlag(
5227  Register object,
5228  Register scratch,
5229  int mask,
5230  Condition cc,
5231  Label* condition_met) {
5232  And(scratch, object, Operand(~Page::kPageAlignmentMask));
5233  lw(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
5234  And(scratch, scratch, Operand(mask));
5235  Branch(condition_met, cc, scratch, Operand(zero_reg));
5236 }
5237 
5238 
5239 void MacroAssembler::CheckMapDeprecated(Handle<Map> map,
5240  Register scratch,
5241  Label* if_deprecated) {
5242  if (map->CanBeDeprecated()) {
5243  li(scratch, Operand(map));
5244  lw(scratch, FieldMemOperand(scratch, Map::kBitField3Offset));
5245  And(scratch, scratch, Operand(Smi::FromInt(Map::Deprecated::kMask)));
5246  Branch(if_deprecated, ne, scratch, Operand(zero_reg));
5247  }
5248 }
5249 
5250 
5251 void MacroAssembler::JumpIfBlack(Register object,
5252  Register scratch0,
5253  Register scratch1,
5254  Label* on_black) {
5255  HasColor(object, scratch0, scratch1, on_black, 1, 0); // kBlackBitPattern.
5256  ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
5257 }
5258 
5259 
5260 void MacroAssembler::HasColor(Register object,
5261  Register bitmap_scratch,
5262  Register mask_scratch,
5263  Label* has_color,
5264  int first_bit,
5265  int second_bit) {
5266  ASSERT(!AreAliased(object, bitmap_scratch, mask_scratch, t8));
5267  ASSERT(!AreAliased(object, bitmap_scratch, mask_scratch, t9));
5268 
5269  GetMarkBits(object, bitmap_scratch, mask_scratch);
5270 
5271  Label other_color, word_boundary;
5272  lw(t9, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
5273  And(t8, t9, Operand(mask_scratch));
5274  Branch(&other_color, first_bit == 1 ? eq : ne, t8, Operand(zero_reg));
5275  // Shift left 1 by adding.
5276  Addu(mask_scratch, mask_scratch, Operand(mask_scratch));
5277  Branch(&word_boundary, eq, mask_scratch, Operand(zero_reg));
5278  And(t8, t9, Operand(mask_scratch));
5279  Branch(has_color, second_bit == 1 ? ne : eq, t8, Operand(zero_reg));
5280  jmp(&other_color);
5281 
5282  bind(&word_boundary);
5283  lw(t9, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize + kPointerSize));
5284  And(t9, t9, Operand(1));
5285  Branch(has_color, second_bit == 1 ? ne : eq, t9, Operand(zero_reg));
5286  bind(&other_color);
5287 }
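
// Illustrative sketch, not from the V8 sources: how HasColor reads the two
// adjacent mark bits. The low bit of the pair is tested against first_bit and
// the next bit against second_bit (JumpIfBlack asks for the pattern 1, 0);
// the word_boundary path above only exists because the pair may straddle two
// bitmap cells. The helper is hypothetical and takes an already-extracted
// bit pair.
static inline bool HasMarkColorSketch(uint32_t bit_pair,  // Low bit first.
                                      int first_bit,
                                      int second_bit) {
  return (static_cast<int>(bit_pair & 1u) == first_bit) &&
         (static_cast<int>((bit_pair >> 1) & 1u) == second_bit);
}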
5288 
5289 
5290 // Detect some, but not all, common pointer-free objects. This is used by the
5291 // incremental write barrier which doesn't care about oddballs (they are always
5292 // marked black immediately so this code is not hit).
5293 void MacroAssembler::JumpIfDataObject(Register value,
5294  Register scratch,
5295  Label* not_data_object) {
5296  ASSERT(!AreAliased(value, scratch, t8, no_reg));
5297  Label is_data_object;
5298  lw(scratch, FieldMemOperand(value, HeapObject::kMapOffset));
5299  LoadRoot(t8, Heap::kHeapNumberMapRootIndex);
5300  Branch(&is_data_object, eq, t8, Operand(scratch));
5301  ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
5302  ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
5303  // If it's a string and it's not a cons string then it's an object containing
5304  // no GC pointers.
5305  lbu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
5306  And(t8, scratch, Operand(kIsIndirectStringMask | kIsNotStringMask));
5307  Branch(not_data_object, ne, t8, Operand(zero_reg));
5308  bind(&is_data_object);
5309 }
5310 
5311 
5312 void MacroAssembler::GetMarkBits(Register addr_reg,
5313  Register bitmap_reg,
5314  Register mask_reg) {
5315  ASSERT(!AreAliased(addr_reg, bitmap_reg, mask_reg, no_reg));
5316  And(bitmap_reg, addr_reg, Operand(~Page::kPageAlignmentMask));
5317  Ext(mask_reg, addr_reg, kPointerSizeLog2, Bitmap::kBitsPerCellLog2);
5318  const int kLowBits = kPointerSizeLog2 + Bitmap::kBitsPerCellLog2;
5319  Ext(t8, addr_reg, kLowBits, kPageSizeBits - kLowBits);
5320  sll(t8, t8, kPointerSizeLog2);
5321  Addu(bitmap_reg, bitmap_reg, t8);
5322  li(t8, Operand(1));
5323  sllv(mask_reg, t8, mask_reg);
5324 }
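
// Illustrative sketch, not from the V8 sources: locating an object's mark
// bit. The page base comes from masking off the in-page offset; the
// pointer-granular offset within the page then splits into a cell index
// (upper bits) and a bit index within that 32-bit cell (low kBitsPerCellLog2
// bits). Names are hypothetical; callers above add MemoryChunk::kHeaderSize
// when loading the cell.
struct MarkBitLocationSketch {
  uintptr_t cell_base;  // Page base plus the byte-scaled cell index.
  uint32_t mask;        // Single bit selecting the mark bit in the cell.
};
static inline MarkBitLocationSketch LocateMarkBitSketch(
    uintptr_t addr,
    uintptr_t page_alignment_mask,
    int pointer_size_log2,    // kPointerSizeLog2 (2 on mips32).
    int bits_per_cell_log2,   // Bitmap::kBitsPerCellLog2.
    int page_size_bits) {     // kPageSizeBits.
  MarkBitLocationSketch loc;
  uintptr_t page = addr & ~page_alignment_mask;
  uint32_t bit_index = static_cast<uint32_t>(addr >> pointer_size_log2) &
                       ((1u << bits_per_cell_log2) - 1);
  int low_bits = pointer_size_log2 + bits_per_cell_log2;
  uintptr_t cell_index =
      (addr >> low_bits) &
      ((static_cast<uintptr_t>(1) << (page_size_bits - low_bits)) - 1);
  loc.cell_base = page + (cell_index << pointer_size_log2);
  loc.mask = 1u << bit_index;
  return loc;
}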
5325 
5326 
5327 void MacroAssembler::EnsureNotWhite(
5328  Register value,
5329  Register bitmap_scratch,
5330  Register mask_scratch,
5331  Register load_scratch,
5332  Label* value_is_white_and_not_data) {
5333  ASSERT(!AreAliased(value, bitmap_scratch, mask_scratch, t8));
5334  GetMarkBits(value, bitmap_scratch, mask_scratch);
5335 
5336  // If the value is black or grey we don't need to do anything.
5337  ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0);
5338  ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
5339  ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0);
5340  ASSERT(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
5341 
5342  Label done;
5343 
5344  // Since both black and grey have a 1 in the first position and white does
5345  // not have a 1 there we only need to check one bit.
5346  lw(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
5347  And(t8, mask_scratch, load_scratch);
5348  Branch(&done, ne, t8, Operand(zero_reg));
5349 
5350  if (emit_debug_code()) {
5351  // Check for impossible bit pattern.
5352  Label ok;
5353  // sll may overflow, making the check conservative.
5354  sll(t8, mask_scratch, 1);
5355  And(t8, load_scratch, t8);
5356  Branch(&ok, eq, t8, Operand(zero_reg));
5357  stop("Impossible marking bit pattern");
5358  bind(&ok);
5359  }
5360 
5361  // Value is white. We check whether it is data that doesn't need scanning.
5362  // Currently only checks for HeapNumber and non-cons strings.
5363  Register map = load_scratch; // Holds map while checking type.
5364  Register length = load_scratch; // Holds length of object after testing type.
5365  Label is_data_object;
5366 
5367  // Check for heap-number
5368  lw(map, FieldMemOperand(value, HeapObject::kMapOffset));
5369  LoadRoot(t8, Heap::kHeapNumberMapRootIndex);
5370  {
5371  Label skip;
5372  Branch(&skip, ne, t8, Operand(map));
5373  li(length, HeapNumber::kSize);
5374  Branch(&is_data_object);
5375  bind(&skip);
5376  }
5377 
5378  // Check for strings.
5379  ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
5380  ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
5381  // If it's a string and it's not a cons string then it's an object containing
5382  // no GC pointers.
5383  Register instance_type = load_scratch;
5384  lbu(instance_type, FieldMemOperand(map, Map::kInstanceTypeOffset));
5385  And(t8, instance_type, Operand(kIsIndirectStringMask | kIsNotStringMask));
5386  Branch(value_is_white_and_not_data, ne, t8, Operand(zero_reg));
5387  // It's a non-indirect (non-cons and non-slice) string.
5388  // If it's external, the length is just ExternalString::kSize.
5389  // Otherwise it's String::kHeaderSize + string->length() * (1 or 2).
5390  // External strings are the only ones with the kExternalStringTag bit
5391  // set.
5394  And(t8, instance_type, Operand(kExternalStringTag));
5395  {
5396  Label skip;
5397  Branch(&skip, eq, t8, Operand(zero_reg));
5398  li(length, ExternalString::kSize);
5399  Branch(&is_data_object);
5400  bind(&skip);
5401  }
5402 
5403  // Sequential string, either ASCII or UC16.
5404  // For ASCII (char-size of 1) we shift the smi tag away to get the length.
5405  // For UC16 (char-size of 2) we just leave the smi tag in place, thereby
5406  // getting the length multiplied by 2.
5408  ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
5409  lw(t9, FieldMemOperand(value, String::kLengthOffset));
5410  And(t8, instance_type, Operand(kStringEncodingMask));
5411  {
5412  Label skip;
5413  Branch(&skip, eq, t8, Operand(zero_reg));
5414  srl(t9, t9, 1);
5415  bind(&skip);
5416  }
5417  Addu(length, t9, Operand(SeqString::kHeaderSize + kObjectAlignmentMask));
5418  And(length, length, Operand(~kObjectAlignmentMask));
5419 
5420  bind(&is_data_object);
5421  // Value is a data object, and it is white. Mark it black. Since we know
5422  // that the object is white we can make it black by flipping one bit.
5423  lw(t8, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
5424  Or(t8, t8, Operand(mask_scratch));
5425  sw(t8, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
5426 
5427  And(bitmap_scratch, bitmap_scratch, Operand(~Page::kPageAlignmentMask));
5428  lw(t8, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
5429  Addu(t8, t8, Operand(length));
5430  sw(t8, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
5431 
5432  bind(&done);
5433 }
5434 
5435 
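// --- Editorial sketch, not part of macro-assembler-mips.cc -------------------
// The sequential-string size computation in EnsureNotWhite leans on the smi
// encoding of the length field: on a 32-bit target a smi is the value shifted
// left by one, so for one-byte strings the tag is shifted away to get the
// character count, while for two-byte strings keeping the tag doubles the
// count "for free". kSketchSeqStringHeaderSize and the 4-byte alignment are
// placeholder assumptions for SeqString::kHeaderSize and kObjectAlignment.
#include <stdint.h>

namespace string_size_sketch {

const uint32_t kSketchSmiTagSize = 1;               // assumed: smi == value << 1
const uint32_t kSketchSeqStringHeaderSize = 12;     // placeholder header size
const uint32_t kSketchObjectAlignmentMask = 4 - 1;  // assumed 4-byte alignment

inline uint32_t SeqStringSize(uint32_t smi_length, bool is_one_byte) {
  // One-byte: drop the smi tag. Two-byte: keep it, which multiplies the
  // character count by the 2-byte character size.
  uint32_t byte_length =
      is_one_byte ? (smi_length >> kSketchSmiTagSize) : smi_length;
  // Round up to the object alignment, like the Addu/And pair above.
  return (byte_length + kSketchSeqStringHeaderSize +
          kSketchObjectAlignmentMask) & ~kSketchObjectAlignmentMask;
}

}  // namespace string_size_sketch
// -----------------------------------------------------------------------------
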
5436 void MacroAssembler::Throw(BailoutReason reason) {
5437  Label throw_start;
5438  bind(&throw_start);
5439 #ifdef DEBUG
5440  const char* msg = GetBailoutReason(reason);
5441  if (msg != NULL) {
5442  RecordComment("Throw message: ");
5443  RecordComment(msg);
5444  }
5445 #endif
5446 
5447  li(a0, Operand(Smi::FromInt(reason)));
5448  push(a0);
5449  // Disable stub call restrictions to always allow calls to throw.
5450  if (!has_frame_) {
5451  // We don't actually want to generate a pile of code for this, so just
5452  // claim there is a stack frame, without generating one.
5453  FrameScope scope(this, StackFrame::NONE);
5454  CallRuntime(Runtime::kHiddenThrowMessage, 1);
5455  } else {
5456  CallRuntime(Runtime::kHiddenThrowMessage, 1);
5457  }
5458  // will not return here
5459  if (is_trampoline_pool_blocked()) {
5460  // If the calling code cares about the exact number of
5461  // instructions generated, we insert padding here to keep the size
5462  // of the ThrowMessage macro constant.
5463  // Currently in debug mode with debug_code enabled the number of
5464  // generated instructions is 14, so we use this as a maximum value.
5465  static const int kExpectedThrowMessageInstructions = 14;
5466  int throw_instructions = InstructionsGeneratedSince(&throw_start);
5467  ASSERT(throw_instructions <= kExpectedThrowMessageInstructions);
5468  while (throw_instructions++ < kExpectedThrowMessageInstructions) {
5469  nop();
5470  }
5471  }
5472 }
5473 
5474 
5475 void MacroAssembler::ThrowIf(Condition cc,
5476  BailoutReason reason,
5477  Register rs,
5478  Operand rt) {
5479  Label L;
5480  Branch(&L, NegateCondition(cc), rs, rt);
5481  Throw(reason);
5482  // will not return here
5483  bind(&L);
5484 }
5485 
5486 
5487 void MacroAssembler::LoadInstanceDescriptors(Register map,
5488  Register descriptors) {
5489  lw(descriptors, FieldMemOperand(map, Map::kDescriptorsOffset));
5490 }
5491 
5492 
5493 void MacroAssembler::NumberOfOwnDescriptors(Register dst, Register map) {
5494  lw(dst, FieldMemOperand(map, Map::kBitField3Offset));
5495  DecodeField<Map::NumberOfOwnDescriptorsBits>(dst);
5496 }
5497 
5498 
5499 void MacroAssembler::EnumLength(Register dst, Register map) {
5500  STATIC_ASSERT(Map::EnumLengthBits::kShift == 0);
5501  lw(dst, FieldMemOperand(map, Map::kBitField3Offset));
5502  And(dst, dst, Operand(Smi::FromInt(Map::EnumLengthBits::kMask)));
5503 }
5504 
5505 
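// --- Editorial sketch, not part of macro-assembler-mips.cc -------------------
// EnumLength works on the smi-encoded bit_field3 word. Because
// Map::EnumLengthBits starts at shift 0 and a 32-bit smi is the value shifted
// left by one, masking with Smi::FromInt(kMask) keeps exactly the field's bits
// and leaves the result itself smi-encoded, ready for the Smi comparisons in
// CheckEnumCache below. The smi tag size here is an assumption for a 32-bit
// target.
#include <stdint.h>

namespace enum_length_sketch {

const uint32_t kSketchSmiTagSize = 1;  // assumed: smi == value << 1

inline uint32_t SmiFromInt(uint32_t value) {
  return value << kSketchSmiTagSize;
}

// Extracts a bit field located at shift 0 from a smi-encoded word, returning
// it still smi-encoded, which mirrors what EnumLength leaves in 'dst'.
inline uint32_t SmiMaskedField(uint32_t smi_encoded_word, uint32_t field_mask) {
  return smi_encoded_word & SmiFromInt(field_mask);
}

}  // namespace enum_length_sketch
// -----------------------------------------------------------------------------
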
5506 void MacroAssembler::CheckEnumCache(Register null_value, Label* call_runtime) {
5507  Register empty_fixed_array_value = t2;
5508  LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
5509  Label next, start;
5510  mov(a2, a0);
5511 
5512  // Check if the enum length field is properly initialized, indicating that
5513  // there is an enum cache.
5514  lw(a1, FieldMemOperand(a2, HeapObject::kMapOffset));
5515 
5516  EnumLength(a3, a1);
5517  Branch(
5518  call_runtime, eq, a3, Operand(Smi::FromInt(kInvalidEnumCacheSentinel)));
5519 
5520  jmp(&start);
5521 
5522  bind(&next);
5523  lw(a1, FieldMemOperand(a2, HeapObject::kMapOffset));
5524 
5525  // For all objects but the receiver, check that the cache is empty.
5526  EnumLength(a3, a1);
5527  Branch(call_runtime, ne, a3, Operand(Smi::FromInt(0)));
5528 
5529  bind(&start);
5530 
5531  // Check that there are no elements. Register a2 contains the current JS
5532  // object we've reached through the prototype chain.
5533  Label no_elements;
5534  lw(a2, FieldMemOperand(a2, JSObject::kElementsOffset));
5535  Branch(&no_elements, eq, a2, Operand(empty_fixed_array_value));
5536 
5537  // Second chance, the object may be using the empty slow element dictionary.
5538  LoadRoot(at, Heap::kEmptySlowElementDictionaryRootIndex);
5539  Branch(call_runtime, ne, a2, Operand(at));
5540 
5541  bind(&no_elements);
5542  lw(a2, FieldMemOperand(a1, Map::kPrototypeOffset));
5543  Branch(&next, ne, a2, Operand(null_value));
5544 }
5545 
5546 
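// --- Editorial sketch, not part of macro-assembler-mips.cc -------------------
// The control flow of CheckEnumCache, expressed over a hypothetical object
// model. SketchMap, SketchObject and their fields are inventions for
// illustration; the generated code above performs the same checks on raw
// tagged words. Returning false corresponds to branching to 'call_runtime'.
#include <stddef.h>

namespace enum_cache_sketch {

struct SketchObject;

struct SketchMap {
  int enum_length;          // stands in for Map::EnumLengthBits
  SketchObject* prototype;  // NULL stands in for the null value
};

struct SketchObject {
  SketchMap* map;
  const void* elements;     // stands in for the elements pointer
};

const int kSketchInvalidEnumCacheSentinel = -1;  // placeholder sentinel value

inline bool CheckEnumCache(SketchObject* receiver,
                           const void* empty_fixed_array,
                           const void* empty_slow_dictionary) {
  SketchObject* current = receiver;
  bool is_receiver = true;
  while (current != NULL) {
    SketchMap* map = current->map;
    if (is_receiver) {
      // The receiver only needs an initialized enum cache.
      if (map->enum_length == kSketchInvalidEnumCacheSentinel) return false;
    } else {
      // For all objects but the receiver the cache must be empty.
      if (map->enum_length != 0) return false;
    }
    // No elements allowed, apart from the empty slow element dictionary.
    if (current->elements != empty_fixed_array &&
        current->elements != empty_slow_dictionary) {
      return false;
    }
    current = map->prototype;
    is_receiver = false;
  }
  return true;
}

}  // namespace enum_cache_sketch
// -----------------------------------------------------------------------------
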
5547 void MacroAssembler::ClampUint8(Register output_reg, Register input_reg) {
5548  ASSERT(!output_reg.is(input_reg));
5549  Label done;
5550  li(output_reg, Operand(255));
5551  // Normal branch: nop in delay slot.
5552  Branch(&done, gt, input_reg, Operand(output_reg));
5553  // Use delay slot in this branch.
5554  Branch(USE_DELAY_SLOT, &done, lt, input_reg, Operand(zero_reg));
5555  mov(output_reg, zero_reg); // In delay slot.
5556  mov(output_reg, input_reg); // Value is in range 0..255.
5557  bind(&done);
5558 }
5559 
5560 
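// --- Editorial sketch, not part of macro-assembler-mips.cc -------------------
// The integer clamp that the two branches in ClampUint8 implement, with the
// delay-slot scheduling removed.
#include <stdint.h>

namespace clamp_sketch {

inline int32_t ClampToUint8(int32_t value) {
  if (value > 255) return 255;  // first Branch: keep the preloaded 255
  if (value < 0) return 0;      // second Branch: zero written in the delay slot
  return value;                 // already in the range 0..255
}

}  // namespace clamp_sketch
// -----------------------------------------------------------------------------
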
5561 void MacroAssembler::ClampDoubleToUint8(Register result_reg,
5562  DoubleRegister input_reg,
5563  DoubleRegister temp_double_reg) {
5564  Label above_zero;
5565  Label done;
5566  Label in_bounds;
5567 
5568  Move(temp_double_reg, 0.0);
5569  BranchF(&above_zero, NULL, gt, input_reg, temp_double_reg);
5570 
5571  // Double value is NaN, zero or negative, return 0.
5572  mov(result_reg, zero_reg);
5573  Branch(&done);
5574 
5575  // Double value is >= 255, return 255.
5576  bind(&above_zero);
5577  Move(temp_double_reg, 255.0);
5578  BranchF(&in_bounds, NULL, le, input_reg, temp_double_reg);
5579  li(result_reg, Operand(255));
5580  Branch(&done);
5581 
5582  // In 0-255 range, round and truncate.
5583  bind(&in_bounds);
5584  cvt_w_d(temp_double_reg, input_reg);
5585  mfc1(result_reg, temp_double_reg);
5586  bind(&done);
5587 }
5588 
5589 
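// --- Editorial sketch, not part of macro-assembler-mips.cc -------------------
// ClampDoubleToUint8 maps NaN and everything not greater than zero to 0,
// everything above 255 to 255, and converts the rest with cvt_w_d, which
// rounds according to the FCSR (round-to-nearest by default). The sketch
// rounds with +0.5 instead, which only differs from the FPU's
// round-to-nearest-even behaviour on exact .5 ties.
#include <stdint.h>

namespace clamp_double_sketch {

inline uint8_t ClampDoubleToUint8(double value) {
  // NaN fails the '> 0.0' test, so it clamps to 0, as does anything <= 0.
  if (!(value > 0.0)) return 0;
  if (value > 255.0) return 255;
  return static_cast<uint8_t>(value + 0.5);  // approximate cvt_w_d rounding
}

}  // namespace clamp_double_sketch
// -----------------------------------------------------------------------------
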
5590 void MacroAssembler::TestJSArrayForAllocationMemento(
5591  Register receiver_reg,
5592  Register scratch_reg,
5593  Label* no_memento_found,
5594  Condition cond,
5595  Label* allocation_memento_present) {
5596  ExternalReference new_space_start =
5597  ExternalReference::new_space_start(isolate());
5598  ExternalReference new_space_allocation_top =
5599  ExternalReference::new_space_allocation_top_address(isolate());
5600  Addu(scratch_reg, receiver_reg,
5601  Operand(JSArray::kSize + AllocationMemento::kSize - kHeapObjectTag));
5602  Branch(no_memento_found, lt, scratch_reg, Operand(new_space_start));
5603  li(at, Operand(new_space_allocation_top));
5604  lw(at, MemOperand(at));
5605  Branch(no_memento_found, gt, scratch_reg, Operand(at));
5606  lw(scratch_reg, MemOperand(scratch_reg, -AllocationMemento::kSize));
5607  if (allocation_memento_present) {
5608  Branch(allocation_memento_present, cond, scratch_reg,
5609  Operand(isolate()->factory()->allocation_memento_map()));
5610  }
5611 }
5612 
5613 
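// --- Editorial sketch, not part of macro-assembler-mips.cc -------------------
// TestJSArrayForAllocationMemento computes the address just past where an
// AllocationMemento would sit behind the array and checks that it lies inside
// the used part of new space before comparing the memento's map. The size
// constants below are placeholders for JSArray::kSize and
// AllocationMemento::kSize; addresses are compared unsigned here for
// simplicity, while the generated code uses signed branches.
#include <stdint.h>

namespace memento_sketch {

const uint32_t kSketchJSArraySize = 16;   // placeholder for JSArray::kSize
const uint32_t kSketchMementoSize = 8;    // placeholder for AllocationMemento::kSize
const uint32_t kSketchHeapObjectTag = 1;  // tagged heap pointers carry a low bit

// True when a memento could be located directly behind the array, i.e. its
// end address lies inside [new_space_start, new_space_top]. The caller still
// has to compare the candidate memento's map word.
inline bool MementoMightFollowArray(uint32_t receiver,
                                    uint32_t new_space_start,
                                    uint32_t new_space_top) {
  uint32_t memento_end =
      receiver + kSketchJSArraySize + kSketchMementoSize - kSketchHeapObjectTag;
  if (memento_end < new_space_start) return false;  // the 'lt' branch above
  if (memento_end > new_space_top) return false;    // the 'gt' branch above
  return true;
}

}  // namespace memento_sketch
// -----------------------------------------------------------------------------
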
5614 Register GetRegisterThatIsNotOneOf(Register reg1,
5615  Register reg2,
5616  Register reg3,
5617  Register reg4,
5618  Register reg5,
5619  Register reg6) {
5620  RegList regs = 0;
5621  if (reg1.is_valid()) regs |= reg1.bit();
5622  if (reg2.is_valid()) regs |= reg2.bit();
5623  if (reg3.is_valid()) regs |= reg3.bit();
5624  if (reg4.is_valid()) regs |= reg4.bit();
5625  if (reg5.is_valid()) regs |= reg5.bit();
5626  if (reg6.is_valid()) regs |= reg6.bit();
5627 
5628  for (int i = 0; i < Register::NumAllocatableRegisters(); i++) {
5629  Register candidate = Register::FromAllocationIndex(i);
5630  if (regs & candidate.bit()) continue;
5631  return candidate;
5632  }
5633  UNREACHABLE();
5634  return no_reg;
5635 }
5636 
5637 
5638 void MacroAssembler::JumpIfDictionaryInPrototypeChain(
5639  Register object,
5640  Register scratch0,
5641  Register scratch1,
5642  Label* found) {
5643  ASSERT(!scratch1.is(scratch0));
5644  Factory* factory = isolate()->factory();
5645  Register current = scratch0;
5646  Label loop_again;
5647 
5648  // 'current' starts at the object; the loop below walks its prototype chain.
5649  Move(current, object);
5650 
5651  // Loop based on the map going up the prototype chain.
5652  bind(&loop_again);
5653  lw(current, FieldMemOperand(current, HeapObject::kMapOffset));
5654  lb(scratch1, FieldMemOperand(current, Map::kBitField2Offset));
5655  Ext(scratch1, scratch1, Map::kElementsKindShift, Map::kElementsKindBitCount);
5656  Branch(found, eq, scratch1, Operand(DICTIONARY_ELEMENTS));
5657  lw(current, FieldMemOperand(current, Map::kPrototypeOffset));
5658  Branch(&loop_again, ne, current, Operand(factory->null_value()));
5659 }
5660 
5661 
5662 bool AreAliased(Register r1, Register r2, Register r3, Register r4) {
5663  if (r1.is(r2)) return true;
5664  if (r1.is(r3)) return true;
5665  if (r1.is(r4)) return true;
5666  if (r2.is(r3)) return true;
5667  if (r2.is(r4)) return true;
5668  if (r3.is(r4)) return true;
5669  return false;
5670 }
5671 
5672 
5673 CodePatcher::CodePatcher(byte* address, int instructions)
5674  : address_(address),
5675  size_(instructions * Assembler::kInstrSize),
5676  masm_(NULL, address, size_ + Assembler::kGap) {
5677  // Create a new macro assembler pointing to the address of the code to patch.
5678  // The size is adjusted with kGap in order for the assembler to generate size
5679  // bytes of instructions without failing with buffer size constraints.
5680  ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
5681 }
5682 
5683 
5684 CodePatcher::~CodePatcher() {
5685  // Indicate that code has changed.
5686  CPU::FlushICache(address_, size_);
5687 
5688  // Check that the code was patched as expected.
5689  ASSERT(masm_.pc_ == address_ + size_);
5690  ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
5691 }
5692 
5693 
5694 void CodePatcher::Emit(Instr instr) {
5695  masm()->emit(instr);
5696 }
5697 
5698 
5699 void CodePatcher::Emit(Address addr) {
5700  masm()->emit(reinterpret_cast<Instr>(addr));
5701 }
5702 
5703 
5704 void CodePatcher::ChangeBranchCondition(Condition cond) {
5705  Instr instr = Assembler::instr_at(masm_.pc_);
5706  ASSERT(Assembler::IsBranch(instr));
5707  uint32_t opcode = Assembler::GetOpcodeField(instr);
5708  // Currently only the 'eq' and 'ne' cond values are supported and the simple
5709  // branch instructions (with opcode being the branch type).
5710  // There are some special cases (see Assembler::IsBranch()) so extending this
5711  // would be tricky.
5712  ASSERT(opcode == BEQ ||
5713  opcode == BNE ||
5714  opcode == BLEZ ||
5715  opcode == BGTZ ||
5716  opcode == BEQL ||
5717  opcode == BNEL ||
5718  opcode == BLEZL ||
5719  opcode == BGTZL);
5720  opcode = (cond == eq) ? BEQ : BNE;
5721  instr = (instr & ~kOpcodeMask) | opcode;
5722  masm_.emit(instr);
5723 }
5724 
5725 
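// --- Editorial sketch, not part of macro-assembler-mips.cc -------------------
// ChangeBranchCondition only has to rewrite the 6-bit opcode field, because a
// MIPS I-type branch keeps its registers and offset in the low 26 bits. The
// BEQ/BNE values below are the standard MIPS32 opcodes (0x04 and 0x05);
// kSketchOpcodeMask stands in for kOpcodeMask.
#include <stdint.h>

namespace branch_patch_sketch {

const uint32_t kSketchOpcodeShift = 26;  // opcode occupies bits 31..26
const uint32_t kSketchOpcodeMask = 0x3Fu << kSketchOpcodeShift;
const uint32_t kSketchBeq = 0x04u << kSketchOpcodeShift;
const uint32_t kSketchBne = 0x05u << kSketchOpcodeShift;

// Keeps the register and offset fields of 'instr' and replaces the opcode,
// mirroring the (instr & ~kOpcodeMask) | opcode step above.
inline uint32_t SetBranchCondition(uint32_t instr, bool equal) {
  return (instr & ~kSketchOpcodeMask) | (equal ? kSketchBeq : kSketchBne);
}

}  // namespace branch_patch_sketch
// -----------------------------------------------------------------------------
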
5726 void MacroAssembler::TruncatingDiv(Register result,
5727  Register dividend,
5728  int32_t divisor) {
5729  ASSERT(!dividend.is(result));
5730  ASSERT(!dividend.is(at));
5731  ASSERT(!result.is(at));
5732  MultiplierAndShift ms(divisor);
5733  li(at, Operand(ms.multiplier()));
5734  Mult(dividend, Operand(at));
5735  mfhi(result);
5736  if (divisor > 0 && ms.multiplier() < 0) {
5737  Addu(result, result, Operand(dividend));
5738  }
5739  if (divisor < 0 && ms.multiplier() > 0) {
5740  Subu(result, result, Operand(dividend));
5741  }
5742  if (ms.shift() > 0) sra(result, result, ms.shift());
5743  srl(at, dividend, 31);
5744  Addu(result, result, Operand(at));
5745 }
5746 
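// --- Editorial sketch, not part of macro-assembler-mips.cc -------------------
// TruncatingDiv evaluates a truncating division by a constant from a
// precomputed (multiplier, shift) pair, the Hacker's Delight style technique
// that MultiplierAndShift encapsulates. The sketch assumes such a pair is
// already available (computing it is not shown) and that the right shift of a
// negative value is arithmetic, as it is on the MIPS target.
#include <stdint.h>

namespace truncating_div_sketch {

inline int32_t TruncatingDiv(int32_t dividend, int32_t divisor,
                             int32_t multiplier, int shift) {
  // High 32 bits of the signed 64-bit product: what mfhi returns after Mult.
  int32_t result = static_cast<int32_t>(
      (static_cast<int64_t>(dividend) * multiplier) >> 32);
  // The same sign corrections the generated code applies.
  if (divisor > 0 && multiplier < 0) result += dividend;
  if (divisor < 0 && multiplier > 0) result -= dividend;
  if (shift > 0) result >>= shift;                  // sra
  result += static_cast<uint32_t>(dividend) >> 31;  // srl: +1 for negatives
  return result;
}

}  // namespace truncating_div_sketch
// -----------------------------------------------------------------------------
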
5747 
5748 } } // namespace v8::internal
5749 
5750 #endif // V8_TARGET_ARCH_MIPS