v8 3.11.10 (node 0.8.26)
V8 is Google's open source JavaScript engine
lithium-codegen-mips.cc
1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
4 // met:
5 //
6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided
11 // with the distribution.
12 // * Neither the name of Google Inc. nor the names of its
13 // contributors may be used to endorse or promote products derived
14 // from this software without specific prior written permission.
15 //
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 
28 #include "v8.h"
29 
30 #include "mips/lithium-codegen-mips.h"
31 #include "mips/lithium-gap-resolver-mips.h"
32 #include "code-stubs.h"
33 #include "stub-cache.h"
34 
35 namespace v8 {
36 namespace internal {
37 
38 
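// SafepointGenerator is handed to call sites as a CallWrapper: AfterCall()
// records a safepoint for the given pointer map immediately after the
// generated call, so the GC can locate tagged values live across the call.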
39 class SafepointGenerator : public CallWrapper {
40  public:
41  SafepointGenerator(LCodeGen* codegen,
42  LPointerMap* pointers,
43  Safepoint::DeoptMode mode)
44  : codegen_(codegen),
45  pointers_(pointers),
46  deopt_mode_(mode) { }
47  virtual ~SafepointGenerator() { }
48 
49  virtual void BeforeCall(int call_size) const { }
50 
51  virtual void AfterCall() const {
52  codegen_->RecordSafepoint(pointers_, deopt_mode_);
53  }
54 
55  private:
56  LCodeGen* codegen_;
57  LPointerMap* pointers_;
58  Safepoint::DeoptMode deopt_mode_;
59 };
60 
61 
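// The '__' macro is shorthand for masm()->, so '__ op(...)' emits 'op'
// through the MacroAssembler.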
62 #define __ masm()->
63 
64 bool LCodeGen::GenerateCode() {
65  HPhase phase("Z_Code generation", chunk());
66  ASSERT(is_unused());
67  status_ = GENERATING;
68  CpuFeatures::Scope scope(FPU);
69 
70  CodeStub::GenerateFPStubs();
71 
72  // Open a frame scope to indicate that there is a frame on the stack. The
73  // NONE indicates that the scope shouldn't actually generate code to set up
74  // the frame (that is done in GeneratePrologue).
75  FrameScope frame_scope(masm_, StackFrame::NONE);
76 
77  return GeneratePrologue() &&
78  GenerateBody() &&
79  GenerateDeferredCode() &&
80  GenerateSafepointTable();
81 }
82 
83 
84 void LCodeGen::FinishCode(Handle<Code> code) {
85  ASSERT(is_done());
86  code->set_stack_slots(GetStackSlotCount());
87  code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
88  PopulateDeoptimizationData(code);
89 }
90 
91 
92 void LCodeGen::Abort(const char* format, ...) {
93  if (FLAG_trace_bailout) {
94  SmartArrayPointer<char> name(
95  info()->shared_info()->DebugName()->ToCString());
96  PrintF("Aborting LCodeGen in @\"%s\": ", *name);
97  va_list arguments;
98  va_start(arguments, format);
99  OS::VPrint(format, arguments);
100  va_end(arguments);
101  PrintF("\n");
102  }
103  status_ = ABORTED;
104 }
105 
106 
107 void LCodeGen::Comment(const char* format, ...) {
108  if (!FLAG_code_comments) return;
109  char buffer[4 * KB];
110  StringBuilder builder(buffer, ARRAY_SIZE(buffer));
111  va_list arguments;
112  va_start(arguments, format);
113  builder.AddFormattedList(format, arguments);
114  va_end(arguments);
115 
116  // Copy the string before recording it in the assembler to avoid
117  // issues when the stack allocated buffer goes out of scope.
118  size_t length = builder.position();
119  Vector<char> copy = Vector<char>::New(length + 1);
120  memcpy(copy.start(), builder.Finalize(), copy.length());
121  masm()->RecordComment(copy.start());
122 }
123 
124 
125 bool LCodeGen::GeneratePrologue() {
126  ASSERT(is_generating());
127 
128 #ifdef DEBUG
129  if (strlen(FLAG_stop_at) > 0 &&
130  info_->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
131  __ stop("stop_at");
132  }
133 #endif
134 
135  // a1: Callee's JS function.
136  // cp: Callee's context.
137  // fp: Caller's frame pointer.
138  // ra: Caller's pc (return address).
139 
140  // Strict mode functions and builtins need to replace the receiver
141  // with undefined when called as functions (without an explicit
142  // receiver object). t1 is zero for method calls and non-zero for
143  // function calls.
144  if (!info_->is_classic_mode() || info_->is_native()) {
145  Label ok;
146  __ Branch(&ok, eq, t1, Operand(zero_reg));
147 
148  int receiver_offset = scope()->num_parameters() * kPointerSize;
149  __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
150  __ sw(a2, MemOperand(sp, receiver_offset));
151  __ bind(&ok);
152  }
153 
154  __ Push(ra, fp, cp, a1);
155  __ Addu(fp, sp, Operand(2 * kPointerSize)); // Adj. FP to point to saved FP.
156 
157  // Reserve space for the stack slots needed by the code.
158  int slots = GetStackSlotCount();
159  if (slots > 0) {
160  if (FLAG_debug_code) {
161  __ li(a0, Operand(slots));
162  __ li(a2, Operand(kSlotsZapValue));
163  Label loop;
164  __ bind(&loop);
165  __ push(a2);
166  __ Subu(a0, a0, 1);
167  __ Branch(&loop, ne, a0, Operand(zero_reg));
168  } else {
169  __ Subu(sp, sp, Operand(slots * kPointerSize));
170  }
171  }
172 
173  // Possibly allocate a local context.
174  int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
175  if (heap_slots > 0) {
176  Comment(";;; Allocate local context");
177  // Argument to NewContext is the function, which is in a1.
178  __ push(a1);
179  if (heap_slots <= FastNewContextStub::kMaximumSlots) {
180  FastNewContextStub stub(heap_slots);
181  __ CallStub(&stub);
182  } else {
183  __ CallRuntime(Runtime::kNewFunctionContext, 1);
184  }
185  RecordSafepoint(Safepoint::kNoLazyDeopt);
186  // Context is returned in both v0 and cp. It replaces the context
187  // passed to us. It's saved in the stack and kept live in cp.
188  __ sw(v0, MemOperand(fp, StandardFrameConstants::kContextOffset));
189  // Copy any necessary parameters into the context.
190  int num_parameters = scope()->num_parameters();
191  for (int i = 0; i < num_parameters; i++) {
192  Variable* var = scope()->parameter(i);
193  if (var->IsContextSlot()) {
194  int parameter_offset = StandardFrameConstants::kCallerSPOffset +
195  (num_parameters - 1 - i) * kPointerSize;
196  // Load parameter from stack.
197  __ lw(a0, MemOperand(fp, parameter_offset));
198  // Store it in the context.
199  MemOperand target = ContextOperand(cp, var->index());
200  __ sw(a0, target);
201  // Update the write barrier. This clobbers a3 and a0.
202  __ RecordWriteContextSlot(
203  cp, target.offset(), a0, a3, kRAHasBeenSaved, kSaveFPRegs);
204  }
205  }
206  Comment(";;; End allocate local context");
207  }
208 
209  // Trace the call.
210  if (FLAG_trace) {
211  __ CallRuntime(Runtime::kTraceEnter, 0);
212  }
213  EnsureSpaceForLazyDeopt();
214  return !is_aborted();
215 }
216 
217 
218 bool LCodeGen::GenerateBody() {
219  ASSERT(is_generating());
220  bool emit_instructions = true;
221  for (current_instruction_ = 0;
222  !is_aborted() && current_instruction_ < instructions_->length();
223  current_instruction_++) {
224  LInstruction* instr = instructions_->at(current_instruction_);
225  if (instr->IsLabel()) {
226  LLabel* label = LLabel::cast(instr);
227  emit_instructions = !label->HasReplacement();
228  }
229 
230  if (emit_instructions) {
231  Comment(";;; @%d: %s.", current_instruction_, instr->Mnemonic());
232  instr->CompileToNative(this);
233  }
234  }
235  return !is_aborted();
236 }
237 
238 
239 bool LCodeGen::GenerateDeferredCode() {
240  ASSERT(is_generating());
241  if (deferred_.length() > 0) {
242  for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
243  LDeferredCode* code = deferred_[i];
244  __ bind(code->entry());
245  Comment(";;; Deferred code @%d: %s.",
246  code->instruction_index(),
247  code->instr()->Mnemonic());
248  code->Generate();
249  __ jmp(code->exit());
250  }
251  }
252  // Deferred code is the last part of the instruction sequence. Mark
253  // the generated code as done unless we bailed out.
254  if (!is_aborted()) status_ = DONE;
255  return !is_aborted();
256 }
257 
258 
259 bool LCodeGen::GenerateDeoptJumpTable() {
260  // TODO(plind): it is not clear that this will be an advantage for MIPS.
261  // Skipping it for now. Raised issue #100 for this.
262  Abort("Unimplemented: %s", "GenerateDeoptJumpTable");
263  return false;
264 }
265 
266 
267 bool LCodeGen::GenerateSafepointTable() {
268  ASSERT(is_done());
269  safepoints_.Emit(masm(), GetStackSlotCount());
270  return !is_aborted();
271 }
272 
273 
274 Register LCodeGen::ToRegister(int index) const {
275  return Register::FromAllocationIndex(index);
276 }
277 
278 
279 DoubleRegister LCodeGen::ToDoubleRegister(int index) const {
280  return DoubleRegister::FromAllocationIndex(index);
281 }
282 
283 
284 Register LCodeGen::ToRegister(LOperand* op) const {
285  ASSERT(op->IsRegister());
286  return ToRegister(op->index());
287 }
288 
289 
290 Register LCodeGen::EmitLoadRegister(LOperand* op, Register scratch) {
291  if (op->IsRegister()) {
292  return ToRegister(op->index());
293  } else if (op->IsConstantOperand()) {
294  LConstantOperand* const_op = LConstantOperand::cast(op);
295  Handle<Object> literal = chunk_->LookupLiteral(const_op);
296  Representation r = chunk_->LookupLiteralRepresentation(const_op);
297  if (r.IsInteger32()) {
298  ASSERT(literal->IsNumber());
299  __ li(scratch, Operand(static_cast<int32_t>(literal->Number())));
300  } else if (r.IsDouble()) {
301  Abort("EmitLoadRegister: Unsupported double immediate.");
302  } else {
303  ASSERT(r.IsTagged());
304  if (literal->IsSmi()) {
305  __ li(scratch, Operand(literal));
306  } else {
307  __ LoadHeapObject(scratch, Handle<HeapObject>::cast(literal));
308  }
309  }
310  return scratch;
311  } else if (op->IsStackSlot() || op->IsArgument()) {
312  __ lw(scratch, ToMemOperand(op));
313  return scratch;
314  }
315  UNREACHABLE();
316  return scratch;
317 }
318 
319 
320 DoubleRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
321  ASSERT(op->IsDoubleRegister());
322  return ToDoubleRegister(op->index());
323 }
324 
325 
326 DoubleRegister LCodeGen::EmitLoadDoubleRegister(LOperand* op,
327  FloatRegister flt_scratch,
328  DoubleRegister dbl_scratch) {
329  if (op->IsDoubleRegister()) {
330  return ToDoubleRegister(op->index());
331  } else if (op->IsConstantOperand()) {
332  LConstantOperand* const_op = LConstantOperand::cast(op);
333  Handle<Object> literal = chunk_->LookupLiteral(const_op);
334  Representation r = chunk_->LookupLiteralRepresentation(const_op);
335  if (r.IsInteger32()) {
336  ASSERT(literal->IsNumber());
337  __ li(at, Operand(static_cast<int32_t>(literal->Number())));
338  __ mtc1(at, flt_scratch);
339  __ cvt_d_w(dbl_scratch, flt_scratch);
340  return dbl_scratch;
341  } else if (r.IsDouble()) {
342  Abort("unsupported double immediate");
343  } else if (r.IsTagged()) {
344  Abort("unsupported tagged immediate");
345  }
346  } else if (op->IsStackSlot() || op->IsArgument()) {
347  MemOperand mem_op = ToMemOperand(op);
348  __ ldc1(dbl_scratch, mem_op);
349  return dbl_scratch;
350  }
351  UNREACHABLE();
352  return dbl_scratch;
353 }
354 
355 
356 Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
357  Handle<Object> literal = chunk_->LookupLiteral(op);
358  ASSERT(chunk_->LookupLiteralRepresentation(op).IsTagged());
359  return literal;
360 }
361 
362 
363 bool LCodeGen::IsInteger32(LConstantOperand* op) const {
364  return chunk_->LookupLiteralRepresentation(op).IsInteger32();
365 }
366 
367 
368 int LCodeGen::ToInteger32(LConstantOperand* op) const {
369  Handle<Object> value = chunk_->LookupLiteral(op);
370  ASSERT(chunk_->LookupLiteralRepresentation(op).IsInteger32());
371  ASSERT(static_cast<double>(static_cast<int32_t>(value->Number())) ==
372  value->Number());
373  return static_cast<int32_t>(value->Number());
374 }
375 
376 
377 double LCodeGen::ToDouble(LConstantOperand* op) const {
378  Handle<Object> value = chunk_->LookupLiteral(op);
379  return value->Number();
380 }
381 
382 
383 Operand LCodeGen::ToOperand(LOperand* op) {
384  if (op->IsConstantOperand()) {
385  LConstantOperand* const_op = LConstantOperand::cast(op);
386  Handle<Object> literal = chunk_->LookupLiteral(const_op);
387  Representation r = chunk_->LookupLiteralRepresentation(const_op);
388  if (r.IsInteger32()) {
389  ASSERT(literal->IsNumber());
390  return Operand(static_cast<int32_t>(literal->Number()));
391  } else if (r.IsDouble()) {
392  Abort("ToOperand Unsupported double immediate.");
393  }
394  ASSERT(r.IsTagged());
395  return Operand(literal);
396  } else if (op->IsRegister()) {
397  return Operand(ToRegister(op));
398  } else if (op->IsDoubleRegister()) {
399  Abort("ToOperand IsDoubleRegister unimplemented");
400  return Operand(0);
401  }
402  // Stack slots not implemented, use ToMemOperand instead.
403  UNREACHABLE();
404  return Operand(0);
405 }
406 
407 
408 MemOperand LCodeGen::ToMemOperand(LOperand* op) const {
409  ASSERT(!op->IsRegister());
410  ASSERT(!op->IsDoubleRegister());
411  ASSERT(op->IsStackSlot() || op->IsDoubleStackSlot());
412  int index = op->index();
413  if (index >= 0) {
414  // Local or spill slot. Skip the frame pointer, function, and
415  // context in the fixed part of the frame.
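 // With kPointerSize == 4 on MIPS32, spill slot 0 is therefore at fp - 12.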
416  return MemOperand(fp, -(index + 3) * kPointerSize);
417  } else {
418  // Incoming parameter. Skip the return address.
419  return MemOperand(fp, -(index - 1) * kPointerSize);
420  }
421 }
422 
423 
424 MemOperand LCodeGen::ToHighMemOperand(LOperand* op) const {
425  ASSERT(op->IsDoubleStackSlot());
426  int index = op->index();
427  if (index >= 0) {
428  // Local or spill slot. Skip the frame pointer, function, context,
429  // and the first word of the double in the fixed part of the frame.
430  return MemOperand(fp, -(index + 3) * kPointerSize + kPointerSize);
431  } else {
432  // Incoming parameter. Skip the return address and the first word of
433  // the double.
434  return MemOperand(fp, -(index - 1) * kPointerSize + kPointerSize);
435  }
436 }
437 
438 
439 void LCodeGen::WriteTranslation(LEnvironment* environment,
440  Translation* translation) {
441  if (environment == NULL) return;
442 
443  // The translation includes one command per value in the environment.
444  int translation_size = environment->values()->length();
445  // The output frame height does not include the parameters.
446  int height = translation_size - environment->parameter_count();
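 // E.g. an environment with 2 parameters and 5 values in total has
 // height 3 (locals plus expression stack).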
447 
448  WriteTranslation(environment->outer(), translation);
449  int closure_id = DefineDeoptimizationLiteral(environment->closure());
450  switch (environment->frame_type()) {
451  case JS_FUNCTION:
452  translation->BeginJSFrame(environment->ast_id(), closure_id, height);
453  break;
454  case JS_CONSTRUCT:
455  translation->BeginConstructStubFrame(closure_id, translation_size);
456  break;
457  case ARGUMENTS_ADAPTOR:
458  translation->BeginArgumentsAdaptorFrame(closure_id, translation_size);
459  break;
460  default:
461  UNREACHABLE();
462  }
463  for (int i = 0; i < translation_size; ++i) {
464  LOperand* value = environment->values()->at(i);
465  // spilled_registers_ and spilled_double_registers_ are either
466  // both NULL or both set.
467  if (environment->spilled_registers() != NULL && value != NULL) {
468  if (value->IsRegister() &&
469  environment->spilled_registers()[value->index()] != NULL) {
470  translation->MarkDuplicate();
471  AddToTranslation(translation,
472  environment->spilled_registers()[value->index()],
473  environment->HasTaggedValueAt(i));
474  } else if (
475  value->IsDoubleRegister() &&
476  environment->spilled_double_registers()[value->index()] != NULL) {
477  translation->MarkDuplicate();
478  AddToTranslation(
479  translation,
480  environment->spilled_double_registers()[value->index()],
481  false);
482  }
483  }
484 
485  AddToTranslation(translation, value, environment->HasTaggedValueAt(i));
486  }
487 }
488 
489 
490 void LCodeGen::AddToTranslation(Translation* translation,
491  LOperand* op,
492  bool is_tagged) {
493  if (op == NULL) {
494  // TODO(twuerthinger): Introduce marker operands to indicate that this value
495  // is not present and must be reconstructed from the deoptimizer. Currently
496  // this is only used for the arguments object.
497  translation->StoreArgumentsObject();
498  } else if (op->IsStackSlot()) {
499  if (is_tagged) {
500  translation->StoreStackSlot(op->index());
501  } else {
502  translation->StoreInt32StackSlot(op->index());
503  }
504  } else if (op->IsDoubleStackSlot()) {
505  translation->StoreDoubleStackSlot(op->index());
506  } else if (op->IsArgument()) {
507  ASSERT(is_tagged);
508  int src_index = GetStackSlotCount() + op->index();
509  translation->StoreStackSlot(src_index);
510  } else if (op->IsRegister()) {
511  Register reg = ToRegister(op);
512  if (is_tagged) {
513  translation->StoreRegister(reg);
514  } else {
515  translation->StoreInt32Register(reg);
516  }
517  } else if (op->IsDoubleRegister()) {
518  DoubleRegister reg = ToDoubleRegister(op);
519  translation->StoreDoubleRegister(reg);
520  } else if (op->IsConstantOperand()) {
521  Handle<Object> literal = chunk()->LookupLiteral(LConstantOperand::cast(op));
522  int src_index = DefineDeoptimizationLiteral(literal);
523  translation->StoreLiteral(src_index);
524  } else {
525  UNREACHABLE();
526  }
527 }
528 
529 
530 void LCodeGen::CallCode(Handle<Code> code,
531  RelocInfo::Mode mode,
532  LInstruction* instr) {
533  CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT);
534 }
535 
536 
537 void LCodeGen::CallCodeGeneric(Handle<Code> code,
538  RelocInfo::Mode mode,
539  LInstruction* instr,
540  SafepointMode safepoint_mode) {
541  ASSERT(instr != NULL);
542  LPointerMap* pointers = instr->pointer_map();
543  RecordPosition(pointers->position());
544  __ Call(code, mode);
545  RecordSafepointWithLazyDeopt(instr, safepoint_mode);
546 }
547 
548 
549 void LCodeGen::CallRuntime(const Runtime::Function* function,
550  int num_arguments,
551  LInstruction* instr) {
552  ASSERT(instr != NULL);
553  LPointerMap* pointers = instr->pointer_map();
554  ASSERT(pointers != NULL);
555  RecordPosition(pointers->position());
556 
557  __ CallRuntime(function, num_arguments);
558  RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
559 }
560 
561 
562 void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
563  int argc,
564  LInstruction* instr) {
565  __ CallRuntimeSaveDoubles(id);
566  RecordSafepointWithRegisters(
567  instr->pointer_map(), argc, Safepoint::kNoLazyDeopt);
568 }
569 
570 
571 void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
572  Safepoint::DeoptMode mode) {
573  if (!environment->HasBeenRegistered()) {
574  // Physical stack frame layout:
575  // -x ............. -4 0 ..................................... y
576  // [incoming arguments] [spill slots] [pushed outgoing arguments]
577 
578  // Layout of the environment:
579  // 0 ..................................................... size-1
580  // [parameters] [locals] [expression stack including arguments]
581 
582  // Layout of the translation:
583  // 0 ........................................................ size - 1 + 4
584  // [expression stack including arguments] [locals] [4 words] [parameters]
585  // |>------------ translation_size ------------<|
586 
587  int frame_count = 0;
588  int jsframe_count = 0;
589  for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
590  ++frame_count;
591  if (e->frame_type() == JS_FUNCTION) {
592  ++jsframe_count;
593  }
594  }
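 // frame_count counts every frame in the environment chain (including
 // arguments adaptor and construct stub frames); jsframe_count counts
 // only the JS_FUNCTION frames.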
595  Translation translation(&translations_, frame_count, jsframe_count, zone());
596  WriteTranslation(environment, &translation);
597  int deoptimization_index = deoptimizations_.length();
598  int pc_offset = masm()->pc_offset();
599  environment->Register(deoptimization_index,
600  translation.index(),
601  (mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
602  deoptimizations_.Add(environment, zone());
603  }
604 }
605 
606 
607 void LCodeGen::DeoptimizeIf(Condition cc,
608  LEnvironment* environment,
609  Register src1,
610  const Operand& src2) {
611  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
612  ASSERT(environment->HasBeenRegistered());
613  int id = environment->deoptimization_index();
614  Address entry = Deoptimizer::GetDeoptimizationEntry(id, Deoptimizer::EAGER);
615  if (entry == NULL) {
616  Abort("bailout was not prepared");
617  return;
618  }
619 
620  ASSERT(FLAG_deopt_every_n_times < 2); // Other values not supported on MIPS.
621 
622  if (FLAG_deopt_every_n_times == 1 &&
623  info_->shared_info()->opt_count() == id) {
624  __ Jump(entry, RelocInfo::RUNTIME_ENTRY);
625  return;
626  }
627 
628  if (FLAG_trap_on_deopt) {
629  Label skip;
630  if (cc != al) {
631  __ Branch(&skip, NegateCondition(cc), src1, src2);
632  }
633  __ stop("trap_on_deopt");
634  __ bind(&skip);
635  }
636 
637  // TODO(plind): The ARM port is a little different here because of its
638  // deopt jump table, which is not used on MIPS yet.
639  __ Jump(entry, RelocInfo::RUNTIME_ENTRY, cc, src1, src2);
640 }
641 
642 
643 void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
644  int length = deoptimizations_.length();
645  if (length == 0) return;
646  Handle<DeoptimizationInputData> data =
647  factory()->NewDeoptimizationInputData(length, TENURED);
648 
649  Handle<ByteArray> translations = translations_.CreateByteArray();
650  data->SetTranslationByteArray(*translations);
651  data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));
652 
653  Handle<FixedArray> literals =
654  factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
655  for (int i = 0; i < deoptimization_literals_.length(); i++) {
656  literals->set(i, *deoptimization_literals_[i]);
657  }
658  data->SetLiteralArray(*literals);
659 
660  data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id()));
661  data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_));
662 
663  // Populate the deoptimization entries.
664  for (int i = 0; i < length; i++) {
665  LEnvironment* env = deoptimizations_[i];
666  data->SetAstId(i, Smi::FromInt(env->ast_id()));
667  data->SetTranslationIndex(i, Smi::FromInt(env->translation_index()));
668  data->SetArgumentsStackHeight(i,
669  Smi::FromInt(env->arguments_stack_height()));
670  data->SetPc(i, Smi::FromInt(env->pc_offset()));
671  }
672  code->set_deoptimization_data(*data);
673 }
674 
675 
676 int LCodeGen::DefineDeoptimizationLiteral(Handle<Object> literal) {
677  int result = deoptimization_literals_.length();
678  for (int i = 0; i < deoptimization_literals_.length(); ++i) {
679  if (deoptimization_literals_[i].is_identical_to(literal)) return i;
680  }
681  deoptimization_literals_.Add(literal, zone());
682  return result;
683 }
684 
685 
686 void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
687  ASSERT(deoptimization_literals_.length() == 0);
688 
689  const ZoneList<Handle<JSFunction> >* inlined_closures =
690  chunk()->inlined_closures();
691 
692  for (int i = 0, length = inlined_closures->length();
693  i < length;
694  i++) {
695  DefineDeoptimizationLiteral(inlined_closures->at(i));
696  }
697 
698  inlined_function_count_ = deoptimization_literals_.length();
699 }
700 
701 
702 void LCodeGen::RecordSafepointWithLazyDeopt(
703  LInstruction* instr, SafepointMode safepoint_mode) {
704  if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
705  RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt);
706  } else {
707  ASSERT(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
708  RecordSafepointWithRegisters(
709  instr->pointer_map(), 0, Safepoint::kLazyDeopt);
710  }
711 }
712 
713 
714 void LCodeGen::RecordSafepoint(
715  LPointerMap* pointers,
716  Safepoint::Kind kind,
717  int arguments,
718  Safepoint::DeoptMode deopt_mode) {
719  ASSERT(expected_safepoint_kind_ == kind);
720 
721  const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();
722  Safepoint safepoint = safepoints_.DefineSafepoint(masm(),
723  kind, arguments, deopt_mode);
724  for (int i = 0; i < operands->length(); i++) {
725  LOperand* pointer = operands->at(i);
726  if (pointer->IsStackSlot()) {
727  safepoint.DefinePointerSlot(pointer->index(), zone());
728  } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
729  safepoint.DefinePointerRegister(ToRegister(pointer), zone());
730  }
731  }
732  if (kind & Safepoint::kWithRegisters) {
733  // Register cp always contains a pointer to the context.
734  safepoint.DefinePointerRegister(cp, zone());
735  }
736 }
737 
738 
739 void LCodeGen::RecordSafepoint(LPointerMap* pointers,
740  Safepoint::DeoptMode deopt_mode) {
741  RecordSafepoint(pointers, Safepoint::kSimple, 0, deopt_mode);
742 }
743 
744 
745 void LCodeGen::RecordSafepoint(Safepoint::DeoptMode deopt_mode) {
746  LPointerMap empty_pointers(RelocInfo::kNoPosition, zone());
747  RecordSafepoint(&empty_pointers, deopt_mode);
748 }
749 
750 
751 void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
752  int arguments,
753  Safepoint::DeoptMode deopt_mode) {
754  RecordSafepoint(
755  pointers, Safepoint::kWithRegisters, arguments, deopt_mode);
756 }
757 
758 
759 void LCodeGen::RecordSafepointWithRegistersAndDoubles(
760  LPointerMap* pointers,
761  int arguments,
762  Safepoint::DeoptMode deopt_mode) {
763  RecordSafepoint(
764  pointers, Safepoint::kWithRegistersAndDoubles, arguments, deopt_mode);
765 }
766 
767 
768 void LCodeGen::RecordPosition(int position) {
769  if (position == RelocInfo::kNoPosition) return;
770  masm()->positions_recorder()->RecordPosition(position);
771 }
772 
773 
774 void LCodeGen::DoLabel(LLabel* label) {
775  if (label->is_loop_header()) {
776  Comment(";;; B%d - LOOP entry", label->block_id());
777  } else {
778  Comment(";;; B%d", label->block_id());
779  }
780  __ bind(label->label());
781  current_block_ = label->block_id();
782  DoGap(label);
783 }
784 
785 
786 void LCodeGen::DoParallelMove(LParallelMove* move) {
787  resolver_.Resolve(move);
788 }
789 
790 
791 void LCodeGen::DoGap(LGap* gap) {
792  for (int i = LGap::FIRST_INNER_POSITION;
793  i <= LGap::LAST_INNER_POSITION;
794  i++) {
795  LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i);
796  LParallelMove* move = gap->GetParallelMove(inner_pos);
797  if (move != NULL) DoParallelMove(move);
798  }
799 }
800 
801 
802 void LCodeGen::DoInstructionGap(LInstructionGap* instr) {
803  DoGap(instr);
804 }
805 
806 
807 void LCodeGen::DoParameter(LParameter* instr) {
808  // Nothing to do.
809 }
810 
811 
812 void LCodeGen::DoCallStub(LCallStub* instr) {
813  ASSERT(ToRegister(instr->result()).is(v0));
814  switch (instr->hydrogen()->major_key()) {
815  case CodeStub::RegExpConstructResult: {
816  RegExpConstructResultStub stub;
817  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
818  break;
819  }
820  case CodeStub::RegExpExec: {
821  RegExpExecStub stub;
822  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
823  break;
824  }
825  case CodeStub::SubString: {
826  SubStringStub stub;
827  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
828  break;
829  }
830  case CodeStub::NumberToString: {
831  NumberToStringStub stub;
832  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
833  break;
834  }
835  case CodeStub::StringAdd: {
836  StringAddStub stub(NO_STRING_ADD_FLAGS);
837  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
838  break;
839  }
840  case CodeStub::StringCompare: {
841  StringCompareStub stub;
842  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
843  break;
844  }
845  case CodeStub::TranscendentalCache: {
846  __ lw(a0, MemOperand(sp, 0));
847  TranscendentalCacheStub stub(instr->transcendental_type(),
848  TranscendentalCacheStub::TAGGED);
849  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
850  break;
851  }
852  default:
853  UNREACHABLE();
854  }
855 }
856 
857 
858 void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
859  // Nothing to do.
860 }
861 
862 
863 void LCodeGen::DoModI(LModI* instr) {
864  Register scratch = scratch0();
865  const Register left = ToRegister(instr->InputAt(0));
866  const Register result = ToRegister(instr->result());
867 
868  Label done;
869 
870  if (instr->hydrogen()->HasPowerOf2Divisor()) {
871  Register scratch = scratch0();
872  ASSERT(!left.is(scratch));
873  __ mov(scratch, left);
874  int32_t p2constant = HConstant::cast(
875  instr->hydrogen()->right())->Integer32Value();
876  ASSERT(p2constant != 0);
877  // Result always takes the sign of the dividend (left).
878  p2constant = abs(p2constant);
879 
880  Label positive_dividend;
881  __ Branch(USE_DELAY_SLOT, &positive_dividend, ge, left, Operand(zero_reg));
882  __ subu(result, zero_reg, left);
883  __ And(result, result, p2constant - 1);
884  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
885  DeoptimizeIf(eq, instr->environment(), result, Operand(zero_reg));
886  }
887  __ Branch(USE_DELAY_SLOT, &done);
888  __ subu(result, zero_reg, result);
889  __ bind(&positive_dividend);
890  __ And(result, scratch, p2constant - 1);
891  } else {
892  // div runs in the background while we check for special cases.
893  Register right = EmitLoadRegister(instr->InputAt(1), scratch);
894  __ div(left, right);
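 // The remainder will be read from HI (mfhi) below, after the
 // divide-by-zero check has had a chance to deoptimize.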
895 
896  // Check for x % 0.
897  if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
898  DeoptimizeIf(eq, instr->environment(), right, Operand(zero_reg));
899  }
900 
901  __ Branch(USE_DELAY_SLOT, &done, ge, left, Operand(zero_reg));
902  __ mfhi(result);
903 
904  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
905  DeoptimizeIf(eq, instr->environment(), result, Operand(zero_reg));
906  }
907  }
908  __ bind(&done);
909 }
910 
911 
912 void LCodeGen::DoDivI(LDivI* instr) {
913  const Register left = ToRegister(instr->InputAt(0));
914  const Register right = ToRegister(instr->InputAt(1));
915  const Register result = ToRegister(instr->result());
916 
917  // On MIPS div is asynchronous - it will run in the background while we
918  // check for special cases.
919  __ div(left, right);
920 
921  // Check for x / 0.
922  if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
923  DeoptimizeIf(eq, instr->environment(), right, Operand(zero_reg));
924  }
925 
926  // Check for (0 / -x) that will produce negative zero.
927  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
928  Label left_not_zero;
929  __ Branch(&left_not_zero, ne, left, Operand(zero_reg));
930  DeoptimizeIf(lt, instr->environment(), right, Operand(zero_reg));
931  __ bind(&left_not_zero);
932  }
933 
934  // Check for (-kMinInt / -1).
935  if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
936  Label left_not_min_int;
937  __ Branch(&left_not_min_int, ne, left, Operand(kMinInt));
938  DeoptimizeIf(eq, instr->environment(), right, Operand(-1));
939  __ bind(&left_not_min_int);
940  }
941 
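 // div leaves the quotient in LO and the remainder in HI. Deoptimize if the
 // remainder is non-zero, i.e. the division result is not a whole integer,
 // then fetch the quotient.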
942  __ mfhi(result);
943  DeoptimizeIf(ne, instr->environment(), result, Operand(zero_reg));
944  __ mflo(result);
945 }
946 
947 
948 void LCodeGen::DoMulI(LMulI* instr) {
949  Register scratch = scratch0();
950  Register result = ToRegister(instr->result());
951  // Note that result may alias left.
952  Register left = ToRegister(instr->InputAt(0));
953  LOperand* right_op = instr->InputAt(1);
954 
955  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
956  bool bailout_on_minus_zero =
957  instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero);
958 
959  if (right_op->IsConstantOperand() && !can_overflow) {
960  // Use optimized code for specific constants.
961  int32_t constant = ToInteger32(LConstantOperand::cast(right_op));
962 
963  if (bailout_on_minus_zero && (constant < 0)) {
964  // The case of a zero constant is handled separately.
965  // If the constant is negative and left is zero, the result should be -0.
966  DeoptimizeIf(eq, instr->environment(), left, Operand(zero_reg));
967  }
968 
969  switch (constant) {
970  case -1:
971  __ Subu(result, zero_reg, left);
972  break;
973  case 0:
974  if (bailout_on_minus_zero) {
975  // If left is strictly negative and the constant is zero, the
976  // result is -0. Deoptimize if required, otherwise return 0.
977  DeoptimizeIf(lt, instr->environment(), left, Operand(zero_reg));
978  }
979  __ mov(result, zero_reg);
980  break;
981  case 1:
982  // Nothing to do.
983  __ Move(result, left);
984  break;
985  default:
986  // Multiplying by powers of two and powers of two plus or minus
987  // one can be done faster with shifted operands.
988  // For other constants we emit standard code.
989  int32_t mask = constant >> 31;
990  uint32_t constant_abs = (constant + mask) ^ mask;
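 // mask is 0 for non-negative constants and -1 for negative ones, so
 // (constant + mask) ^ mask is a branch-free |constant|: e.g. for -6,
 // (-6 + -1) ^ -1 == 6. A |constant| of the form 2^k, 2^k + 1 or 2^k - 1
 // is then reduced to a shift plus at most one add or sub, e.g. 5 * left
 // becomes (left << 2) + left and 7 * left becomes (left << 3) - left.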
991 
992  if (IsPowerOf2(constant_abs) ||
993  IsPowerOf2(constant_abs - 1) ||
994  IsPowerOf2(constant_abs + 1)) {
995  if (IsPowerOf2(constant_abs)) {
996  int32_t shift = WhichPowerOf2(constant_abs);
997  __ sll(result, left, shift);
998  } else if (IsPowerOf2(constant_abs - 1)) {
999  int32_t shift = WhichPowerOf2(constant_abs - 1);
1000  __ sll(result, left, shift);
1001  __ Addu(result, result, left);
1002  } else if (IsPowerOf2(constant_abs + 1)) {
1003  int32_t shift = WhichPowerOf2(constant_abs + 1);
1004  __ sll(result, left, shift);
1005  __ Subu(result, result, left);
1006  }
1007 
1008  // Correct the sign of the result if the constant is negative.
1009  if (constant < 0) {
1010  __ Subu(result, zero_reg, result);
1011  }
1012 
1013  } else {
1014  // Generate standard code.
1015  __ li(at, constant);
1016  __ Mul(result, left, at);
1017  }
1018  }
1019 
1020  } else {
1021  Register right = EmitLoadRegister(right_op, scratch);
1022  if (bailout_on_minus_zero) {
1023  __ Or(ToRegister(instr->TempAt(0)), left, right);
1024  }
1025 
1026  if (can_overflow) {
1027  // hi:lo = left * right.
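 // The 32-bit result did not overflow only if HI equals the sign extension
 // of LO; sra(at, result, 31) computes that sign extension, and a mismatch
 // deoptimizes.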
1028  __ mult(left, right);
1029  __ mfhi(scratch);
1030  __ mflo(result);
1031  __ sra(at, result, 31);
1032  DeoptimizeIf(ne, instr->environment(), scratch, Operand(at));
1033  } else {
1034  __ Mul(result, left, right);
1035  }
1036 
1037  if (bailout_on_minus_zero) {
1038  // Bail out if the result is supposed to be negative zero.
1039  Label done;
1040  __ Branch(&done, ne, result, Operand(zero_reg));
1041  DeoptimizeIf(lt,
1042  instr->environment(),
1043  ToRegister(instr->TempAt(0)),
1044  Operand(zero_reg));
1045  __ bind(&done);
1046  }
1047  }
1048 }
1049 
1050 
1051 void LCodeGen::DoBitI(LBitI* instr) {
1052  LOperand* left_op = instr->InputAt(0);
1053  LOperand* right_op = instr->InputAt(1);
1054  ASSERT(left_op->IsRegister());
1055  Register left = ToRegister(left_op);
1056  Register result = ToRegister(instr->result());
1057  Operand right(no_reg);
1058 
1059  if (right_op->IsStackSlot() || right_op->IsArgument()) {
1060  right = Operand(EmitLoadRegister(right_op, at));
1061  } else {
1062  ASSERT(right_op->IsRegister() || right_op->IsConstantOperand());
1063  right = ToOperand(right_op);
1064  }
1065 
1066  switch (instr->op()) {
1067  case Token::BIT_AND:
1068  __ And(result, left, right);
1069  break;
1070  case Token::BIT_OR:
1071  __ Or(result, left, right);
1072  break;
1073  case Token::BIT_XOR:
1074  __ Xor(result, left, right);
1075  break;
1076  default:
1077  UNREACHABLE();
1078  break;
1079  }
1080 }
1081 
1082 
1083 void LCodeGen::DoShiftI(LShiftI* instr) {
1084  // Both 'left' and 'right' are "used at start" (see LCodeGen::DoShift), so
1085  // result may alias either of them.
1086  LOperand* right_op = instr->InputAt(1);
1087  Register left = ToRegister(instr->InputAt(0));
1088  Register result = ToRegister(instr->result());
1089 
1090  if (right_op->IsRegister()) {
1091  // No need to mask the right operand on MIPS; masking is built into the
1092  // variable shift instructions.
1093  switch (instr->op()) {
1094  case Token::SAR:
1095  __ srav(result, left, ToRegister(right_op));
1096  break;
1097  case Token::SHR:
1098  __ srlv(result, left, ToRegister(right_op));
1099  if (instr->can_deopt()) {
1100  DeoptimizeIf(lt, instr->environment(), result, Operand(zero_reg));
1101  }
1102  break;
1103  case Token::SHL:
1104  __ sllv(result, left, ToRegister(right_op));
1105  break;
1106  default:
1107  UNREACHABLE();
1108  break;
1109  }
1110  } else {
1111  // Mask the right_op operand.
1112  int value = ToInteger32(LConstantOperand::cast(right_op));
1113  uint8_t shift_count = static_cast<uint8_t>(value & 0x1F);
1114  switch (instr->op()) {
1115  case Token::SAR:
1116  if (shift_count != 0) {
1117  __ sra(result, left, shift_count);
1118  } else {
1119  __ Move(result, left);
1120  }
1121  break;
1122  case Token::SHR:
1123  if (shift_count != 0) {
1124  __ srl(result, left, shift_count);
1125  } else {
1126  if (instr->can_deopt()) {
1127  __ And(at, left, Operand(0x80000000));
1128  DeoptimizeIf(ne, instr->environment(), at, Operand(zero_reg));
1129  }
1130  __ Move(result, left);
1131  }
1132  break;
1133  case Token::SHL:
1134  if (shift_count != 0) {
1135  __ sll(result, left, shift_count);
1136  } else {
1137  __ Move(result, left);
1138  }
1139  break;
1140  default:
1141  UNREACHABLE();
1142  break;
1143  }
1144  }
1145 }
1146 
1147 
1148 void LCodeGen::DoSubI(LSubI* instr) {
1149  LOperand* left = instr->InputAt(0);
1150  LOperand* right = instr->InputAt(1);
1151  LOperand* result = instr->result();
1152  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
1153 
1154  if (!can_overflow) {
1155  if (right->IsStackSlot() || right->IsArgument()) {
1156  Register right_reg = EmitLoadRegister(right, at);
1157  __ Subu(ToRegister(result), ToRegister(left), Operand(right_reg));
1158  } else {
1159  ASSERT(right->IsRegister() || right->IsConstantOperand());
1160  __ Subu(ToRegister(result), ToRegister(left), ToOperand(right));
1161  }
1162  } else { // can_overflow.
1163  Register overflow = scratch0();
1164  Register scratch = scratch1();
1165  if (right->IsStackSlot() ||
1166  right->IsArgument() ||
1167  right->IsConstantOperand()) {
1168  Register right_reg = EmitLoadRegister(right, scratch);
1169  __ SubuAndCheckForOverflow(ToRegister(result),
1170  ToRegister(left),
1171  right_reg,
1172  overflow); // Reg at also used as scratch.
1173  } else {
1174  ASSERT(right->IsRegister());
1175  // Because the overflow check macros do not support constant operands,
1176  // the IsConstantOperand case is handled in the previous clause.
1177  __ SubuAndCheckForOverflow(ToRegister(result),
1178  ToRegister(left),
1179  ToRegister(right),
1180  overflow); // Reg at also used as scratch.
1181  }
1182  DeoptimizeIf(lt, instr->environment(), overflow, Operand(zero_reg));
1183  }
1184 }
1185 
1186 
1187 void LCodeGen::DoConstantI(LConstantI* instr) {
1188  ASSERT(instr->result()->IsRegister());
1189  __ li(ToRegister(instr->result()), Operand(instr->value()));
1190 }
1191 
1192 
1193 void LCodeGen::DoConstantD(LConstantD* instr) {
1194  ASSERT(instr->result()->IsDoubleRegister());
1195  DoubleRegister result = ToDoubleRegister(instr->result());
1196  double v = instr->value();
1197  __ Move(result, v);
1198 }
1199 
1200 
1201 void LCodeGen::DoConstantT(LConstantT* instr) {
1202  Handle<Object> value = instr->value();
1203  if (value->IsSmi()) {
1204  __ li(ToRegister(instr->result()), Operand(value));
1205  } else {
1206  __ LoadHeapObject(ToRegister(instr->result()),
1207  Handle<HeapObject>::cast(value));
1208  }
1209 }
1210 
1211 
1212 void LCodeGen::DoJSArrayLength(LJSArrayLength* instr) {
1213  Register result = ToRegister(instr->result());
1214  Register array = ToRegister(instr->InputAt(0));
1215  __ lw(result, FieldMemOperand(array, JSArray::kLengthOffset));
1216 }
1217 
1218 
1219 void LCodeGen::DoFixedArrayBaseLength(LFixedArrayBaseLength* instr) {
1220  Register result = ToRegister(instr->result());
1221  Register array = ToRegister(instr->InputAt(0));
1222  __ lw(result, FieldMemOperand(array, FixedArrayBase::kLengthOffset));
1223 }
1224 
1225 
1226 void LCodeGen::DoElementsKind(LElementsKind* instr) {
1227  Register result = ToRegister(instr->result());
1228  Register input = ToRegister(instr->InputAt(0));
1229 
1230  // Load map into |result|.
1231  __ lw(result, FieldMemOperand(input, HeapObject::kMapOffset));
1232  // Load the map's "bit field 2" into |result|. We only need the first byte,
1233  // but the following bit field extraction takes care of that anyway.
1234  __ lbu(result, FieldMemOperand(result, Map::kBitField2Offset));
1235  // Retrieve elements_kind from bit field 2.
1236  __ Ext(result, result, Map::kElementsKindShift, Map::kElementsKindBitCount);
1237 }
1238 
1239 
1240 void LCodeGen::DoValueOf(LValueOf* instr) {
1241  Register input = ToRegister(instr->InputAt(0));
1242  Register result = ToRegister(instr->result());
1243  Register map = ToRegister(instr->TempAt(0));
1244  Label done;
1245 
1246  // If the object is a smi return the object.
1247  __ Move(result, input);
1248  __ JumpIfSmi(input, &done);
1249 
1250  // If the object is not a value type, return the object.
1251  __ GetObjectType(input, map, map);
1252  __ Branch(&done, ne, map, Operand(JS_VALUE_TYPE));
1253  __ lw(result, FieldMemOperand(input, JSValue::kValueOffset));
1254 
1255  __ bind(&done);
1256 }
1257 
1258 
1259 void LCodeGen::DoDateField(LDateField* instr) {
1260  Register object = ToRegister(instr->InputAt(0));
1261  Register result = ToRegister(instr->result());
1262  Register scratch = ToRegister(instr->TempAt(0));
1263  Smi* index = instr->index();
1264  Label runtime, done;
1265  ASSERT(object.is(a0));
1266  ASSERT(result.is(v0));
1267  ASSERT(!scratch.is(scratch0()));
1268  ASSERT(!scratch.is(object));
1269 
1270 #ifdef DEBUG
1271  __ AbortIfSmi(object);
1272  __ GetObjectType(object, scratch, scratch);
1273  __ Assert(eq, "Trying to get date field from non-date.",
1274  scratch, Operand(JS_DATE_TYPE));
1275 #endif
1276 
1277  if (index->value() == 0) {
1278  __ lw(result, FieldMemOperand(object, JSDate::kValueOffset));
1279  } else {
1280  if (index->value() < JSDate::kFirstUncachedField) {
1281  ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
1282  __ li(scratch, Operand(stamp));
1283  __ lw(scratch, MemOperand(scratch));
1284  __ lw(scratch0(), FieldMemOperand(object, JSDate::kCacheStampOffset));
1285  __ Branch(&runtime, ne, scratch, Operand(scratch0()));
1286  __ lw(result, FieldMemOperand(object, JSDate::kValueOffset +
1287  kPointerSize * index->value()));
1288  __ jmp(&done);
1289  }
1290  __ bind(&runtime);
1291  __ PrepareCallCFunction(2, scratch);
1292  __ li(a1, Operand(index));
1293  __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
1294  __ bind(&done);
1295  }
1296 }
1297 
1298 
1299 void LCodeGen::DoBitNotI(LBitNotI* instr) {
1300  Register input = ToRegister(instr->InputAt(0));
1301  Register result = ToRegister(instr->result());
1302  __ Nor(result, zero_reg, Operand(input));
1303 }
1304 
1305 
1306 void LCodeGen::DoThrow(LThrow* instr) {
1307  Register input_reg = EmitLoadRegister(instr->InputAt(0), at);
1308  __ push(input_reg);
1309  CallRuntime(Runtime::kThrow, 1, instr);
1310 
1311  if (FLAG_debug_code) {
1312  __ stop("Unreachable code.");
1313  }
1314 }
1315 
1316 
1317 void LCodeGen::DoAddI(LAddI* instr) {
1318  LOperand* left = instr->InputAt(0);
1319  LOperand* right = instr->InputAt(1);
1320  LOperand* result = instr->result();
1321  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
1322 
1323  if (!can_overflow) {
1324  if (right->IsStackSlot() || right->IsArgument()) {
1325  Register right_reg = EmitLoadRegister(right, at);
1326  __ Addu(ToRegister(result), ToRegister(left), Operand(right_reg));
1327  } else {
1328  ASSERT(right->IsRegister() || right->IsConstantOperand());
1329  __ Addu(ToRegister(result), ToRegister(left), ToOperand(right));
1330  }
1331  } else { // can_overflow.
1332  Register overflow = scratch0();
1333  Register scratch = scratch1();
1334  if (right->IsStackSlot() ||
1335  right->IsArgument() ||
1336  right->IsConstantOperand()) {
1337  Register right_reg = EmitLoadRegister(right, scratch);
1338  __ AdduAndCheckForOverflow(ToRegister(result),
1339  ToRegister(left),
1340  right_reg,
1341  overflow); // Reg at also used as scratch.
1342  } else {
1343  ASSERT(right->IsRegister());
1344  // Because the overflow check macros do not support constant operands,
1345  // the IsConstantOperand case is handled in the previous clause.
1346  __ AdduAndCheckForOverflow(ToRegister(result),
1347  ToRegister(left),
1348  ToRegister(right),
1349  overflow); // Reg at also used as scratch.
1350  }
1351  DeoptimizeIf(lt, instr->environment(), overflow, Operand(zero_reg));
1352  }
1353 }
1354 
1355 
1356 void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
1357  DoubleRegister left = ToDoubleRegister(instr->InputAt(0));
1358  DoubleRegister right = ToDoubleRegister(instr->InputAt(1));
1359  DoubleRegister result = ToDoubleRegister(instr->result());
1360  switch (instr->op()) {
1361  case Token::ADD:
1362  __ add_d(result, left, right);
1363  break;
1364  case Token::SUB:
1365  __ sub_d(result, left, right);
1366  break;
1367  case Token::MUL:
1368  __ mul_d(result, left, right);
1369  break;
1370  case Token::DIV:
1371  __ div_d(result, left, right);
1372  break;
1373  case Token::MOD: {
1374  // Save a0-a3 on the stack.
1375  RegList saved_regs = a0.bit() | a1.bit() | a2.bit() | a3.bit();
1376  __ MultiPush(saved_regs);
1377 
1378  __ PrepareCallCFunction(0, 2, scratch0());
1379  __ SetCallCDoubleArguments(left, right);
1380  __ CallCFunction(
1381  ExternalReference::double_fp_operation(Token::MOD, isolate()),
1382  0, 2);
1383  // Move the result in the double result register.
1384  __ GetCFunctionDoubleResult(result);
1385 
1386  // Restore saved register.
1387  __ MultiPop(saved_regs);
1388  break;
1389  }
1390  default:
1391  UNREACHABLE();
1392  break;
1393  }
1394 }
1395 
1396 
1397 void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
1398  ASSERT(ToRegister(instr->InputAt(0)).is(a1));
1399  ASSERT(ToRegister(instr->InputAt(1)).is(a0));
1400  ASSERT(ToRegister(instr->result()).is(v0));
1401 
1402  BinaryOpStub stub(instr->op(), NO_OVERWRITE);
1403  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
1404  // Other architectures use a nop here to signal that there is no inlined
1405  // patchable code. MIPS does not need the nop, since our marker
1406  // instruction (andi zero_reg) will never be used in normal code.
1407 }
1408 
1409 
1410 int LCodeGen::GetNextEmittedBlock(int block) {
1411  for (int i = block + 1; i < graph()->blocks()->length(); ++i) {
1412  LLabel* label = chunk_->GetLabel(i);
1413  if (!label->HasReplacement()) return i;
1414  }
1415  return -1;
1416 }
1417 
1418 
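// Emits a conditional branch to two target blocks. When one target is the
// next block in emission order, a single branch (with the condition negated
// if necessary) is emitted and the other case falls through.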
1419 void LCodeGen::EmitBranch(int left_block, int right_block,
1420  Condition cc, Register src1, const Operand& src2) {
1421  int next_block = GetNextEmittedBlock(current_block_);
1422  right_block = chunk_->LookupDestination(right_block);
1423  left_block = chunk_->LookupDestination(left_block);
1424  if (right_block == left_block) {
1425  EmitGoto(left_block);
1426  } else if (left_block == next_block) {
1427  __ Branch(chunk_->GetAssemblyLabel(right_block),
1428  NegateCondition(cc), src1, src2);
1429  } else if (right_block == next_block) {
1430  __ Branch(chunk_->GetAssemblyLabel(left_block), cc, src1, src2);
1431  } else {
1432  __ Branch(chunk_->GetAssemblyLabel(left_block), cc, src1, src2);
1433  __ Branch(chunk_->GetAssemblyLabel(right_block));
1434  }
1435 }
1436 
1437 
1438 void LCodeGen::EmitBranchF(int left_block, int right_block,
1439  Condition cc, FPURegister src1, FPURegister src2) {
1440  int next_block = GetNextEmittedBlock(current_block_);
1441  right_block = chunk_->LookupDestination(right_block);
1442  left_block = chunk_->LookupDestination(left_block);
1443  if (right_block == left_block) {
1444  EmitGoto(left_block);
1445  } else if (left_block == next_block) {
1446  __ BranchF(chunk_->GetAssemblyLabel(right_block), NULL,
1447  NegateCondition(cc), src1, src2);
1448  } else if (right_block == next_block) {
1449  __ BranchF(chunk_->GetAssemblyLabel(left_block), NULL, cc, src1, src2);
1450  } else {
1451  __ BranchF(chunk_->GetAssemblyLabel(left_block), NULL, cc, src1, src2);
1452  __ Branch(chunk_->GetAssemblyLabel(right_block));
1453  }
1454 }
1455 
1456 
1457 void LCodeGen::DoBranch(LBranch* instr) {
1458  int true_block = chunk_->LookupDestination(instr->true_block_id());
1459  int false_block = chunk_->LookupDestination(instr->false_block_id());
1460 
1461  Representation r = instr->hydrogen()->value()->representation();
1462  if (r.IsInteger32()) {
1463  Register reg = ToRegister(instr->InputAt(0));
1464  EmitBranch(true_block, false_block, ne, reg, Operand(zero_reg));
1465  } else if (r.IsDouble()) {
1466  DoubleRegister reg = ToDoubleRegister(instr->InputAt(0));
1467  // Test the double value. Zero and NaN are false.
1468  EmitBranchF(true_block, false_block, ne, reg, kDoubleRegZero);
1469  } else {
1470  ASSERT(r.IsTagged());
1471  Register reg = ToRegister(instr->InputAt(0));
1472  HType type = instr->hydrogen()->value()->type();
1473  if (type.IsBoolean()) {
1474  __ LoadRoot(at, Heap::kTrueValueRootIndex);
1475  EmitBranch(true_block, false_block, eq, reg, Operand(at));
1476  } else if (type.IsSmi()) {
1477  EmitBranch(true_block, false_block, ne, reg, Operand(zero_reg));
1478  } else {
1479  Label* true_label = chunk_->GetAssemblyLabel(true_block);
1480  Label* false_label = chunk_->GetAssemblyLabel(false_block);
1481 
1482  ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types();
1483  // Avoid deopts in the case where we've never executed this path before.
1484  if (expected.IsEmpty()) expected = ToBooleanStub::all_types();
1485 
1486  if (expected.Contains(ToBooleanStub::UNDEFINED)) {
1487  // undefined -> false.
1488  __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
1489  __ Branch(false_label, eq, reg, Operand(at));
1490  }
1491  if (expected.Contains(ToBooleanStub::BOOLEAN)) {
1492  // Boolean -> its value.
1493  __ LoadRoot(at, Heap::kTrueValueRootIndex);
1494  __ Branch(true_label, eq, reg, Operand(at));
1495  __ LoadRoot(at, Heap::kFalseValueRootIndex);
1496  __ Branch(false_label, eq, reg, Operand(at));
1497  }
1498  if (expected.Contains(ToBooleanStub::NULL_TYPE)) {
1499  // 'null' -> false.
1500  __ LoadRoot(at, Heap::kNullValueRootIndex);
1501  __ Branch(false_label, eq, reg, Operand(at));
1502  }
1503 
1504  if (expected.Contains(ToBooleanStub::SMI)) {
1505  // Smis: 0 -> false, all other -> true.
1506  __ Branch(false_label, eq, reg, Operand(zero_reg));
1507  __ JumpIfSmi(reg, true_label);
1508  } else if (expected.NeedsMap()) {
1509  // If we need a map later and have a Smi -> deopt.
1510  __ And(at, reg, Operand(kSmiTagMask));
1511  DeoptimizeIf(eq, instr->environment(), at, Operand(zero_reg));
1512  }
1513 
1514  const Register map = scratch0();
1515  if (expected.NeedsMap()) {
1516  __ lw(map, FieldMemOperand(reg, HeapObject::kMapOffset));
1517  if (expected.CanBeUndetectable()) {
1518  // Undetectable -> false.
1519  __ lbu(at, FieldMemOperand(map, Map::kBitFieldOffset));
1520  __ And(at, at, Operand(1 << Map::kIsUndetectable));
1521  __ Branch(false_label, ne, at, Operand(zero_reg));
1522  }
1523  }
1524 
1525  if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) {
1526  // spec object -> true.
1527  __ GetObjectType(reg, map, at);
1528  __ Branch(true_label, ge, at, Operand(FIRST_SPEC_OBJECT_TYPE));
1529  }
1530 
1531  if (expected.Contains(ToBooleanStub::STRING)) {
1532  // String value -> false iff empty.
1533  Label not_string;
1534  __ GetObjectType(reg, map, at);
1535  __ Branch(&not_string, ge, at, Operand(FIRST_NONSTRING_TYPE));
1536  __ lw(at, FieldMemOperand(reg, String::kLengthOffset));
1537  __ Branch(true_label, ne, at, Operand(zero_reg));
1538  __ Branch(false_label);
1539  __ bind(&not_string);
1540  }
1541 
1542  if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
1543  // heap number -> false iff +0, -0, or NaN.
1544  DoubleRegister dbl_scratch = double_scratch0();
1545  Label not_heap_number;
1546  __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
1547  __ Branch(&not_heap_number, ne, map, Operand(at));
1548  __ ldc1(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset));
1549  __ BranchF(true_label, false_label, ne, dbl_scratch, kDoubleRegZero);
1550  // Falls through if dbl_scratch == 0.
1551  __ Branch(false_label);
1552  __ bind(&not_heap_number);
1553  }
1554 
1555  // We've seen something for the first time -> deopt.
1556  DeoptimizeIf(al, instr->environment(), zero_reg, Operand(zero_reg));
1557  }
1558  }
1559 }
1560 
1561 
1562 void LCodeGen::EmitGoto(int block) {
1563  block = chunk_->LookupDestination(block);
1564  int next_block = GetNextEmittedBlock(current_block_);
1565  if (block != next_block) {
1566  __ jmp(chunk_->GetAssemblyLabel(block));
1567  }
1568 }
1569 
1570 
1571 void LCodeGen::DoGoto(LGoto* instr) {
1572  EmitGoto(instr->block_id());
1573 }
1574 
1575 
1576 Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
1577  Condition cond = kNoCondition;
1578  switch (op) {
1579  case Token::EQ:
1580  case Token::EQ_STRICT:
1581  cond = eq;
1582  break;
1583  case Token::LT:
1584  cond = is_unsigned ? lo : lt;
1585  break;
1586  case Token::GT:
1587  cond = is_unsigned ? hi : gt;
1588  break;
1589  case Token::LTE:
1590  cond = is_unsigned ? ls : le;
1591  break;
1592  case Token::GTE:
1593  cond = is_unsigned ? hs : ge;
1594  break;
1595  case Token::IN:
1596  case Token::INSTANCEOF:
1597  default:
1598  UNREACHABLE();
1599  }
1600  return cond;
1601 }
1602 
1603 
1604 void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) {
1605  LOperand* left = instr->InputAt(0);
1606  LOperand* right = instr->InputAt(1);
1607  int false_block = chunk_->LookupDestination(instr->false_block_id());
1608  int true_block = chunk_->LookupDestination(instr->true_block_id());
1609 
1610  Condition cond = TokenToCondition(instr->op(), false);
1611 
1612  if (left->IsConstantOperand() && right->IsConstantOperand()) {
1613  // We can statically evaluate the comparison.
1614  double left_val = ToDouble(LConstantOperand::cast(left));
1615  double right_val = ToDouble(LConstantOperand::cast(right));
1616  int next_block =
1617  EvalComparison(instr->op(), left_val, right_val) ? true_block
1618  : false_block;
1619  EmitGoto(next_block);
1620  } else {
1621  if (instr->is_double()) {
1622  // Compare left and right as doubles and load the
1623  // resulting flags into the normal status register.
1624  FPURegister left_reg = ToDoubleRegister(left);
1625  FPURegister right_reg = ToDoubleRegister(right);
1626 
1627  // If a NaN is involved, i.e. the result is unordered,
1628  // jump to false block label.
1629  __ BranchF(NULL, chunk_->GetAssemblyLabel(false_block), eq,
1630  left_reg, right_reg);
1631 
1632  EmitBranchF(true_block, false_block, cond, left_reg, right_reg);
1633  } else {
1634  Register cmp_left;
1635  Operand cmp_right = Operand(0);
1636 
1637  if (right->IsConstantOperand()) {
1638  cmp_left = ToRegister(left);
1639  cmp_right = Operand(ToInteger32(LConstantOperand::cast(right)));
1640  } else if (left->IsConstantOperand()) {
1641  cmp_left = ToRegister(right);
1642  cmp_right = Operand(ToInteger32(LConstantOperand::cast(left)));
1643  // We transposed the operands. Reverse the condition.
1644  cond = ReverseCondition(cond);
1645  } else {
1646  cmp_left = ToRegister(left);
1647  cmp_right = Operand(ToRegister(right));
1648  }
1649 
1650  EmitBranch(true_block, false_block, cond, cmp_left, cmp_right);
1651  }
1652  }
1653 }
1654 
1655 
1656 void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
1657  Register left = ToRegister(instr->InputAt(0));
1658  Register right = ToRegister(instr->InputAt(1));
1659  int false_block = chunk_->LookupDestination(instr->false_block_id());
1660  int true_block = chunk_->LookupDestination(instr->true_block_id());
1661 
1662  EmitBranch(true_block, false_block, eq, left, Operand(right));
1663 }
1664 
1665 
1666 void LCodeGen::DoCmpConstantEqAndBranch(LCmpConstantEqAndBranch* instr) {
1667  Register left = ToRegister(instr->InputAt(0));
1668  int true_block = chunk_->LookupDestination(instr->true_block_id());
1669  int false_block = chunk_->LookupDestination(instr->false_block_id());
1670 
1671  EmitBranch(true_block, false_block, eq, left,
1672  Operand(instr->hydrogen()->right()));
1673 }
1674 
1675 
1676 
1677 void LCodeGen::DoIsNilAndBranch(LIsNilAndBranch* instr) {
1678  Register scratch = scratch0();
1679  Register reg = ToRegister(instr->InputAt(0));
1680  int false_block = chunk_->LookupDestination(instr->false_block_id());
1681 
1682  // If the expression is known to be untagged or a smi, then it's definitely
1683  // not null, and it can't be an undetectable object.
1684  if (instr->hydrogen()->representation().IsSpecialization() ||
1685  instr->hydrogen()->type().IsSmi()) {
1686  EmitGoto(false_block);
1687  return;
1688  }
1689 
1690  int true_block = chunk_->LookupDestination(instr->true_block_id());
1691 
1692  Heap::RootListIndex nil_value = instr->nil() == kNullValue ?
1693  Heap::kNullValueRootIndex :
1694  Heap::kUndefinedValueRootIndex;
1695  __ LoadRoot(at, nil_value);
1696  if (instr->kind() == kStrictEquality) {
1697  EmitBranch(true_block, false_block, eq, reg, Operand(at));
1698  } else {
1699  Heap::RootListIndex other_nil_value = instr->nil() == kNullValue ?
1700  Heap::kUndefinedValueRootIndex :
1701  Heap::kNullValueRootIndex;
1702  Label* true_label = chunk_->GetAssemblyLabel(true_block);
1703  Label* false_label = chunk_->GetAssemblyLabel(false_block);
1704  __ Branch(USE_DELAY_SLOT, true_label, eq, reg, Operand(at));
1705  __ LoadRoot(at, other_nil_value); // In the delay slot.
1706  __ Branch(USE_DELAY_SLOT, true_label, eq, reg, Operand(at));
1707  __ JumpIfSmi(reg, false_label); // In the delay slot.
1708  // Check for undetectable objects by looking in the bit field in
1709  // the map. The object has already been smi checked.
1710  __ lw(scratch, FieldMemOperand(reg, HeapObject::kMapOffset));
1711  __ lbu(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
1712  __ And(scratch, scratch, 1 << Map::kIsUndetectable);
1713  EmitBranch(true_block, false_block, ne, scratch, Operand(zero_reg));
1714  }
1715 }
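// In the non-strict case above, 'x == null' must also hold for undetectable
// objects, so after ruling out null, undefined and smis the code inspects the
// map's bit field. Roughly, the final test is equivalent to
//
//   (map->bit_field() & (1 << Map::kIsUndetectable)) != 0
//
// branching to the true block when the undetectable bit is set (illustrative
// C++ form of the lbu/And/EmitBranch sequence above, not generated code).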
1716 
1717 
1718 Condition LCodeGen::EmitIsObject(Register input,
1719  Register temp1,
1720  Register temp2,
1721  Label* is_not_object,
1722  Label* is_object) {
1723  __ JumpIfSmi(input, is_not_object);
1724 
1725  __ LoadRoot(temp2, Heap::kNullValueRootIndex);
1726  __ Branch(is_object, eq, input, Operand(temp2));
1727 
1728  // Load map.
1729  __ lw(temp1, FieldMemOperand(input, HeapObject::kMapOffset));
1730  // Undetectable objects behave like undefined.
1731  __ lbu(temp2, FieldMemOperand(temp1, Map::kBitFieldOffset));
1732  __ And(temp2, temp2, Operand(1 << Map::kIsUndetectable));
1733  __ Branch(is_not_object, ne, temp2, Operand(zero_reg));
1734 
1735  // Load instance type and check that it is in object type range.
1736  __ lbu(temp2, FieldMemOperand(temp1, Map::kInstanceTypeOffset));
1737  __ Branch(is_not_object,
1738  lt, temp2, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
1739 
1740  return le;
1741 }
1742 
1743 
1744 void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
1745  Register reg = ToRegister(instr->InputAt(0));
1746  Register temp1 = ToRegister(instr->TempAt(0));
1747  Register temp2 = scratch0();
1748 
1749  int true_block = chunk_->LookupDestination(instr->true_block_id());
1750  int false_block = chunk_->LookupDestination(instr->false_block_id());
1751  Label* true_label = chunk_->GetAssemblyLabel(true_block);
1752  Label* false_label = chunk_->GetAssemblyLabel(false_block);
1753 
1754  Condition true_cond =
1755  EmitIsObject(reg, temp1, temp2, false_label, true_label);
1756 
1757  EmitBranch(true_block, false_block, true_cond, temp2,
1758  Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
1759 }
1760 
1761 
1762 Condition LCodeGen::EmitIsString(Register input,
1763  Register temp1,
1764  Label* is_not_string) {
1765  __ JumpIfSmi(input, is_not_string);
1766  __ GetObjectType(input, temp1, temp1);
1767 
1768  return lt;
1769 }
1770 
1771 
1772 void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
1773  Register reg = ToRegister(instr->InputAt(0));
1774  Register temp1 = ToRegister(instr->TempAt(0));
1775 
1776  int true_block = chunk_->LookupDestination(instr->true_block_id());
1777  int false_block = chunk_->LookupDestination(instr->false_block_id());
1778  Label* false_label = chunk_->GetAssemblyLabel(false_block);
1779 
1780  Condition true_cond =
1781  EmitIsString(reg, temp1, false_label);
1782 
1783  EmitBranch(true_block, false_block, true_cond, temp1,
1784  Operand(FIRST_NONSTRING_TYPE));
1785 }
1786 
1787 
1788 void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
1789  int true_block = chunk_->LookupDestination(instr->true_block_id());
1790  int false_block = chunk_->LookupDestination(instr->false_block_id());
1791 
1792  Register input_reg = EmitLoadRegister(instr->InputAt(0), at);
1793  __ And(at, input_reg, kSmiTagMask);
1794  EmitBranch(true_block, false_block, eq, at, Operand(zero_reg));
1795 }
1796 
1797 
1798 void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
1799  Register input = ToRegister(instr->InputAt(0));
1800  Register temp = ToRegister(instr->TempAt(0));
1801 
1802  int true_block = chunk_->LookupDestination(instr->true_block_id());
1803  int false_block = chunk_->LookupDestination(instr->false_block_id());
1804 
1805  __ JumpIfSmi(input, chunk_->GetAssemblyLabel(false_block));
1806  __ lw(temp, FieldMemOperand(input, HeapObject::kMapOffset));
1807  __ lbu(temp, FieldMemOperand(temp, Map::kBitFieldOffset));
1808  __ And(at, temp, Operand(1 << Map::kIsUndetectable));
1809  EmitBranch(true_block, false_block, ne, at, Operand(zero_reg));
1810 }
1811 
1812 
1813 static Condition ComputeCompareCondition(Token::Value op) {
1814  switch (op) {
1815  case Token::EQ_STRICT:
1816  case Token::EQ:
1817  return eq;
1818  case Token::LT:
1819  return lt;
1820  case Token::GT:
1821  return gt;
1822  case Token::LTE:
1823  return le;
1824  case Token::GTE:
1825  return ge;
1826  default:
1827  UNREACHABLE();
1828  return kNoCondition;
1829  }
1830 }
1831 
1832 
1833 void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
1834  Token::Value op = instr->op();
1835  int true_block = chunk_->LookupDestination(instr->true_block_id());
1836  int false_block = chunk_->LookupDestination(instr->false_block_id());
1837 
1838  Handle<Code> ic = CompareIC::GetUninitialized(op);
1839  CallCode(ic, RelocInfo::CODE_TARGET, instr);
1840 
1841  Condition condition = ComputeCompareCondition(op);
1842 
1843  EmitBranch(true_block, false_block, condition, v0, Operand(zero_reg));
1844 }
1845 
1846 
1847 static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
1848  InstanceType from = instr->from();
1849  InstanceType to = instr->to();
1850  if (from == FIRST_TYPE) return to;
1851  ASSERT(from == to || to == LAST_TYPE);
1852  return from;
1853 }
1854 
1855 
1856 static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
1857  InstanceType from = instr->from();
1858  InstanceType to = instr->to();
1859  if (from == to) return eq;
1860  if (to == LAST_TYPE) return hs;
1861  if (from == FIRST_TYPE) return ls;
1862  UNREACHABLE();
1863  return eq;
1864 }
1865 
1866 
1867 void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
1868  Register scratch = scratch0();
1869  Register input = ToRegister(instr->InputAt(0));
1870 
1871  int true_block = chunk_->LookupDestination(instr->true_block_id());
1872  int false_block = chunk_->LookupDestination(instr->false_block_id());
1873 
1874  Label* false_label = chunk_->GetAssemblyLabel(false_block);
1875 
1876  __ JumpIfSmi(input, false_label);
1877 
1878  __ GetObjectType(input, scratch, scratch);
1879  EmitBranch(true_block,
1880  false_block,
1881  BranchCondition(instr->hydrogen()),
1882  scratch,
1883  Operand(TestType(instr->hydrogen())));
1884 }
1885 
1886 
1887 void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
1888  Register input = ToRegister(instr->InputAt(0));
1889  Register result = ToRegister(instr->result());
1890 
1891  if (FLAG_debug_code) {
1892  __ AbortIfNotString(input);
1893  }
1894 
1895  __ lw(result, FieldMemOperand(input, String::kHashFieldOffset));
1896  __ IndexFromHash(result, result);
1897 }
1898 
1899 
1900 void LCodeGen::DoHasCachedArrayIndexAndBranch(
1901  LHasCachedArrayIndexAndBranch* instr) {
1902  Register input = ToRegister(instr->InputAt(0));
1903  Register scratch = scratch0();
1904 
1905  int true_block = chunk_->LookupDestination(instr->true_block_id());
1906  int false_block = chunk_->LookupDestination(instr->false_block_id());
1907 
1908  __ lw(scratch,
1909  FieldMemOperand(input, String::kHashFieldOffset));
1910  __ And(at, scratch, Operand(String::kContainsCachedArrayIndexMask));
1911  EmitBranch(true_block, false_block, eq, at, Operand(zero_reg));
1912 }
1913 
1914 
1915 // Branches to a label or falls through with the answer in flags. Trashes
1916 // the temp registers, but not the input.
1917 void LCodeGen::EmitClassOfTest(Label* is_true,
1918  Label* is_false,
1919  Handle<String> class_name,
1920  Register input,
1921  Register temp,
1922  Register temp2) {
1923  ASSERT(!input.is(temp));
1924  ASSERT(!input.is(temp2));
1925  ASSERT(!temp.is(temp2));
1926 
1927  __ JumpIfSmi(input, is_false);
1928 
1929  if (class_name->IsEqualTo(CStrVector("Function"))) {
1930  // Assuming the following assertions, we can use the same compares to test
1931  // for both being a function type and being in the object type range.
1932  STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
1933  STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
1934  FIRST_SPEC_OBJECT_TYPE + 1);
1935  STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
1936  LAST_SPEC_OBJECT_TYPE - 1);
1937  STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
1938 
1939  __ GetObjectType(input, temp, temp2);
1940  __ Branch(is_false, lt, temp2, Operand(FIRST_SPEC_OBJECT_TYPE));
1941  __ Branch(is_true, eq, temp2, Operand(FIRST_SPEC_OBJECT_TYPE));
1942  __ Branch(is_true, eq, temp2, Operand(LAST_SPEC_OBJECT_TYPE));
1943  } else {
1944  // Faster code path to avoid two compares: subtract lower bound from the
1945  // actual type and do a signed compare with the width of the type range.
1946  __ GetObjectType(input, temp, temp2);
1947  __ Subu(temp2, temp2, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
1948  __ Branch(is_false, gt, temp2, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE -
1949  FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
1950  }
1951 
1952  // Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range.
1953  // Check if the constructor in the map is a function.
1954  __ lw(temp, FieldMemOperand(temp, Map::kConstructorOffset));
1955 
1956  // Objects with a non-function constructor have class 'Object'.
1957  __ GetObjectType(temp, temp2, temp2);
1958  if (class_name->IsEqualTo(CStrVector("Object"))) {
1959  __ Branch(is_true, ne, temp2, Operand(JS_FUNCTION_TYPE));
1960  } else {
1961  __ Branch(is_false, ne, temp2, Operand(JS_FUNCTION_TYPE));
1962  }
1963 
1964  // temp now contains the constructor function. Grab the
1965  // instance class name from there.
1966  __ lw(temp, FieldMemOperand(temp, JSFunction::kSharedFunctionInfoOffset));
1967  __ lw(temp, FieldMemOperand(temp,
1968  SharedFunctionInfo::kInstanceClassNameOffset));
1969  // The class name we are testing against is a symbol because it's a literal.
1970  // The name in the constructor is a symbol because of the way the context is
1971  // booted. This routine isn't expected to work for random API-created
1972  // classes and it doesn't have to because you can't access it with natives
1973  // syntax. Since both sides are symbols it is sufficient to use an identity
1974  // comparison.
1975 
1976  // End with the address of this class_name instance in temp register.
1977  // On MIPS, the caller must do the comparison with Handle<String> class_name.
1978 }
1979 
1980 
1981 void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
1982  Register input = ToRegister(instr->InputAt(0));
1983  Register temp = scratch0();
1984  Register temp2 = ToRegister(instr->TempAt(0));
1985  Handle<String> class_name = instr->hydrogen()->class_name();
1986 
1987  int true_block = chunk_->LookupDestination(instr->true_block_id());
1988  int false_block = chunk_->LookupDestination(instr->false_block_id());
1989 
1990  Label* true_label = chunk_->GetAssemblyLabel(true_block);
1991  Label* false_label = chunk_->GetAssemblyLabel(false_block);
1992 
1993  EmitClassOfTest(true_label, false_label, class_name, input, temp, temp2);
1994 
1995  EmitBranch(true_block, false_block, eq, temp, Operand(class_name));
1996 }
1997 
1998 
1999 void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
2000  Register reg = ToRegister(instr->InputAt(0));
2001  Register temp = ToRegister(instr->TempAt(0));
2002  int true_block = instr->true_block_id();
2003  int false_block = instr->false_block_id();
2004 
2005  __ lw(temp, FieldMemOperand(reg, HeapObject::kMapOffset));
2006  EmitBranch(true_block, false_block, eq, temp, Operand(instr->map()));
2007 }
2008 
2009 
2010 void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
2011  Label true_label, done;
2012  ASSERT(ToRegister(instr->InputAt(0)).is(a0)); // Object is in a0.
2013  ASSERT(ToRegister(instr->InputAt(1)).is(a1)); // Function is in a1.
2014  Register result = ToRegister(instr->result());
2015  ASSERT(result.is(v0));
2016 
2017  InstanceofStub stub(InstanceofStub::kArgsInRegisters);
2018  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
2019 
2020  __ Branch(&true_label, eq, result, Operand(zero_reg));
2021  __ li(result, Operand(factory()->false_value()));
2022  __ Branch(&done);
2023  __ bind(&true_label);
2024  __ li(result, Operand(factory()->true_value()));
2025  __ bind(&done);
2026 }
2027 
2028 
2029 void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
2030  class DeferredInstanceOfKnownGlobal: public LDeferredCode {
2031  public:
2032  DeferredInstanceOfKnownGlobal(LCodeGen* codegen,
2033  LInstanceOfKnownGlobal* instr)
2034  : LDeferredCode(codegen), instr_(instr) { }
2035  virtual void Generate() {
2036  codegen()->DoDeferredInstanceOfKnownGlobal(instr_, &map_check_);
2037  }
2038  virtual LInstruction* instr() { return instr_; }
2039  Label* map_check() { return &map_check_; }
2040 
2041  private:
2042  LInstanceOfKnownGlobal* instr_;
2043  Label map_check_;
2044  };
2045 
2046  DeferredInstanceOfKnownGlobal* deferred;
2047  deferred = new(zone()) DeferredInstanceOfKnownGlobal(this, instr);
2048 
2049  Label done, false_result;
2050  Register object = ToRegister(instr->InputAt(0));
2051  Register temp = ToRegister(instr->TempAt(0));
2052  Register result = ToRegister(instr->result());
2053 
2054  ASSERT(object.is(a0));
2055  ASSERT(result.is(v0));
2056 
2057  // A Smi is not an instance of anything.
2058  __ JumpIfSmi(object, &false_result);
2059 
2060  // This is the inlined call site instanceof cache. The two occurrences of the
2061  // hole value will be patched to the last map/result pair generated by the
2062  // instanceof stub.
2063  Label cache_miss;
2064  Register map = temp;
2065  __ lw(map, FieldMemOperand(object, HeapObject::kMapOffset));
2066 
2067  Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
2068  __ bind(deferred->map_check()); // Label for calculating code patching.
2069  // We use Factory::the_hole_value() on purpose instead of loading from the
2070  // root array to force relocation to be able to later patch with
2071  // the cached map.
2072  Handle<JSGlobalPropertyCell> cell =
2073  factory()->NewJSGlobalPropertyCell(factory()->the_hole_value());
2074  __ li(at, Operand(Handle<Object>(cell)));
2075  __ lw(at, FieldMemOperand(at, JSGlobalPropertyCell::kValueOffset));
2076  __ Branch(&cache_miss, ne, map, Operand(at));
2077  // We use Factory::the_hole_value() on purpose instead of loading from the
2078  // root array to force relocation to be able to later patch
2079  // with true or false.
2080  __ li(result, Operand(factory()->the_hole_value()), CONSTANT_SIZE);
2081  __ Branch(&done);
2082 
2083  // The inlined call site cache did not match. Check null and string before
2084  // calling the deferred code.
2085  __ bind(&cache_miss);
2086  // Null is not an instance of anything.
2087  __ LoadRoot(temp, Heap::kNullValueRootIndex);
2088  __ Branch(&false_result, eq, object, Operand(temp));
2089 
2090  // String values are not instances of anything.
2091  Condition cc = __ IsObjectStringType(object, temp, temp);
2092  __ Branch(&false_result, cc, temp, Operand(zero_reg));
2093 
2094  // Go to the deferred code.
2095  __ Branch(deferred->entry());
2096 
2097  __ bind(&false_result);
2098  __ LoadRoot(result, Heap::kFalseValueRootIndex);
2099 
2100  // Here result has either true or false. Deferred code also produces true or
2101  // false object.
2102  __ bind(deferred->exit());
2103  __ bind(&done);
2104 }
2105 
2106 
2107 void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
2108  Label* map_check) {
2109  Register result = ToRegister(instr->result());
2110  ASSERT(result.is(v0));
2111 
2112  InstanceofStub::Flags flags = InstanceofStub::kNoFlags;
2113  flags = static_cast<InstanceofStub::Flags>(
2114  flags | InstanceofStub::kArgsInRegisters);
2115  flags = static_cast<InstanceofStub::Flags>(
2116  flags | InstanceofStub::kCallSiteInlineCheck);
2117  flags = static_cast<InstanceofStub::Flags>(
2118  flags | InstanceofStub::kReturnTrueFalseObject);
2119  InstanceofStub stub(flags);
2120 
2121  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
2122 
2123  // Get the temp register reserved by the instruction. This needs to be t0
2124  // because its slot in the pushed safepoint register area is used to
2125  // communicate the offset to the location of the map check.
2126  Register temp = ToRegister(instr->TempAt(0));
2127  ASSERT(temp.is(t0));
2128  __ LoadHeapObject(InstanceofStub::right(), instr->function());
2129  static const int kAdditionalDelta = 7;
2130  int delta = masm_->InstructionsGeneratedSince(map_check) + kAdditionalDelta;
2131  Label before_push_delta;
2132  __ bind(&before_push_delta);
2133  {
2134  Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
2135  __ li(temp, Operand(delta * kPointerSize), CONSTANT_SIZE);
2136  __ StoreToSafepointRegisterSlot(temp, temp);
2137  }
2138  CallCodeGeneric(stub.GetCode(),
2139  RelocInfo::CODE_TARGET,
2140  instr,
2141  RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
2142  LEnvironment* env = instr->GetDeferredLazyDeoptimizationEnvironment();
2143  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
2144  // Put the result value into the result register slot and
2145  // restore all registers.
2146  __ StoreToSafepointRegisterSlot(result, result);
2147 }
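// The value written to the safepoint slot above encodes how far the patchable
// map-check site lies behind the point of the stub call: the stub reads it
// back to locate and patch the inlined cell and the inlined result. As a
// rough worked example (instruction counts are illustrative): if 12
// instructions have been generated since map_check, the slot receives
// (12 + kAdditionalDelta) * kPointerSize = (12 + 7) * 4 = 76.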
2148 
2149 
2150 void LCodeGen::DoCmpT(LCmpT* instr) {
2151  Token::Value op = instr->op();
2152 
2153  Handle<Code> ic = CompareIC::GetUninitialized(op);
2154  CallCode(ic, RelocInfo::CODE_TARGET, instr);
2155  // On MIPS there is no need for a "no inlined smi code" marker (nop).
2156 
2157  Condition condition = ComputeCompareCondition(op);
2158  // A minor optimization that relies on LoadRoot always emitting one
2159  // instruction.
2160  Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm());
2161  Label done;
2162  __ Branch(USE_DELAY_SLOT, &done, condition, v0, Operand(zero_reg));
2163  __ LoadRoot(ToRegister(instr->result()), Heap::kTrueValueRootIndex);
2164  __ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex);
2165  ASSERT_EQ(3, masm()->InstructionsGeneratedSince(&done));
2166  __ bind(&done);
2167 }
2168 
2169 
2170 void LCodeGen::DoReturn(LReturn* instr) {
2171  if (FLAG_trace) {
2172  // Push the return value on the stack as the parameter.
2173  // Runtime::TraceExit returns its parameter in v0.
2174  __ push(v0);
2175  __ CallRuntime(Runtime::kTraceExit, 1);
2176  }
2177  int32_t sp_delta = (GetParameterCount() + 1) * kPointerSize;
2178  __ mov(sp, fp);
2179  __ Pop(ra, fp);
2180  __ Addu(sp, sp, Operand(sp_delta));
2181  __ Jump(ra);
2182 }
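// The '+ 1' in sp_delta accounts for the receiver, which occupies a stack
// slot just like the declared parameters. Worked example: a function with
// two parameters pops (2 + 1) * kPointerSize = 12 bytes of arguments after
// restoring fp and ra.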
2183 
2184 
2185 void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
2186  Register result = ToRegister(instr->result());
2187  __ li(at, Operand(Handle<Object>(instr->hydrogen()->cell())));
2188  __ lw(result, FieldMemOperand(at, JSGlobalPropertyCell::kValueOffset));
2189  if (instr->hydrogen()->RequiresHoleCheck()) {
2190  __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
2191  DeoptimizeIf(eq, instr->environment(), result, Operand(at));
2192  }
2193 }
2194 
2195 
2196 void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
2197  ASSERT(ToRegister(instr->global_object()).is(a0));
2198  ASSERT(ToRegister(instr->result()).is(v0));
2199 
2200  __ li(a2, Operand(instr->name()));
2201  RelocInfo::Mode mode = instr->for_typeof() ? RelocInfo::CODE_TARGET
2202  : RelocInfo::CODE_TARGET_CONTEXT;
2203  Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
2204  CallCode(ic, mode, instr);
2205 }
2206 
2207 
2208 void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
2209  Register value = ToRegister(instr->value());
2210  Register cell = scratch0();
2211 
2212  // Load the cell.
2213  __ li(cell, Operand(instr->hydrogen()->cell()));
2214 
2215  // If the cell we are storing to contains the hole it could have
2216  // been deleted from the property dictionary. In that case, we need
2217  // to update the property details in the property dictionary to mark
2218  // it as no longer deleted.
2219  if (instr->hydrogen()->RequiresHoleCheck()) {
2220  // We use a temp to check the payload.
2221  Register payload = ToRegister(instr->TempAt(0));
2222  __ lw(payload, FieldMemOperand(cell, JSGlobalPropertyCell::kValueOffset));
2223  __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
2224  DeoptimizeIf(eq, instr->environment(), payload, Operand(at));
2225  }
2226 
2227  // Store the value.
2228  __ sw(value, FieldMemOperand(cell, JSGlobalPropertyCell::kValueOffset));
2229  // Cells are always rescanned, so no write barrier here.
2230 }
2231 
2232 
2233 void LCodeGen::DoStoreGlobalGeneric(LStoreGlobalGeneric* instr) {
2234  ASSERT(ToRegister(instr->global_object()).is(a1));
2235  ASSERT(ToRegister(instr->value()).is(a0));
2236 
2237  __ li(a2, Operand(instr->name()));
2238  Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
2239  ? isolate()->builtins()->StoreIC_Initialize_Strict()
2240  : isolate()->builtins()->StoreIC_Initialize();
2241  CallCode(ic, RelocInfo::CODE_TARGET_CONTEXT, instr);
2242 }
2243 
2244 
2245 void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
2246  Register context = ToRegister(instr->context());
2247  Register result = ToRegister(instr->result());
2248 
2249  __ lw(result, ContextOperand(context, instr->slot_index()));
2250  if (instr->hydrogen()->RequiresHoleCheck()) {
2251  __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
2252 
2253  if (instr->hydrogen()->DeoptimizesOnHole()) {
2254  DeoptimizeIf(eq, instr->environment(), result, Operand(at));
2255  } else {
2256  Label is_not_hole;
2257  __ Branch(&is_not_hole, ne, result, Operand(at));
2258  __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
2259  __ bind(&is_not_hole);
2260  }
2261  }
2262 }
2263 
2264 
2265 void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
2266  Register context = ToRegister(instr->context());
2267  Register value = ToRegister(instr->value());
2268  Register scratch = scratch0();
2269  MemOperand target = ContextOperand(context, instr->slot_index());
2270 
2271  Label skip_assignment;
2272 
2273  if (instr->hydrogen()->RequiresHoleCheck()) {
2274  __ lw(scratch, target);
2275  __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
2276 
2277  if (instr->hydrogen()->DeoptimizesOnHole()) {
2278  DeoptimizeIf(eq, instr->environment(), scratch, Operand(at));
2279  } else {
2280  __ Branch(&skip_assignment, ne, scratch, Operand(at));
2281  }
2282  }
2283 
2284  __ sw(value, target);
2285  if (instr->hydrogen()->NeedsWriteBarrier()) {
2286  HType type = instr->hydrogen()->value()->type();
2287  SmiCheck check_needed =
2288  type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
2289  __ RecordWriteContextSlot(context,
2290  target.offset(),
2291  value,
2292  scratch0(),
2293  kRAHasBeenSaved,
2294  kSaveFPRegs,
2295  EMIT_REMEMBERED_SET,
2296  check_needed);
2297  }
2298 
2299  __ bind(&skip_assignment);
2300 }
2301 
2302 
2303 void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
2304  Register object = ToRegister(instr->InputAt(0));
2305  Register result = ToRegister(instr->result());
2306  if (instr->hydrogen()->is_in_object()) {
2307  __ lw(result, FieldMemOperand(object, instr->hydrogen()->offset()));
2308  } else {
2309  __ lw(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
2310  __ lw(result, FieldMemOperand(result, instr->hydrogen()->offset()));
2311  }
2312 }
2313 
2314 
2315 void LCodeGen::EmitLoadFieldOrConstantFunction(Register result,
2316  Register object,
2317  Handle<Map> type,
2318  Handle<String> name,
2319  LEnvironment* env) {
2320  LookupResult lookup(isolate());
2321  type->LookupInDescriptors(NULL, *name, &lookup);
2322  ASSERT(lookup.IsFound() || lookup.IsCacheable());
2323  if (lookup.IsFound() && lookup.type() == FIELD) {
2324  int index = lookup.GetLocalFieldIndexFromMap(*type);
2325  int offset = index * kPointerSize;
2326  if (index < 0) {
2327  // Negative property indices are in-object properties, indexed
2328  // from the end of the fixed part of the object.
2329  __ lw(result, FieldMemOperand(object, offset + type->instance_size()));
2330  } else {
2331  // Non-negative property indices are in the properties array.
2332  __ lw(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
2333  __ lw(result, FieldMemOperand(result, offset + FixedArray::kHeaderSize));
2334  }
2335  } else if (lookup.IsFound() && lookup.type() == CONSTANT_FUNCTION) {
2336  Handle<JSFunction> function(lookup.GetConstantFunctionFromMap(*type));
2337  __ LoadHeapObject(result, function);
2338  } else {
2339  // Negative lookup.
2340  // Check prototypes.
2341  HeapObject* current = HeapObject::cast((*type)->prototype());
2342  Heap* heap = type->GetHeap();
2343  while (current != heap->null_value()) {
2344  Handle<HeapObject> link(current);
2345  __ LoadHeapObject(result, link);
2346  __ lw(result, FieldMemOperand(result, HeapObject::kMapOffset));
2347  DeoptimizeIf(ne, env,
2348  result, Operand(Handle<Map>(JSObject::cast(current)->map())));
2349  current = HeapObject::cast(current->map()->prototype());
2350  }
2351  __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
2352  }
2353 }
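// Field addressing in the FIELD case above, as a worked example with
// illustrative numbers: a negative index such as -1 with instance_size == 16
// reads the in-object slot at offset -1 * kPointerSize + 16 == 12 from the
// start of the object, while a non-negative index i reads the out-of-object
// slot properties[i], i.e. offset i * kPointerSize + FixedArray::kHeaderSize
// into the properties backing store.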
2354 
2355 
2356 void LCodeGen::DoLoadNamedFieldPolymorphic(LLoadNamedFieldPolymorphic* instr) {
2357  Register object = ToRegister(instr->object());
2358  Register result = ToRegister(instr->result());
2359  Register object_map = scratch0();
2360 
2361  int map_count = instr->hydrogen()->types()->length();
2362  bool need_generic = instr->hydrogen()->need_generic();
2363 
2364  if (map_count == 0 && !need_generic) {
2365  DeoptimizeIf(al, instr->environment());
2366  return;
2367  }
2368  Handle<String> name = instr->hydrogen()->name();
2369  Label done;
2370  __ lw(object_map, FieldMemOperand(object, HeapObject::kMapOffset));
2371  for (int i = 0; i < map_count; ++i) {
2372  bool last = (i == map_count - 1);
2373  Handle<Map> map = instr->hydrogen()->types()->at(i);
2374  Label check_passed;
2375  __ CompareMapAndBranch(
2376  object_map, map, &check_passed,
2377  eq, &check_passed, ALLOW_ELEMENT_TRANSITION_MAPS);
2378  if (last && !need_generic) {
2379  DeoptimizeIf(al, instr->environment());
2380  __ bind(&check_passed);
2381  EmitLoadFieldOrConstantFunction(
2382  result, object, map, name, instr->environment());
2383  } else {
2384  Label next;
2385  __ Branch(&next);
2386  __ bind(&check_passed);
2387  EmitLoadFieldOrConstantFunction(
2388  result, object, map, name, instr->environment());
2389  __ Branch(&done);
2390  __ bind(&next);
2391  }
2392  }
2393  if (need_generic) {
2394  __ li(a2, Operand(name));
2395  Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
2396  CallCode(ic, RelocInfo::CODE_TARGET, instr);
2397  }
2398  __ bind(&done);
2399 }
2400 
2401 
2402 void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
2403  ASSERT(ToRegister(instr->object()).is(a0));
2404  ASSERT(ToRegister(instr->result()).is(v0));
2405 
2406  // Name is always in a2.
2407  __ li(a2, Operand(instr->name()));
2408  Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
2409  CallCode(ic, RelocInfo::CODE_TARGET, instr);
2410 }
2411 
2412 
2413 void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
2414  Register scratch = scratch0();
2415  Register function = ToRegister(instr->function());
2416  Register result = ToRegister(instr->result());
2417 
2418  // Check that the function really is a function. Load map into the
2419  // result register.
2420  __ GetObjectType(function, result, scratch);
2421  DeoptimizeIf(ne, instr->environment(), scratch, Operand(JS_FUNCTION_TYPE));
2422 
2423  // Make sure that the function has an instance prototype.
2424  Label non_instance;
2425  __ lbu(scratch, FieldMemOperand(result, Map::kBitFieldOffset));
2426  __ And(scratch, scratch, Operand(1 << Map::kHasNonInstancePrototype));
2427  __ Branch(&non_instance, ne, scratch, Operand(zero_reg));
2428 
2429  // Get the prototype or initial map from the function.
2430  __ lw(result,
2431  FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
2432 
2433  // Check that the function has a prototype or an initial map.
2434  __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
2435  DeoptimizeIf(eq, instr->environment(), result, Operand(at));
2436 
2437  // If the function does not have an initial map, we're done.
2438  Label done;
2439  __ GetObjectType(result, scratch, scratch);
2440  __ Branch(&done, ne, scratch, Operand(MAP_TYPE));
2441 
2442  // Get the prototype from the initial map.
2443  __ lw(result, FieldMemOperand(result, Map::kPrototypeOffset));
2444  __ Branch(&done);
2445 
2446  // Non-instance prototype: Fetch prototype from constructor field
2447  // in initial map.
2448  __ bind(&non_instance);
2449  __ lw(result, FieldMemOperand(result, Map::kConstructorOffset));
2450 
2451  // All done.
2452  __ bind(&done);
2453 }
2454 
2455 
2456 void LCodeGen::DoLoadElements(LLoadElements* instr) {
2457  Register result = ToRegister(instr->result());
2458  Register input = ToRegister(instr->InputAt(0));
2459  Register scratch = scratch0();
2460 
2461  __ lw(result, FieldMemOperand(input, JSObject::kElementsOffset));
2462  if (FLAG_debug_code) {
2463  Label done, fail;
2464  __ lw(scratch, FieldMemOperand(result, HeapObject::kMapOffset));
2465  __ LoadRoot(at, Heap::kFixedArrayMapRootIndex);
2466  __ Branch(USE_DELAY_SLOT, &done, eq, scratch, Operand(at));
2467  __ LoadRoot(at, Heap::kFixedCOWArrayMapRootIndex); // In the delay slot.
2468  __ Branch(&done, eq, scratch, Operand(at));
2469  // |scratch| still contains |input|'s map.
2470  __ lbu(scratch, FieldMemOperand(scratch, Map::kBitField2Offset));
2471  __ Ext(scratch, scratch, Map::kElementsKindShift,
2472  Map::kElementsKindBitCount);
2473  __ Branch(&fail, lt, scratch,
2474  Operand(GetInitialFastElementsKind()));
2475  __ Branch(&done, le, scratch,
2476  Operand(TERMINAL_FAST_ELEMENTS_KIND));
2477  __ Branch(&fail, lt, scratch,
2478  Operand(FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND));
2479  __ Branch(&done, le, scratch,
2480  Operand(LAST_EXTERNAL_ARRAY_ELEMENTS_KIND));
2481  __ bind(&fail);
2482  __ Abort("Check for fast or external elements failed.");
2483  __ bind(&done);
2484  }
2485 }
2486 
2487 
2488 void LCodeGen::DoLoadExternalArrayPointer(
2489  LLoadExternalArrayPointer* instr) {
2490  Register to_reg = ToRegister(instr->result());
2491  Register from_reg = ToRegister(instr->InputAt(0));
2492  __ lw(to_reg, FieldMemOperand(from_reg,
2493  ExternalArray::kExternalPointerOffset));
2494 }
2495 
2496 
2497 void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
2498  Register arguments = ToRegister(instr->arguments());
2499  Register length = ToRegister(instr->length());
2500  Register index = ToRegister(instr->index());
2501  Register result = ToRegister(instr->result());
2502 
2503  // Bail out if index is not a valid argument index. Use an unsigned check
2504  // to get the negative check for free.
2505 
2506  // TODO(plind): Should be optimized to do the sub before the DeoptimizeIf(),
2507  // as is done on ARM. It will save us an instruction.
2508  DeoptimizeIf(ls, instr->environment(), length, Operand(index));
2509 
2510  // There are two words between the frame pointer and the last argument.
2511  // Subtracting from length accounts for one of them, add one more.
2512  __ subu(length, length, index);
2513  __ Addu(length, length, Operand(1));
2514  __ sll(length, length, kPointerSizeLog2);
2515  __ Addu(at, arguments, Operand(length));
2516  __ lw(result, MemOperand(at, 0));
2517 }
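// Worked example of the address computation above (values illustrative):
// with length == 3 and index == 0 the code loads from
// arguments + (3 - 0 + 1) * kPointerSize == arguments + 16 bytes, which skips
// the two words that sit between the frame pointer and the last argument, as
// noted in the comment before the DeoptimizeIf.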
2518 
2519 
2520 void LCodeGen::DoLoadKeyedFastElement(LLoadKeyedFastElement* instr) {
2521  Register elements = ToRegister(instr->elements());
2522  Register key = EmitLoadRegister(instr->key(), scratch0());
2523  Register result = ToRegister(instr->result());
2524  Register scratch = scratch0();
2525 
2526  // Load the result.
2527  __ sll(scratch, key, kPointerSizeLog2); // Key indexes words.
2528  __ addu(scratch, elements, scratch);
2529  uint32_t offset = FixedArray::kHeaderSize +
2530  (instr->additional_index() << kPointerSizeLog2);
2531  __ lw(result, FieldMemOperand(scratch, offset));
2532 
2533  // Check for the hole value.
2534  if (instr->hydrogen()->RequiresHoleCheck()) {
2535  if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
2536  __ And(scratch, result, Operand(kSmiTagMask));
2537  DeoptimizeIf(ne, instr->environment(), scratch, Operand(zero_reg));
2538  } else {
2539  __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
2540  DeoptimizeIf(eq, instr->environment(), result, Operand(scratch));
2541  }
2542  }
2543 }
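// Addressing sketch for the fast keyed load above (values illustrative):
// with key == 2 and additional_index() == 0 the load reads the third element
// of the backing FixedArray, at byte offset 2 * kPointerSize past
// FixedArray::kHeaderSize. The subsequent hole check is either a smi-tag test
// (fast smi-only kinds) or a comparison against the one-of-a-kind the_hole
// sentinel (other fast kinds).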
2544 
2545 
2546 void LCodeGen::DoLoadKeyedFastDoubleElement(
2547  LLoadKeyedFastDoubleElement* instr) {
2548  Register elements = ToRegister(instr->elements());
2549  bool key_is_constant = instr->key()->IsConstantOperand();
2550  Register key = no_reg;
2551  DoubleRegister result = ToDoubleRegister(instr->result());
2552  Register scratch = scratch0();
2553 
2554  int shift_size =
2555  ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
2556  int constant_key = 0;
2557  if (key_is_constant) {
2558  constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
2559  if (constant_key & 0xF0000000) {
2560  Abort("array index constant value too big.");
2561  }
2562  } else {
2563  key = ToRegister(instr->key());
2564  }
2565 
2566  if (key_is_constant) {
2567  __ Addu(elements, elements,
2568  Operand(((constant_key + instr->additional_index()) << shift_size) +
2569  FixedDoubleArray::kHeaderSize - kHeapObjectTag));
2570  } else {
2571  __ sll(scratch, key, shift_size);
2572  __ Addu(elements, elements, Operand(scratch));
2573  __ Addu(elements, elements,
2574  Operand((FixedDoubleArray::kHeaderSize - kHeapObjectTag) +
2575  (instr->additional_index() << shift_size)));
2576  }
2577 
2578  if (instr->hydrogen()->RequiresHoleCheck()) {
2579  __ lw(scratch, MemOperand(elements, sizeof(kHoleNanLower32)));
2580  DeoptimizeIf(eq, instr->environment(), scratch, Operand(kHoleNanUpper32));
2581  }
2582 
2583  __ ldc1(result, MemOperand(elements));
2584 }
2585 
2586 
2587 void LCodeGen::DoLoadKeyedSpecializedArrayElement(
2588  LLoadKeyedSpecializedArrayElement* instr) {
2589  Register external_pointer = ToRegister(instr->external_pointer());
2590  Register key = no_reg;
2591  ElementsKind elements_kind = instr->elements_kind();
2592  bool key_is_constant = instr->key()->IsConstantOperand();
2593  int constant_key = 0;
2594  if (key_is_constant) {
2595  constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
2596  if (constant_key & 0xF0000000) {
2597  Abort("array index constant value too big.");
2598  }
2599  } else {
2600  key = ToRegister(instr->key());
2601  }
2602  int shift_size = ElementsKindToShiftSize(elements_kind);
2603  int additional_offset = instr->additional_index() << shift_size;
2604 
2605  if (elements_kind == EXTERNAL_FLOAT_ELEMENTS ||
2606  elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
2607  FPURegister result = ToDoubleRegister(instr->result());
2608  if (key_is_constant) {
2609  __ Addu(scratch0(), external_pointer, constant_key << shift_size);
2610  } else {
2611  __ sll(scratch0(), key, shift_size);
2612  __ Addu(scratch0(), scratch0(), external_pointer);
2613  }
2614 
2615  if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
2616  __ lwc1(result, MemOperand(scratch0(), additional_offset));
2617  __ cvt_d_s(result, result);
2618  } else { // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS
2619  __ ldc1(result, MemOperand(scratch0(), additional_offset));
2620  }
2621  } else {
2622  Register result = ToRegister(instr->result());
2623  Register scratch = scratch0();
2624  if (instr->additional_index() != 0 && !key_is_constant) {
2625  __ Addu(scratch, key, instr->additional_index());
2626  }
2627  MemOperand mem_operand(zero_reg);
2628  if (key_is_constant) {
2629  mem_operand =
2630  MemOperand(external_pointer,
2631  (constant_key << shift_size) + additional_offset);
2632  } else {
2633  if (instr->additional_index() == 0) {
2634  __ sll(scratch, key, shift_size);
2635  } else {
2636  __ sll(scratch, scratch, shift_size);
2637  }
2638  __ Addu(scratch, scratch, external_pointer);
2639  mem_operand = MemOperand(scratch);
2640  }
2641  switch (elements_kind) {
2642  case EXTERNAL_BYTE_ELEMENTS:
2643  __ lb(result, mem_operand);
2644  break;
2645  case EXTERNAL_PIXEL_ELEMENTS:
2646  case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
2647  __ lbu(result, mem_operand);
2648  break;
2649  case EXTERNAL_SHORT_ELEMENTS:
2650  __ lh(result, mem_operand);
2651  break;
2652  case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
2653  __ lhu(result, mem_operand);
2654  break;
2655  case EXTERNAL_INT_ELEMENTS:
2656  __ lw(result, mem_operand);
2657  break;
2658  case EXTERNAL_UNSIGNED_INT_ELEMENTS:
2659  __ lw(result, mem_operand);
2660  // TODO(danno): we could be more clever here, perhaps having a special
2661  // version of the stub that detects if the overflow case actually
2662  // happens, and generate code that returns a double rather than int.
2663  DeoptimizeIf(Ugreater_equal, instr->environment(),
2664  result, Operand(0x80000000));
2665  break;
2666  case EXTERNAL_FLOAT_ELEMENTS:
2667  case EXTERNAL_DOUBLE_ELEMENTS:
2668  case FAST_DOUBLE_ELEMENTS:
2669  case FAST_ELEMENTS:
2670  case FAST_SMI_ELEMENTS:
2671  case FAST_HOLEY_DOUBLE_ELEMENTS:
2672  case FAST_HOLEY_ELEMENTS:
2673  case FAST_HOLEY_SMI_ELEMENTS:
2674  case DICTIONARY_ELEMENTS:
2675  case NON_EXISTENT_ELEMENTS_KIND:
2676  UNREACHABLE();
2677  break;
2678  }
2679  }
2680 }
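// For external (typed) arrays the element address is
//   external_pointer + ((key + additional_index) << shift_size)
// where shift_size is log2 of the element width. Worked example (values
// illustrative): an EXTERNAL_INT_ELEMENTS access with key == 5 uses
// shift_size == 2 and loads a 32-bit word from external_pointer + 20. The
// unsigned-int case additionally deoptimizes for values >= 0x80000000, which
// cannot be represented as a non-negative int32 result.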
2681 
2682 
2683 void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
2684  ASSERT(ToRegister(instr->object()).is(a1));
2685  ASSERT(ToRegister(instr->key()).is(a0));
2686 
2687  Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
2688  CallCode(ic, RelocInfo::CODE_TARGET, instr);
2689 }
2690 
2691 
2692 void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
2693  Register scratch = scratch0();
2694  Register temp = scratch1();
2695  Register result = ToRegister(instr->result());
2696 
2697  if (instr->hydrogen()->from_inlined()) {
2698  __ Subu(result, sp, 2 * kPointerSize);
2699  } else {
2700  // Check if the calling frame is an arguments adaptor frame.
2701  Label done, adapted;
2702  __ lw(scratch, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
2703  __ lw(result, MemOperand(scratch, StandardFrameConstants::kContextOffset));
2704  __ Xor(temp, result, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
2705 
2706  // Result is the frame pointer for the frame if not adapted and for the real
2707  // frame below the adaptor frame if adapted.
2708  __ Movn(result, fp, temp); // Move only if temp is not equal to zero (ne).
2709  __ Movz(result, scratch, temp); // Move only if temp is equal to zero (eq).
2710  }
2711 }
2712 
2713 
2714 void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
2715  Register elem = ToRegister(instr->InputAt(0));
2716  Register result = ToRegister(instr->result());
2717 
2718  Label done;
2719 
2720  // If no arguments adaptor frame the number of arguments is fixed.
2721  __ Addu(result, zero_reg, Operand(scope()->num_parameters()));
2722  __ Branch(&done, eq, fp, Operand(elem));
2723 
2724  // Arguments adaptor frame present. Get argument length from there.
2725  __ lw(result, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
2726  __ lw(result,
2727  MemOperand(result, ArgumentsAdaptorFrameConstants::kLengthOffset));
2728  __ SmiUntag(result);
2729 
2730  // Argument length is in result register.
2731  __ bind(&done);
2732 }
2733 
2734 
2735 void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
2736  Register receiver = ToRegister(instr->receiver());
2737  Register function = ToRegister(instr->function());
2738  Register scratch = scratch0();
2739 
2740  // If the receiver is null or undefined, we have to pass the global
2741  // object as a receiver to normal functions. Values have to be
2742  // passed unchanged to builtins and strict-mode functions.
2743  Label global_object, receiver_ok;
2744 
2745  // Do not transform the receiver to object for strict mode
2746  // functions.
2747  __ lw(scratch,
2748  FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
2749  __ lw(scratch,
2750  FieldMemOperand(scratch, SharedFunctionInfo::kCompilerHintsOffset));
2751 
2752  // Do not transform the receiver to object for builtins.
2753  int32_t strict_mode_function_mask =
2754  1 << (SharedFunctionInfo::kStrictModeFunction + kSmiTagSize);
2755  int32_t native_mask = 1 << (SharedFunctionInfo::kNative + kSmiTagSize);
2756  __ And(scratch, scratch, Operand(strict_mode_function_mask | native_mask));
2757  __ Branch(&receiver_ok, ne, scratch, Operand(zero_reg));
2758 
2759  // Normal function. Replace undefined or null with global receiver.
2760  __ LoadRoot(scratch, Heap::kNullValueRootIndex);
2761  __ Branch(&global_object, eq, receiver, Operand(scratch));
2762  __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
2763  __ Branch(&global_object, eq, receiver, Operand(scratch));
2764 
2765  // Deoptimize if the receiver is not a JS object.
2766  __ And(scratch, receiver, Operand(kSmiTagMask));
2767  DeoptimizeIf(eq, instr->environment(), scratch, Operand(zero_reg));
2768 
2769  __ GetObjectType(receiver, scratch, scratch);
2770  DeoptimizeIf(lt, instr->environment(),
2771  scratch, Operand(FIRST_SPEC_OBJECT_TYPE));
2772  __ Branch(&receiver_ok);
2773 
2774  __ bind(&global_object);
2775  __ lw(receiver, GlobalObjectOperand());
2776  __ lw(receiver,
2777  FieldMemOperand(receiver, JSGlobalObject::kGlobalReceiverOffset));
2778  __ bind(&receiver_ok);
2779 }
2780 
2781 void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
2782  Register receiver = ToRegister(instr->receiver());
2783  Register function = ToRegister(instr->function());
2784  Register length = ToRegister(instr->length());
2785  Register elements = ToRegister(instr->elements());
2786  Register scratch = scratch0();
2787  ASSERT(receiver.is(a0)); // Used for parameter count.
2788  ASSERT(function.is(a1)); // Required by InvokeFunction.
2789  ASSERT(ToRegister(instr->result()).is(v0));
2790 
2791  // Copy the arguments to this function possibly from the
2792  // adaptor frame below it.
2793  const uint32_t kArgumentsLimit = 1 * KB;
2794  DeoptimizeIf(hi, instr->environment(), length, Operand(kArgumentsLimit));
2795 
2796  // Push the receiver and use the register to keep the original
2797  // number of arguments.
2798  __ push(receiver);
2799  __ Move(receiver, length);
2800  // The arguments are at a one pointer size offset from elements.
2801  __ Addu(elements, elements, Operand(1 * kPointerSize));
2802 
2803  // Loop through the arguments pushing them onto the execution
2804  // stack.
2805  Label invoke, loop;
2806  // length is a small non-negative integer, due to the test above.
2807  __ Branch(USE_DELAY_SLOT, &invoke, eq, length, Operand(zero_reg));
2808  __ sll(scratch, length, 2);
2809  __ bind(&loop);
2810  __ Addu(scratch, elements, scratch);
2811  __ lw(scratch, MemOperand(scratch));
2812  __ push(scratch);
2813  __ Subu(length, length, Operand(1));
2814  __ Branch(USE_DELAY_SLOT, &loop, ne, length, Operand(zero_reg));
2815  __ sll(scratch, length, 2);
2816 
2817  __ bind(&invoke);
2818  ASSERT(instr->HasPointerMap());
2819  LPointerMap* pointers = instr->pointer_map();
2820  RecordPosition(pointers->position());
2821  SafepointGenerator safepoint_generator(
2822  this, pointers, Safepoint::kLazyDeopt);
2823  // The number of arguments is stored in receiver which is a0, as expected
2824  // by InvokeFunction.
2825  ParameterCount actual(receiver);
2826  __ InvokeFunction(function, actual, CALL_FUNCTION,
2827  safepoint_generator, CALL_AS_METHOD);
2828  __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
2829 }
2830 
2831 
2832 void LCodeGen::DoPushArgument(LPushArgument* instr) {
2833  LOperand* argument = instr->InputAt(0);
2834  if (argument->IsDoubleRegister() || argument->IsDoubleStackSlot()) {
2835  Abort("DoPushArgument not implemented for double type.");
2836  } else {
2837  Register argument_reg = EmitLoadRegister(argument, at);
2838  __ push(argument_reg);
2839  }
2840 }
2841 
2842 
2843 void LCodeGen::DoDrop(LDrop* instr) {
2844  __ Drop(instr->count());
2845 }
2846 
2847 
2848 void LCodeGen::DoThisFunction(LThisFunction* instr) {
2849  Register result = ToRegister(instr->result());
2850  __ LoadHeapObject(result, instr->hydrogen()->closure());
2851 }
2852 
2853 
2854 void LCodeGen::DoContext(LContext* instr) {
2855  Register result = ToRegister(instr->result());
2856  __ mov(result, cp);
2857 }
2858 
2859 
2860 void LCodeGen::DoOuterContext(LOuterContext* instr) {
2861  Register context = ToRegister(instr->context());
2862  Register result = ToRegister(instr->result());
2863  __ lw(result,
2864  MemOperand(context, Context::SlotOffset(Context::PREVIOUS_INDEX)));
2865 }
2866 
2867 
2868 void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
2869  __ LoadHeapObject(scratch0(), instr->hydrogen()->pairs());
2870  __ li(scratch1(), Operand(Smi::FromInt(instr->hydrogen()->flags())));
2871  // The context is the first argument.
2872  __ Push(cp, scratch0(), scratch1());
2873  CallRuntime(Runtime::kDeclareGlobals, 3, instr);
2874 }
2875 
2876 
2877 void LCodeGen::DoGlobalObject(LGlobalObject* instr) {
2878  Register result = ToRegister(instr->result());
2879  __ lw(result, ContextOperand(cp, Context::GLOBAL_INDEX));
2880 }
2881 
2882 
2883 void LCodeGen::DoGlobalReceiver(LGlobalReceiver* instr) {
2884  Register global = ToRegister(instr->global());
2885  Register result = ToRegister(instr->result());
2886  __ lw(result, FieldMemOperand(global, GlobalObject::kGlobalReceiverOffset));
2887 }
2888 
2889 
2890 void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
2891  int arity,
2892  LInstruction* instr,
2893  CallKind call_kind,
2894  A1State a1_state) {
2895  bool can_invoke_directly = !function->NeedsArgumentsAdaption() ||
2896  function->shared()->formal_parameter_count() == arity;
2897 
2898  LPointerMap* pointers = instr->pointer_map();
2899  RecordPosition(pointers->position());
2900 
2901  if (can_invoke_directly) {
2902  if (a1_state == A1_UNINITIALIZED) {
2903  __ LoadHeapObject(a1, function);
2904  }
2905 
2906  // Change context if needed.
2907  bool change_context =
2908  (info()->closure()->context() != function->context()) ||
2909  scope()->contains_with() ||
2910  (scope()->num_heap_slots() > 0);
2911  if (change_context) {
2912  __ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
2913  }
2914 
2915  // Set a0 to the arguments count if adaption is not needed. Assumes that a0
2916  // is available to write to at this point.
2917  if (!function->NeedsArgumentsAdaption()) {
2918  __ li(a0, Operand(arity));
2919  }
2920 
2921  // Invoke function.
2922  __ SetCallKind(t1, call_kind);
2923  __ lw(at, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
2924  __ Call(at);
2925 
2926  // Set up deoptimization.
2927  RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
2928  } else {
2929  SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
2930  ParameterCount count(arity);
2931  __ InvokeFunction(function, count, CALL_FUNCTION, generator, call_kind);
2932  }
2933 
2934  // Restore context.
2935  __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
2936 }
2937 
2938 
2939 void LCodeGen::DoCallConstantFunction(LCallConstantFunction* instr) {
2940  ASSERT(ToRegister(instr->result()).is(v0));
2941  __ mov(a0, v0);
2942  CallKnownFunction(instr->function(),
2943  instr->arity(),
2944  instr,
2945  CALL_AS_METHOD,
2946  A1_UNINITIALIZED);
2947 }
2948 
2949 
2950 void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr) {
2951  Register input = ToRegister(instr->InputAt(0));
2952  Register result = ToRegister(instr->result());
2953  Register scratch = scratch0();
2954 
2955  // Deoptimize if not a heap number.
2956  __ lw(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
2957  __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
2958  DeoptimizeIf(ne, instr->environment(), scratch, Operand(at));
2959 
2960  Label done;
2961  Register exponent = scratch0();
2962  scratch = no_reg;
2963  __ lw(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));
2964  // Check the sign of the argument. If the argument is positive, just
2965  // return it.
2966  __ Move(result, input);
2967  __ And(at, exponent, Operand(HeapNumber::kSignMask));
2968  __ Branch(&done, eq, at, Operand(zero_reg));
2969 
2970  // Input is negative. Reverse its sign.
2971  // Preserve the value of all registers.
2972  {
2973  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
2974 
2975  // Registers were saved at the safepoint, so we can use
2976  // many scratch registers.
2977  Register tmp1 = input.is(a1) ? a0 : a1;
2978  Register tmp2 = input.is(a2) ? a0 : a2;
2979  Register tmp3 = input.is(a3) ? a0 : a3;
2980  Register tmp4 = input.is(t0) ? a0 : t0;
2981 
2982  // exponent: floating point exponent value.
2983 
2984  Label allocated, slow;
2985  __ LoadRoot(tmp4, Heap::kHeapNumberMapRootIndex);
2986  __ AllocateHeapNumber(tmp1, tmp2, tmp3, tmp4, &slow);
2987  __ Branch(&allocated);
2988 
2989  // Slow case: Call the runtime system to do the number allocation.
2990  __ bind(&slow);
2991 
2992  CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
2993  // Set the pointer to the new heap number in tmp.
2994  if (!tmp1.is(v0))
2995  __ mov(tmp1, v0);
2996  // Restore input_reg after call to runtime.
2997  __ LoadFromSafepointRegisterSlot(input, input);
2998  __ lw(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));
2999 
3000  __ bind(&allocated);
3001  // exponent: floating point exponent value.
3002  // tmp1: allocated heap number.
3003  __ And(exponent, exponent, Operand(~HeapNumber::kSignMask));
3004  __ sw(exponent, FieldMemOperand(tmp1, HeapNumber::kExponentOffset));
3005  __ lw(tmp2, FieldMemOperand(input, HeapNumber::kMantissaOffset));
3006  __ sw(tmp2, FieldMemOperand(tmp1, HeapNumber::kMantissaOffset));
3007 
3008  __ StoreToSafepointRegisterSlot(tmp1, result);
3009  }
3010 
3011  __ bind(&done);
3012 }
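// The deferred path above computes the absolute value of a heap number by
// clearing the IEEE-754 sign bit in the exponent word and copying the
// mantissa word unchanged. A standalone sketch of that bit manipulation,
// assuming the usual 32-bit high-word layout (the helper name is
// illustrative, not part of the runtime):
static inline uint32_t ClearDoubleSignBitSketch(uint32_t exponent_word) {
  // kSignMask is 0x80000000, so this keeps the low 31 bits intact.
  return exponent_word & ~HeapNumber::kSignMask;
}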
3013 
3014 
3015 void LCodeGen::EmitIntegerMathAbs(LUnaryMathOperation* instr) {
3016  Register input = ToRegister(instr->InputAt(0));
3017  Register result = ToRegister(instr->result());
3018  Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
3019  Label done;
3020  __ Branch(USE_DELAY_SLOT, &done, ge, input, Operand(zero_reg));
3021  __ mov(result, input);
3022  ASSERT_EQ(2, masm()->InstructionsGeneratedSince(&done));
3023  __ subu(result, zero_reg, input);
3024  // Overflow if result is still negative, i.e. 0x80000000.
3025  DeoptimizeIf(lt, instr->environment(), result, Operand(zero_reg));
3026  __ bind(&done);
3027 }
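// The integer path negates in place and then deoptimizes if the result is
// still negative, which can only happen for the one value with no positive
// two's-complement counterpart. Worked example: abs(-2147483648) would wrap
// back to 0x80000000, so that input deoptimizes instead of returning a wrong
// answer.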
3028 
3029 
3030 void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) {
3031  // Class for deferred case.
3032  class DeferredMathAbsTaggedHeapNumber: public LDeferredCode {
3033  public:
3034  DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen,
3035  LUnaryMathOperation* instr)
3036  : LDeferredCode(codegen), instr_(instr) { }
3037  virtual void Generate() {
3038  codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
3039  }
3040  virtual LInstruction* instr() { return instr_; }
3041  private:
3042  LUnaryMathOperation* instr_;
3043  };
3044 
3045  Representation r = instr->hydrogen()->value()->representation();
3046  if (r.IsDouble()) {
3047  FPURegister input = ToDoubleRegister(instr->InputAt(0));
3048  FPURegister result = ToDoubleRegister(instr->result());
3049  __ abs_d(result, input);
3050  } else if (r.IsInteger32()) {
3051  EmitIntegerMathAbs(instr);
3052  } else {
3053  // Representation is tagged.
3054  DeferredMathAbsTaggedHeapNumber* deferred =
3055  new(zone()) DeferredMathAbsTaggedHeapNumber(this, instr);
3056  Register input = ToRegister(instr->InputAt(0));
3057  // Smi check.
3058  __ JumpIfNotSmi(input, deferred->entry());
3059  // If smi, handle it directly.
3060  EmitIntegerMathAbs(instr);
3061  __ bind(deferred->exit());
3062  }
3063 }
3064 
3065 
3066 void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) {
3067  DoubleRegister input = ToDoubleRegister(instr->InputAt(0));
3068  Register result = ToRegister(instr->result());
3069  FPURegister single_scratch = double_scratch0().low();
3070  Register scratch1 = scratch0();
3071  Register except_flag = ToRegister(instr->TempAt(0));
3072 
3073  __ EmitFPUTruncate(kRoundToMinusInf,
3074  single_scratch,
3075  input,
3076  scratch1,
3077  except_flag);
3078 
3079  // Deopt if the operation did not succeed.
3080  DeoptimizeIf(ne, instr->environment(), except_flag, Operand(zero_reg));
3081 
3082  // Load the result.
3083  __ mfc1(result, single_scratch);
3084 
3085  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3086  // Test for -0.
3087  Label done;
3088  __ Branch(&done, ne, result, Operand(zero_reg));
3089  __ mfc1(scratch1, input.high());
3090  __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
3091  DeoptimizeIf(ne, instr->environment(), scratch1, Operand(zero_reg));
3092  __ bind(&done);
3093  }
3094 }
3095 
3096 
3097 void LCodeGen::DoMathRound(LUnaryMathOperation* instr) {
3098  DoubleRegister input = ToDoubleRegister(instr->InputAt(0));
3099  Register result = ToRegister(instr->result());
3100  Register scratch = scratch0();
3101  Label done, check_sign_on_zero;
3102 
3103  // Extract exponent bits.
3104  __ mfc1(result, input.high());
3105  __ Ext(scratch,
3106  result,
3107  HeapNumber::kExponentShift,
3108  HeapNumber::kExponentBits);
3109 
3110  // If the number is in ]-0.5, +0.5[, the result is +/- 0.
3111  Label skip1;
3112  __ Branch(&skip1, gt, scratch, Operand(HeapNumber::kExponentBias - 2));
3113  __ mov(result, zero_reg);
3114  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3115  __ Branch(&check_sign_on_zero);
3116  } else {
3117  __ Branch(&done);
3118  }
3119  __ bind(&skip1);
3120 
3121  // The following conversion will not work with numbers
3122  // outside of ]-2^32, 2^32[.
3123  DeoptimizeIf(ge, instr->environment(), scratch,
3124  Operand(HeapNumber::kExponentBias + 32));
3125 
3126  // Save the original sign for later comparison.
3127  __ And(scratch, result, Operand(HeapNumber::kSignMask));
3128 
3129  __ Move(double_scratch0(), 0.5);
3130  __ add_d(double_scratch0(), input, double_scratch0());
3131 
3132  // Check sign of the result: if the sign changed, the input
3133  // value was in ]-0.5, 0[ and the result should be -0.
3134  __ mfc1(result, double_scratch0().high());
3135  __ Xor(result, result, Operand(scratch));
3136  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3137  // ARM uses 'mi' here, which is 'lt'
3138  DeoptimizeIf(lt, instr->environment(), result,
3139  Operand(zero_reg));
3140  } else {
3141  Label skip2;
3142  // ARM uses 'mi' here, which is 'lt'
3143  // Negating it results in 'ge'
3144  __ Branch(&skip2, ge, result, Operand(zero_reg));
3145  __ mov(result, zero_reg);
3146  __ Branch(&done);
3147  __ bind(&skip2);
3148  }
3149 
3150  Register except_flag = scratch;
3151 
3152  __ EmitFPUTruncate(kRoundToMinusInf,
3153  double_scratch0().low(),
3154  double_scratch0(),
3155  result,
3156  except_flag);
3157 
3158  DeoptimizeIf(ne, instr->environment(), except_flag, Operand(zero_reg));
3159 
3160  __ mfc1(result, double_scratch0().low());
3161 
3162  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3163  // Test for -0.
3164  __ Branch(&done, ne, result, Operand(zero_reg));
3165  __ bind(&check_sign_on_zero);
3166  __ mfc1(scratch, input.high());
3167  __ And(scratch, scratch, Operand(HeapNumber::kSignMask));
3168  DeoptimizeIf(ne, instr->environment(), scratch, Operand(zero_reg));
3169  }
3170  __ bind(&done);
3171 }
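// Rounding sketch: the sequence above computes Math.round(x) as
// floor(x + 0.5) via a truncating FPU conversion, with two guards: inputs
// whose exponent shows |x| < 0.5 short-circuit to +/-0, and a sign change
// after adding 0.5 flags the -0 result required for small negative inputs.
// Worked examples: round(2.5) -> floor(3.0) -> 3, and round(-2.5) ->
// floor(-2.0) -> -2, matching the round-half-up behaviour of ES5.1 15.8.2.15.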
3172 
3173 
3174 void LCodeGen::DoMathSqrt(LUnaryMathOperation* instr) {
3175  DoubleRegister input = ToDoubleRegister(instr->InputAt(0));
3176  DoubleRegister result = ToDoubleRegister(instr->result());
3177  __ sqrt_d(result, input);
3178 }
3179 
3180 
3181 void LCodeGen::DoMathPowHalf(LUnaryMathOperation* instr) {
3182  DoubleRegister input = ToDoubleRegister(instr->InputAt(0));
3183  DoubleRegister result = ToDoubleRegister(instr->result());
3184  DoubleRegister temp = ToDoubleRegister(instr->TempAt(0));
3185 
3186  ASSERT(!input.is(result));
3187 
3188  // Note that according to ECMA-262 15.8.2.13:
3189  // Math.pow(-Infinity, 0.5) == Infinity
3190  // Math.sqrt(-Infinity) == NaN
3191  Label done;
3192  __ Move(temp, -V8_INFINITY);
3193  __ BranchF(USE_DELAY_SLOT, &done, NULL, eq, temp, input);
3194  // Set up Infinity in the delay slot.
3195  // result is overwritten if the branch is not taken.
3196  __ neg_d(result, temp);
3197 
3198  // Add +0 to convert -0 to +0.
3199  __ add_d(result, input, kDoubleRegZero);
3200  __ sqrt_d(result, result);
3201  __ bind(&done);
3202 }
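// Math.pow(x, 0.5) differs from a plain sqrt in exactly the two cases handled
// above: pow(-Infinity, 0.5) is +Infinity (whereas sqrt(-Infinity) is NaN),
// and adding +0 first turns a -0 input into +0, so pow(-0, 0.5) is +0 rather
// than the -0 that sqrt would return.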
3203 
3204 
3205 void LCodeGen::DoPower(LPower* instr) {
3206  Representation exponent_type = instr->hydrogen()->right()->representation();
3207  // Having marked this as a call, we can use any registers.
3208  // Just make sure that the input/output registers are the expected ones.
3209  ASSERT(!instr->InputAt(1)->IsDoubleRegister() ||
3210  ToDoubleRegister(instr->InputAt(1)).is(f4));
3211  ASSERT(!instr->InputAt(1)->IsRegister() ||
3212  ToRegister(instr->InputAt(1)).is(a2));
3213  ASSERT(ToDoubleRegister(instr->InputAt(0)).is(f2));
3214  ASSERT(ToDoubleRegister(instr->result()).is(f0));
3215 
3216  if (exponent_type.IsTagged()) {
3217  Label no_deopt;
3218  __ JumpIfSmi(a2, &no_deopt);
3219  __ lw(t3, FieldMemOperand(a2, HeapObject::kMapOffset));
3220  DeoptimizeIf(ne, instr->environment(), t3, Operand(at));
3221  __ bind(&no_deopt);
3222  MathPowStub stub(MathPowStub::TAGGED);
3223  __ CallStub(&stub);
3224  } else if (exponent_type.IsInteger32()) {
3225  MathPowStub stub(MathPowStub::INTEGER);
3226  __ CallStub(&stub);
3227  } else {
3228  ASSERT(exponent_type.IsDouble());
3229  MathPowStub stub(MathPowStub::DOUBLE);
3230  __ CallStub(&stub);
3231  }
3232 }
3233 
3234 
3235 void LCodeGen::DoRandom(LRandom* instr) {
3236  class DeferredDoRandom: public LDeferredCode {
3237  public:
3238  DeferredDoRandom(LCodeGen* codegen, LRandom* instr)
3239  : LDeferredCode(codegen), instr_(instr) { }
3240  virtual void Generate() { codegen()->DoDeferredRandom(instr_); }
3241  virtual LInstruction* instr() { return instr_; }
3242  private:
3243  LRandom* instr_;
3244  };
3245 
3246  DeferredDoRandom* deferred = new(zone()) DeferredDoRandom(this, instr);
3247  // Having marked this instruction as a call we can use any
3248  // registers.
3249  ASSERT(ToDoubleRegister(instr->result()).is(f0));
3250  ASSERT(ToRegister(instr->InputAt(0)).is(a0));
3251 
3252  static const int kSeedSize = sizeof(uint32_t);
3253  STATIC_ASSERT(kPointerSize == kSeedSize);
3254 
3256  static const int kRandomSeedOffset =
3257  FixedArray::kHeaderSize + Context::RANDOM_SEED_INDEX * kPointerSize;
3258  __ lw(a2, FieldMemOperand(a0, kRandomSeedOffset));
3259  // a2: FixedArray of the global context's random seeds
3260 
3261  // Load state[0].
3262  __ lw(a1, FieldMemOperand(a2, ByteArray::kHeaderSize));
3263  __ Branch(deferred->entry(), eq, a1, Operand(zero_reg));
3264  // Load state[1].
3265  __ lw(a0, FieldMemOperand(a2, ByteArray::kHeaderSize + kSeedSize));
3266  // a1: state[0].
3267  // a0: state[1].
3268 
3269  // state[0] = 18273 * (state[0] & 0xFFFF) + (state[0] >> 16)
3270  __ And(a3, a1, Operand(0xFFFF));
3271  __ li(t0, Operand(18273));
3272  __ Mul(a3, a3, t0);
3273  __ srl(a1, a1, 16);
3274  __ Addu(a1, a3, a1);
3275  // Save state[0].
3276  __ sw(a1, FieldMemOperand(a2, ByteArray::kHeaderSize));
3277 
3278  // state[1] = 36969 * (state[1] & 0xFFFF) + (state[1] >> 16)
3279  __ And(a3, a0, Operand(0xFFFF));
3280  __ li(t0, Operand(36969));
3281  __ Mul(a3, a3, t0);
3282  __ srl(a0, a0, 16);
3283  __ Addu(a0, a3, a0);
3284  // Save state[1].
3285  __ sw(a0, FieldMemOperand(a2, ByteArray::kHeaderSize + kSeedSize));
3286 
3287  // Random bit pattern = (state[0] << 14) + (state[1] & 0x3FFFF)
3288  __ And(a0, a0, Operand(0x3FFFF));
3289  __ sll(a1, a1, 14);
3290  __ Addu(v0, a0, a1);
3291 
3292  __ bind(deferred->exit());
3293 
3294  // 0x41300000 is the top half of 1.0 x 2^20 as a double.
3295  __ li(a2, Operand(0x41300000));
3296  // Move 0x41300000xxxxxxxx (x = random bits in v0) to FPU.
3297  __ Move(f12, v0, a2);
3298  // Move 0x4130000000000000 to FPU.
3299  __ Move(f14, zero_reg, a2);
3300  // Subtract to get the result.
3301  __ sub_d(f0, f12, f14);
3302 }
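// The seed update above is the two-stream multiply-with-carry style generator
// V8 uses for Math.random; a rough C sketch (illustrative only, names such as
// make_double are placeholders, not identifiers from this file):
//
//   state0 = 18273 * (state0 & 0xFFFF) + (state0 >> 16);
//   state1 = 36969 * (state1 & 0xFFFF) + (state1 >> 16);
//   uint32_t bits = (state0 << 14) + (state1 & 0x3FFFF);
//   // Glue the random bits onto the mantissa of 2^20 (high word 0x41300000,
//   // low word bits) and subtract 2^20 to obtain a double in [0, 1).
//   double result = make_double(0x41300000, bits) - make_double(0x41300000, 0);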
3303 
3304 void LCodeGen::DoDeferredRandom(LRandom* instr) {
3305  __ PrepareCallCFunction(1, scratch0());
3306  __ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1);
3307  // Return value is in v0.
3308 }
3309 
3310 
3311 void LCodeGen::DoMathLog(LUnaryMathOperation* instr) {
3312  ASSERT(ToDoubleRegister(instr->result()).is(f4));
3313  TranscendentalCacheStub stub(TranscendentalCache::LOG,
3314  TranscendentalCacheStub::UNTAGGED);
3315  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
3316 }
3317 
3318 
3319 void LCodeGen::DoMathTan(LUnaryMathOperation* instr) {
3320  ASSERT(ToDoubleRegister(instr->result()).is(f4));
3321  TranscendentalCacheStub stub(TranscendentalCache::TAN,
3322  TranscendentalCacheStub::UNTAGGED);
3323  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
3324 }
3325 
3326 
3327 void LCodeGen::DoMathCos(LUnaryMathOperation* instr) {
3328  ASSERT(ToDoubleRegister(instr->result()).is(f4));
3329  TranscendentalCacheStub stub(TranscendentalCache::COS,
3330  TranscendentalCacheStub::UNTAGGED);
3331  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
3332 }
3333 
3334 
3335 void LCodeGen::DoMathSin(LUnaryMathOperation* instr) {
3336  ASSERT(ToDoubleRegister(instr->result()).is(f4));
3337  TranscendentalCacheStub stub(TranscendentalCache::SIN,
3338  TranscendentalCacheStub::UNTAGGED);
3339  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
3340 }
3341 
3342 
3343 void LCodeGen::DoUnaryMathOperation(LUnaryMathOperation* instr) {
3344  switch (instr->op()) {
3345  case kMathAbs:
3346  DoMathAbs(instr);
3347  break;
3348  case kMathFloor:
3349  DoMathFloor(instr);
3350  break;
3351  case kMathRound:
3352  DoMathRound(instr);
3353  break;
3354  case kMathSqrt:
3355  DoMathSqrt(instr);
3356  break;
3357  case kMathPowHalf:
3358  DoMathPowHalf(instr);
3359  break;
3360  case kMathCos:
3361  DoMathCos(instr);
3362  break;
3363  case kMathSin:
3364  DoMathSin(instr);
3365  break;
3366  case kMathTan:
3367  DoMathTan(instr);
3368  break;
3369  case kMathLog:
3370  DoMathLog(instr);
3371  break;
3372  default:
3373  Abort("Unimplemented type of LUnaryMathOperation.");
3374  UNREACHABLE();
3375  }
3376 }
3377 
3378 
3379 void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
3380  ASSERT(ToRegister(instr->function()).is(a1));
3381  ASSERT(instr->HasPointerMap());
3382 
3383  if (instr->known_function().is_null()) {
3384  LPointerMap* pointers = instr->pointer_map();
3385  RecordPosition(pointers->position());
3386  SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
3387  ParameterCount count(instr->arity());
3388  __ InvokeFunction(a1, count, CALL_FUNCTION, generator, CALL_AS_METHOD);
3390  } else {
3391  CallKnownFunction(instr->known_function(),
3392  instr->arity(),
3393  instr,
3395  A1_CONTAINS_TARGET);
3396  }
3397 }
3398 
3399 
3400 void LCodeGen::DoCallKeyed(LCallKeyed* instr) {
3401  ASSERT(ToRegister(instr->result()).is(v0));
3402 
3403  int arity = instr->arity();
3404  Handle<Code> ic =
3405  isolate()->stub_cache()->ComputeKeyedCallInitialize(arity);
3406  CallCode(ic, RelocInfo::CODE_TARGET, instr);
3408 }
3409 
3410 
3411 void LCodeGen::DoCallNamed(LCallNamed* instr) {
3412  ASSERT(ToRegister(instr->result()).is(v0));
3413 
3414  int arity = instr->arity();
3415  RelocInfo::Mode mode = RelocInfo::CODE_TARGET;
3416  Handle<Code> ic =
3417  isolate()->stub_cache()->ComputeCallInitialize(arity, mode);
3418  __ li(a2, Operand(instr->name()));
3419  CallCode(ic, mode, instr);
3420  // Restore context register.
3421  __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
3422 }
3423 
3424 
3425 void LCodeGen::DoCallFunction(LCallFunction* instr) {
3426  ASSERT(ToRegister(instr->function()).is(a1));
3427  ASSERT(ToRegister(instr->result()).is(v0));
3428 
3429  int arity = instr->arity();
3430  CallFunctionStub stub(arity, NO_CALL_FUNCTION_FLAGS);
3431  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
3433 }
3434 
3435 
3436 void LCodeGen::DoCallGlobal(LCallGlobal* instr) {
3437  ASSERT(ToRegister(instr->result()).is(v0));
3438 
3439  int arity = instr->arity();
3440  RelocInfo::Mode mode = RelocInfo::CODE_TARGET_CONTEXT;
3441  Handle<Code> ic =
3442  isolate()->stub_cache()->ComputeCallInitialize(arity, mode);
3443  __ li(a2, Operand(instr->name()));
3444  CallCode(ic, mode, instr);
3446 }
3447 
3448 
3449 void LCodeGen::DoCallKnownGlobal(LCallKnownGlobal* instr) {
3450  ASSERT(ToRegister(instr->result()).is(v0));
3451  CallKnownFunction(instr->target(),
3452  instr->arity(),
3453  instr,
3455  A1_UNINITIALIZED);
3456 }
3457 
3458 
3459 void LCodeGen::DoCallNew(LCallNew* instr) {
3460  ASSERT(ToRegister(instr->InputAt(0)).is(a1));
3461  ASSERT(ToRegister(instr->result()).is(v0));
3462 
3463  CallConstructStub stub(NO_CALL_FUNCTION_FLAGS);
3464  __ li(a0, Operand(instr->arity()));
3465  CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
3466 }
3467 
3468 
3469 void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
3470  CallRuntime(instr->function(), instr->arity(), instr);
3471 }
3472 
3473 
3474 void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
3475  Register object = ToRegister(instr->object());
3476  Register value = ToRegister(instr->value());
3477  Register scratch = scratch0();
3478  int offset = instr->offset();
3479 
3480  ASSERT(!object.is(value));
3481 
3482  if (!instr->transition().is_null()) {
3483  __ li(scratch, Operand(instr->transition()));
3484  __ sw(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
3485  if (instr->hydrogen()->NeedsWriteBarrierForMap()) {
3486  Register temp = ToRegister(instr->TempAt(0));
3487  // Update the write barrier for the map field.
3488  __ RecordWriteField(object,
3489  HeapObject::kMapOffset,
3490  scratch,
3491  temp,
3492  kRAHasBeenSaved,
3493  kSaveFPRegs,
3494  OMIT_REMEMBERED_SET,
3495  OMIT_SMI_CHECK);
3496  }
3497  }
3498 
3499  // Do the store.
3500  HType type = instr->hydrogen()->value()->type();
3501  SmiCheck check_needed =
3502  type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
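// If the value is statically known to be a heap object, the write barrier
// can omit its inline smi test; otherwise it must test at run time, because
// smi (immediate integer) values are not pointers and never need to be
// recorded for the garbage collector.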
3503  if (instr->is_in_object()) {
3504  __ sw(value, FieldMemOperand(object, offset));
3505  if (instr->hydrogen()->NeedsWriteBarrier()) {
3506  // Update the write barrier for the object for in-object properties.
3507  __ RecordWriteField(object,
3508  offset,
3509  value,
3510  scratch,
3511  kRAHasBeenSaved,
3512  kSaveFPRegs,
3513  EMIT_REMEMBERED_SET,
3514  check_needed);
3515  }
3516  } else {
3517  __ lw(scratch, FieldMemOperand(object, JSObject::kPropertiesOffset));
3518  __ sw(value, FieldMemOperand(scratch, offset));
3519  if (instr->hydrogen()->NeedsWriteBarrier()) {
3520  // Update the write barrier for the properties array.
3521  // object is used as a scratch register.
3522  __ RecordWriteField(scratch,
3523  offset,
3524  value,
3525  object,
3526  kRAHasBeenSaved,
3527  kSaveFPRegs,
3528  EMIT_REMEMBERED_SET,
3529  check_needed);
3530  }
3531  }
3532 }
3533 
3534 
3535 void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
3536  ASSERT(ToRegister(instr->object()).is(a1));
3537  ASSERT(ToRegister(instr->value()).is(a0));
3538 
3539  // Name is always in a2.
3540  __ li(a2, Operand(instr->name()));
3541  Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
3542  ? isolate()->builtins()->StoreIC_Initialize_Strict()
3543  : isolate()->builtins()->StoreIC_Initialize();
3544  CallCode(ic, RelocInfo::CODE_TARGET, instr);
3545 }
3546 
3547 
3548 void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
3549  DeoptimizeIf(hs,
3550  instr->environment(),
3551  ToRegister(instr->index()),
3552  Operand(ToRegister(instr->length())));
3553 }
3554 
3555 
3556 void LCodeGen::DoStoreKeyedFastElement(LStoreKeyedFastElement* instr) {
3557  Register value = ToRegister(instr->value());
3558  Register elements = ToRegister(instr->object());
3559  Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) : no_reg;
3560  Register scratch = scratch0();
3561 
3562  // Do the store.
3563  if (instr->key()->IsConstantOperand()) {
3564  ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
3565  LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
3566  int offset =
3567  (ToInteger32(const_operand) + instr->additional_index()) * kPointerSize
3568  + FixedArray::kHeaderSize;
3569  __ sw(value, FieldMemOperand(elements, offset));
3570  } else {
3571  __ sll(scratch, key, kPointerSizeLog2);
3572  __ addu(scratch, elements, scratch);
3573  if (instr->additional_index() != 0) {
3574  __ Addu(scratch,
3575  scratch,
3576  instr->additional_index() << kPointerSizeLog2);
3577  }
3578  __ sw(value, FieldMemOperand(scratch, FixedArray::kHeaderSize));
3579  }
3580 
3581  if (instr->hydrogen()->NeedsWriteBarrier()) {
3582  HType type = instr->hydrogen()->value()->type();
3583  SmiCheck check_needed =
3584  type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
3585  // Compute address of modified element and store it into key register.
3586  __ Addu(key, scratch, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
3587  __ RecordWrite(elements,
3588  key,
3589  value,
3590  kRAHasBeenSaved,
3591  kSaveFPRegs,
3592  EMIT_REMEMBERED_SET,
3593  check_needed);
3594  }
3595 }
3596 
3597 
3598 void LCodeGen::DoStoreKeyedFastDoubleElement(
3599  LStoreKeyedFastDoubleElement* instr) {
3600  DoubleRegister value = ToDoubleRegister(instr->value());
3601  Register elements = ToRegister(instr->elements());
3602  Register key = no_reg;
3603  Register scratch = scratch0();
3604  bool key_is_constant = instr->key()->IsConstantOperand();
3605  int constant_key = 0;
3606  Label not_nan;
3607 
3608  // Calculate the effective address of the slot in the array to store the
3609  // double value.
3610  if (key_is_constant) {
3611  constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
3612  if (constant_key & 0xF0000000) {
3613  Abort("array index constant value too big.");
3614  }
3615  } else {
3616  key = ToRegister(instr->key());
3617  }
3618  int shift_size = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
3619  if (key_is_constant) {
3620  __ Addu(scratch, elements, Operand((constant_key << shift_size) +
3621  FixedDoubleArray::kHeaderSize - kHeapObjectTag));
3622  } else {
3623  __ sll(scratch, key, shift_size);
3624  __ Addu(scratch, elements, Operand(scratch));
3625  __ Addu(scratch, scratch,
3626  Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
3627  }
3628 
3629  if (instr->NeedsCanonicalization()) {
3630  Label is_nan;
3631  // Check for NaN. All NaNs must be canonicalized.
3632  __ BranchF(NULL, &is_nan, eq, value, value);
3633  __ Branch(&not_nan);
3634 
3635  // Only load canonical NaN if the comparison above set the overflow.
3636  __ bind(&is_nan);
3637  __ Move(value, FixedDoubleArray::canonical_not_the_hole_nan_as_double());
3638  }
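// Canonicalizing NaNs matters here because FixedDoubleArray reserves one
// particular NaN bit pattern to represent "the hole"; storing an arbitrary
// NaN payload unchanged could later be mistaken for a hole.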
3639 
3640  __ bind(&not_nan);
3641  __ sdc1(value, MemOperand(scratch, instr->additional_index() << shift_size));
3642 }
3643 
3644 
3645 void LCodeGen::DoStoreKeyedSpecializedArrayElement(
3646  LStoreKeyedSpecializedArrayElement* instr) {
3647 
3648  Register external_pointer = ToRegister(instr->external_pointer());
3649  Register key = no_reg;
3650  ElementsKind elements_kind = instr->elements_kind();
3651  bool key_is_constant = instr->key()->IsConstantOperand();
3652  int constant_key = 0;
3653  if (key_is_constant) {
3654  constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
3655  if (constant_key & 0xF0000000) {
3656  Abort("array index constant value too big.");
3657  }
3658  } else {
3659  key = ToRegister(instr->key());
3660  }
3661  int shift_size = ElementsKindToShiftSize(elements_kind);
3662  int additional_offset = instr->additional_index() << shift_size;
3663 
3664  if (elements_kind == EXTERNAL_FLOAT_ELEMENTS ||
3665  elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
3666  FPURegister value(ToDoubleRegister(instr->value()));
3667  if (key_is_constant) {
3668  __ Addu(scratch0(), external_pointer, constant_key << shift_size);
3669  } else {
3670  __ sll(scratch0(), key, shift_size);
3671  __ Addu(scratch0(), scratch0(), external_pointer);
3672  }
3673 
3674  if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
3675  __ cvt_s_d(double_scratch0(), value);
3676  __ swc1(double_scratch0(), MemOperand(scratch0(), additional_offset));
3677  } else { // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS
3678  __ sdc1(value, MemOperand(scratch0(), additional_offset));
3679  }
3680  } else {
3681  Register value(ToRegister(instr->value()));
3682  Register scratch = scratch0();
3683  if (instr->additional_index() != 0 && !key_is_constant) {
3684  __ Addu(scratch, key, instr->additional_index());
3685  }
3686  MemOperand mem_operand(zero_reg);
3687  if (key_is_constant) {
3688  mem_operand = MemOperand(external_pointer,
3689  ((constant_key + instr->additional_index())
3690  << shift_size));
3691  } else {
3692  if (instr->additional_index() == 0) {
3693  __ sll(scratch, key, shift_size);
3694  } else {
3695  __ sll(scratch, scratch, shift_size);
3696  }
3697  __ Addu(scratch, scratch, external_pointer);
3698  mem_operand = MemOperand(scratch);
3699  }
3700  switch (elements_kind) {
3701  case EXTERNAL_PIXEL_ELEMENTS:
3702  case EXTERNAL_BYTE_ELEMENTS:
3703  case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
3704  __ sb(value, mem_operand);
3705  break;
3706  case EXTERNAL_SHORT_ELEMENTS:
3707  case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
3708  __ sh(value, mem_operand);
3709  break;
3710  case EXTERNAL_INT_ELEMENTS:
3711  case EXTERNAL_UNSIGNED_INT_ELEMENTS:
3712  __ sw(value, mem_operand);
3713  break;
3714  case EXTERNAL_FLOAT_ELEMENTS:
3715  case EXTERNAL_DOUBLE_ELEMENTS:
3716  case FAST_DOUBLE_ELEMENTS:
3717  case FAST_ELEMENTS:
3718  case FAST_SMI_ELEMENTS:
3719  case FAST_HOLEY_DOUBLE_ELEMENTS:
3720  case FAST_HOLEY_ELEMENTS:
3721  case FAST_HOLEY_SMI_ELEMENTS:
3722  case DICTIONARY_ELEMENTS:
3723  case NON_STRICT_ARGUMENTS_ELEMENTS:
3724  UNREACHABLE();
3725  break;
3726  }
3727  }
3728 }
3729 
3730 void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
3731  ASSERT(ToRegister(instr->object()).is(a2));
3732  ASSERT(ToRegister(instr->key()).is(a1));
3733  ASSERT(ToRegister(instr->value()).is(a0));
3734 
3735  Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
3736  ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
3737  : isolate()->builtins()->KeyedStoreIC_Initialize();
3738  CallCode(ic, RelocInfo::CODE_TARGET, instr);
3739 }
3740 
3741 
3742 void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
3743  Register object_reg = ToRegister(instr->object());
3744  Register new_map_reg = ToRegister(instr->new_map_reg());
3745  Register scratch = scratch0();
3746 
3747  Handle<Map> from_map = instr->original_map();
3748  Handle<Map> to_map = instr->transitioned_map();
3749  ElementsKind from_kind = from_map->elements_kind();
3750  ElementsKind to_kind = to_map->elements_kind();
3751 
3752  __ mov(ToRegister(instr->result()), object_reg);
3753 
3754  Label not_applicable;
3755  __ lw(scratch, FieldMemOperand(object_reg, HeapObject::kMapOffset));
3756  __ Branch(&not_applicable, ne, scratch, Operand(from_map));
3757 
3758  __ li(new_map_reg, Operand(to_map));
3759  if (IsFastSmiElementsKind(from_kind) && IsFastObjectElementsKind(to_kind)) {
3760  __ sw(new_map_reg, FieldMemOperand(object_reg, HeapObject::kMapOffset));
3761  // Write barrier.
3762  __ RecordWriteField(object_reg, HeapObject::kMapOffset, new_map_reg,
3763  scratch, kRAHasBeenSaved, kDontSaveFPRegs);
3764  } else if (IsFastSmiElementsKind(from_kind) &&
3765  IsFastDoubleElementsKind(to_kind)) {
3766  Register fixed_object_reg = ToRegister(instr->temp_reg());
3767  ASSERT(fixed_object_reg.is(a2));
3768  ASSERT(new_map_reg.is(a3));
3769  __ mov(fixed_object_reg, object_reg);
3770  CallCode(isolate()->builtins()->TransitionElementsSmiToDouble(),
3771  RelocInfo::CODE_TARGET, instr);
3772  } else if (IsFastDoubleElementsKind(from_kind) &&
3773  IsFastObjectElementsKind(to_kind)) {
3774  Register fixed_object_reg = ToRegister(instr->temp_reg());
3775  ASSERT(fixed_object_reg.is(a2));
3776  ASSERT(new_map_reg.is(a3));
3777  __ mov(fixed_object_reg, object_reg);
3778  CallCode(isolate()->builtins()->TransitionElementsDoubleToObject(),
3779  RelocInfo::CODE_TARGET, instr);
3780  } else {
3781  UNREACHABLE();
3782  }
3783  __ bind(&not_applicable);
3784 }
3785 
3786 
3787 void LCodeGen::DoStringAdd(LStringAdd* instr) {
3788  __ push(ToRegister(instr->left()));
3789  __ push(ToRegister(instr->right()));
3790  StringAddStub stub(NO_STRING_CHECK_IN_STUB);
3791  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
3792 }
3793 
3794 
3795 void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
3796  class DeferredStringCharCodeAt: public LDeferredCode {
3797  public:
3798  DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
3799  : LDeferredCode(codegen), instr_(instr) { }
3800  virtual void Generate() { codegen()->DoDeferredStringCharCodeAt(instr_); }
3801  virtual LInstruction* instr() { return instr_; }
3802  private:
3803  LStringCharCodeAt* instr_;
3804  };
3805 
3806  DeferredStringCharCodeAt* deferred =
3807  new(zone()) DeferredStringCharCodeAt(this, instr);
3808  StringCharLoadGenerator::Generate(masm(),
3809  ToRegister(instr->string()),
3810  ToRegister(instr->index()),
3811  ToRegister(instr->result()),
3812  deferred->entry());
3813  __ bind(deferred->exit());
3814 }
3815 
3816 
3817 void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
3818  Register string = ToRegister(instr->string());
3819  Register result = ToRegister(instr->result());
3820  Register scratch = scratch0();
3821 
3822  // TODO(3095996): Get rid of this. For now, we need to make the
3823  // result register contain a valid pointer because it is already
3824  // contained in the register pointer map.
3825  __ mov(result, zero_reg);
3826 
3827  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
3828  __ push(string);
3829  // Push the index as a smi. This is safe because of the checks in
3830  // DoStringCharCodeAt above.
3831  if (instr->index()->IsConstantOperand()) {
3832  int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
3833  __ Addu(scratch, zero_reg, Operand(Smi::FromInt(const_index)));
3834  __ push(scratch);
3835  } else {
3836  Register index = ToRegister(instr->index());
3837  __ SmiTag(index);
3838  __ push(index);
3839  }
3840  CallRuntimeFromDeferred(Runtime::kStringCharCodeAt, 2, instr);
3841  if (FLAG_debug_code) {
3842  __ AbortIfNotSmi(v0);
3843  }
3844  __ SmiUntag(v0);
3845  __ StoreToSafepointRegisterSlot(v0, result);
3846 }
3847 
3848 
3849 void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
3850  class DeferredStringCharFromCode: public LDeferredCode {
3851  public:
3852  DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
3853  : LDeferredCode(codegen), instr_(instr) { }
3854  virtual void Generate() { codegen()->DoDeferredStringCharFromCode(instr_); }
3855  virtual LInstruction* instr() { return instr_; }
3856  private:
3857  LStringCharFromCode* instr_;
3858  };
3859 
3860  DeferredStringCharFromCode* deferred =
3861  new(zone()) DeferredStringCharFromCode(this, instr);
3862 
3863  ASSERT(instr->hydrogen()->value()->representation().IsInteger32());
3864  Register char_code = ToRegister(instr->char_code());
3865  Register result = ToRegister(instr->result());
3866  Register scratch = scratch0();
3867  ASSERT(!char_code.is(result));
3868 
3869  __ Branch(deferred->entry(), hi,
3870  char_code, Operand(String::kMaxAsciiCharCode));
3871  __ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex);
3872  __ sll(scratch, char_code, kPointerSizeLog2);
3873  __ Addu(result, result, scratch);
3874  __ lw(result, FieldMemOperand(result, FixedArray::kHeaderSize));
3875  __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
3876  __ Branch(deferred->entry(), eq, result, Operand(scratch));
3877  __ bind(deferred->exit());
3878 }
3879 
3880 
3881 void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
3882  Register char_code = ToRegister(instr->char_code());
3883  Register result = ToRegister(instr->result());
3884 
3885  // TODO(3095996): Get rid of this. For now, we need to make the
3886  // result register contain a valid pointer because it is already
3887  // contained in the register pointer map.
3888  __ mov(result, zero_reg);
3889 
3890  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
3891  __ SmiTag(char_code);
3892  __ push(char_code);
3893  CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr);
3894  __ StoreToSafepointRegisterSlot(v0, result);
3895 }
3896 
3897 
3898 void LCodeGen::DoStringLength(LStringLength* instr) {
3899  Register string = ToRegister(instr->InputAt(0));
3900  Register result = ToRegister(instr->result());
3901  __ lw(result, FieldMemOperand(string, String::kLengthOffset));
3902 }
3903 
3904 
3905 void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
3906  LOperand* input = instr->InputAt(0);
3907  ASSERT(input->IsRegister() || input->IsStackSlot());
3908  LOperand* output = instr->result();
3909  ASSERT(output->IsDoubleRegister());
3910  FPURegister single_scratch = double_scratch0().low();
3911  if (input->IsStackSlot()) {
3912  Register scratch = scratch0();
3913  __ lw(scratch, ToMemOperand(input));
3914  __ mtc1(scratch, single_scratch);
3915  } else {
3916  __ mtc1(ToRegister(input), single_scratch);
3917  }
3918  __ cvt_d_w(ToDoubleRegister(output), single_scratch);
3919 }
3920 
3921 
3922 void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
3923  class DeferredNumberTagI: public LDeferredCode {
3924  public:
3925  DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr)
3926  : LDeferredCode(codegen), instr_(instr) { }
3927  virtual void Generate() { codegen()->DoDeferredNumberTagI(instr_); }
3928  virtual LInstruction* instr() { return instr_; }
3929  private:
3930  LNumberTagI* instr_;
3931  };
3932 
3933  Register src = ToRegister(instr->InputAt(0));
3934  Register dst = ToRegister(instr->result());
3935  Register overflow = scratch0();
3936 
3937  DeferredNumberTagI* deferred = new(zone()) DeferredNumberTagI(this, instr);
3938  __ SmiTagCheckOverflow(dst, src, overflow);
3939  __ BranchOnOverflow(deferred->entry(), overflow);
3940  __ bind(deferred->exit());
3941 }
3942 
3943 
3944 void LCodeGen::DoDeferredNumberTagI(LNumberTagI* instr) {
3945  Label slow;
3946  Register src = ToRegister(instr->InputAt(0));
3947  Register dst = ToRegister(instr->result());
3948  FPURegister dbl_scratch = double_scratch0();
3949 
3950  // Preserve the value of all registers.
3951  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
3952 
3953  // There was overflow, so bits 30 and 31 of the original integer
3954  // disagree. Try to allocate a heap number in new space and store
3955  // the value in there. If that fails, call the runtime system.
3956  Label done;
3957  if (dst.is(src)) {
3958  __ SmiUntag(src, dst);
3959  __ Xor(src, src, Operand(0x80000000));
3960  }
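// Note: when SmiTagCheckOverflow overflowed, bits 30 and 31 of the original
// value disagreed, so the arithmetic shift in SmiUntag reproduces bit 30 in
// the sign position; flipping bit 31 with the Xor above therefore recovers
// the original int32 value before it is converted to a double.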
3961  __ mtc1(src, dbl_scratch);
3962  __ cvt_d_w(dbl_scratch, dbl_scratch);
3963  if (FLAG_inline_new) {
3964  __ LoadRoot(t2, Heap::kHeapNumberMapRootIndex);
3965  __ AllocateHeapNumber(t1, a3, t0, t2, &slow);
3966  __ Move(dst, t1);
3967  __ Branch(&done);
3968  }
3969 
3970  // Slow case: Call the runtime system to do the number allocation.
3971  __ bind(&slow);
3972 
3973  // TODO(3095996): Put a valid pointer value in the stack slot where the result
3974  // register is stored, as this register is in the pointer map, but contains an
3975  // integer value.
3976  __ StoreToSafepointRegisterSlot(zero_reg, dst);
3977  CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
3978  __ Move(dst, v0);
3979 
3980  // Done. Put the value in dbl_scratch into the value of the allocated heap
3981  // number.
3982  __ bind(&done);
3983  __ sdc1(dbl_scratch, FieldMemOperand(dst, HeapNumber::kValueOffset));
3984  __ StoreToSafepointRegisterSlot(dst, dst);
3985 }
3986 
3987 
3988 void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
3989  class DeferredNumberTagD: public LDeferredCode {
3990  public:
3991  DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
3992  : LDeferredCode(codegen), instr_(instr) { }
3993  virtual void Generate() { codegen()->DoDeferredNumberTagD(instr_); }
3994  virtual LInstruction* instr() { return instr_; }
3995  private:
3996  LNumberTagD* instr_;
3997  };
3998 
3999  DoubleRegister input_reg = ToDoubleRegister(instr->InputAt(0));
4000  Register scratch = scratch0();
4001  Register reg = ToRegister(instr->result());
4002  Register temp1 = ToRegister(instr->TempAt(0));
4003  Register temp2 = ToRegister(instr->TempAt(1));
4004 
4005  DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr);
4006  if (FLAG_inline_new) {
4007  __ LoadRoot(scratch, Heap::kHeapNumberMapRootIndex);
4008  __ AllocateHeapNumber(reg, temp1, temp2, scratch, deferred->entry());
4009  } else {
4010  __ Branch(deferred->entry());
4011  }
4012  __ bind(deferred->exit());
4013  __ sdc1(input_reg, FieldMemOperand(reg, HeapNumber::kValueOffset));
4014 }
4015 
4016 
4017 void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
4018  // TODO(3095996): Get rid of this. For now, we need to make the
4019  // result register contain a valid pointer because it is already
4020  // contained in the register pointer map.
4021  Register reg = ToRegister(instr->result());
4022  __ mov(reg, zero_reg);
4023 
4024  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
4025  CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
4026  __ StoreToSafepointRegisterSlot(v0, reg);
4027 }
4028 
4029 
4030 void LCodeGen::DoSmiTag(LSmiTag* instr) {
4031  ASSERT(!instr->hydrogen_value()->CheckFlag(HValue::kCanOverflow));
4032  __ SmiTag(ToRegister(instr->result()), ToRegister(instr->InputAt(0)));
4033 }
4034 
4035 
4036 void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
4037  Register scratch = scratch0();
4038  Register input = ToRegister(instr->InputAt(0));
4039  Register result = ToRegister(instr->result());
4040  if (instr->needs_check()) {
4042  // If the input is a HeapObject, value of scratch won't be zero.
4043  __ And(scratch, input, Operand(kHeapObjectTag));
4044  __ SmiUntag(result, input);
4045  DeoptimizeIf(ne, instr->environment(), scratch, Operand(zero_reg));
4046  } else {
4047  __ SmiUntag(result, input);
4048  }
4049 }
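// Reminder on the tagging scheme used above: on this 32-bit port a smi is
// stored as (value << 1) with the low bit clear, while heap object pointers
// carry kHeapObjectTag (1) in the low bit.  The And therefore yields zero
// exactly for smis, and SmiUntag is simply an arithmetic shift right by one.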
4050 
4051 
4052 void LCodeGen::EmitNumberUntagD(Register input_reg,
4053  DoubleRegister result_reg,
4054  bool deoptimize_on_undefined,
4055  bool deoptimize_on_minus_zero,
4056  LEnvironment* env) {
4057  Register scratch = scratch0();
4058 
4059  Label load_smi, heap_number, done;
4060 
4061  // Smi check.
4062  __ UntagAndJumpIfSmi(scratch, input_reg, &load_smi);
4063 
4064  // Heap number map check.
4065  __ lw(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
4066  __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
4067  if (deoptimize_on_undefined) {
4068  DeoptimizeIf(ne, env, scratch, Operand(at));
4069  } else {
4070  Label heap_number;
4071  __ Branch(&heap_number, eq, scratch, Operand(at));
4072 
4073  __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
4074  DeoptimizeIf(ne, env, input_reg, Operand(at));
4075 
4076  // Convert undefined to NaN.
4077  __ LoadRoot(at, Heap::kNanValueRootIndex);
4078  __ ldc1(result_reg, FieldMemOperand(at, HeapNumber::kValueOffset));
4079  __ Branch(&done);
4080 
4081  __ bind(&heap_number);
4082  }
4083  // Heap number to double register conversion.
4084  __ ldc1(result_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset));
4085  if (deoptimize_on_minus_zero) {
4086  __ mfc1(at, result_reg.low());
4087  __ Branch(&done, ne, at, Operand(zero_reg));
4088  __ mfc1(scratch, result_reg.high());
4089  DeoptimizeIf(eq, env, scratch, Operand(HeapNumber::kSignMask));
4090  }
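// The minus-zero test above relies on the IEEE-754 encoding: -0.0 has an
// all-zero low word and a high word equal to HeapNumber::kSignMask
// (0x80000000), so any nonzero low word lets the value through and only an
// exact sign-mask high word triggers the deopt.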
4091  __ Branch(&done);
4092 
4093  // Smi to double register conversion
4094  __ bind(&load_smi);
4095  // scratch: untagged value of input_reg
4096  __ mtc1(scratch, result_reg);
4097  __ cvt_d_w(result_reg, result_reg);
4098  __ bind(&done);
4099 }
4100 
4101 
4102 void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
4103  Register input_reg = ToRegister(instr->InputAt(0));
4104  Register scratch1 = scratch0();
4105  Register scratch2 = ToRegister(instr->TempAt(0));
4106  DoubleRegister double_scratch = double_scratch0();
4107  FPURegister single_scratch = double_scratch.low();
4108 
4109  ASSERT(!scratch1.is(input_reg) && !scratch1.is(scratch2));
4110  ASSERT(!scratch2.is(input_reg) && !scratch2.is(scratch1));
4111 
4112  Label done;
4113 
4114  // The input is a tagged HeapObject.
4115  // Heap number map check.
4116  __ lw(scratch1, FieldMemOperand(input_reg, HeapObject::kMapOffset));
4117  __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
4118  // This 'at' value and scratch1 map value are used for tests in both clauses
4119  // of the if.
4120 
4121  if (instr->truncating()) {
4122  Register scratch3 = ToRegister(instr->TempAt(1));
4123  DoubleRegister double_scratch2 = ToDoubleRegister(instr->TempAt(2));
4124  ASSERT(!scratch3.is(input_reg) &&
4125  !scratch3.is(scratch1) &&
4126  !scratch3.is(scratch2));
4127  // Performs a truncating conversion of a floating point number as used by
4128  // the JS bitwise operations.
4129  Label heap_number;
4130  __ Branch(&heap_number, eq, scratch1, Operand(at)); // HeapNumber map?
4131  // Check for undefined. Undefined is converted to zero for truncating
4132  // conversions.
4133  __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
4134  DeoptimizeIf(ne, instr->environment(), input_reg, Operand(at));
4135  ASSERT(ToRegister(instr->result()).is(input_reg));
4136  __ mov(input_reg, zero_reg);
4137  __ Branch(&done);
4138 
4139  __ bind(&heap_number);
4140  __ ldc1(double_scratch2,
4141  FieldMemOperand(input_reg, HeapNumber::kValueOffset));
4142  __ EmitECMATruncate(input_reg,
4143  double_scratch2,
4144  single_scratch,
4145  scratch1,
4146  scratch2,
4147  scratch3);
4148  } else {
4149  // Deoptimize if we don't have a heap number.
4150  DeoptimizeIf(ne, instr->environment(), scratch1, Operand(at));
4151 
4152  // Load the double value.
4153  __ ldc1(double_scratch,
4154  FieldMemOperand(input_reg, HeapNumber::kValueOffset));
4155 
4156  Register except_flag = scratch2;
4157  __ EmitFPUTruncate(kRoundToZero,
4158  single_scratch,
4159  double_scratch,
4160  scratch1,
4161  except_flag,
4162  kCheckForInexactConversion);
4163 
4164  // Deopt if the operation did not succeed.
4165  DeoptimizeIf(ne, instr->environment(), except_flag, Operand(zero_reg));
4166 
4167  // Load the result.
4168  __ mfc1(input_reg, single_scratch);
4169 
4170  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
4171  __ Branch(&done, ne, input_reg, Operand(zero_reg));
4172 
4173  __ mfc1(scratch1, double_scratch.high());
4174  __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
4175  DeoptimizeIf(ne, instr->environment(), scratch1, Operand(zero_reg));
4176  }
4177  }
4178  __ bind(&done);
4179 }
4180 
4181 
4182 void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
4183  class DeferredTaggedToI: public LDeferredCode {
4184  public:
4185  DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
4186  : LDeferredCode(codegen), instr_(instr) { }
4187  virtual void Generate() { codegen()->DoDeferredTaggedToI(instr_); }
4188  virtual LInstruction* instr() { return instr_; }
4189  private:
4190  LTaggedToI* instr_;
4191  };
4192 
4193  LOperand* input = instr->InputAt(0);
4194  ASSERT(input->IsRegister());
4195  ASSERT(input->Equals(instr->result()));
4196 
4197  Register input_reg = ToRegister(input);
4198 
4199  DeferredTaggedToI* deferred = new(zone()) DeferredTaggedToI(this, instr);
4200 
4201  // Let the deferred code handle the HeapObject case.
4202  __ JumpIfNotSmi(input_reg, deferred->entry());
4203 
4204  // Smi to int32 conversion.
4205  __ SmiUntag(input_reg);
4206  __ bind(deferred->exit());
4207 }
4208 
4209 
4210 void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
4211  LOperand* input = instr->InputAt(0);
4212  ASSERT(input->IsRegister());
4213  LOperand* result = instr->result();
4214  ASSERT(result->IsDoubleRegister());
4215 
4216  Register input_reg = ToRegister(input);
4217  DoubleRegister result_reg = ToDoubleRegister(result);
4218 
4219  EmitNumberUntagD(input_reg, result_reg,
4220  instr->hydrogen()->deoptimize_on_undefined(),
4221  instr->hydrogen()->deoptimize_on_minus_zero(),
4222  instr->environment());
4223 }
4224 
4225 
4226 void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
4227  Register result_reg = ToRegister(instr->result());
4228  Register scratch1 = scratch0();
4229  Register scratch2 = ToRegister(instr->TempAt(0));
4230  DoubleRegister double_input = ToDoubleRegister(instr->InputAt(0));
4231  FPURegister single_scratch = double_scratch0().low();
4232 
4233  if (instr->truncating()) {
4234  Register scratch3 = ToRegister(instr->TempAt(1));
4235  __ EmitECMATruncate(result_reg,
4236  double_input,
4237  single_scratch,
4238  scratch1,
4239  scratch2,
4240  scratch3);
4241  } else {
4242  Register except_flag = scratch2;
4243 
4244  __ EmitFPUTruncate(kRoundToMinusInf,
4245  single_scratch,
4246  double_input,
4247  scratch1,
4248  except_flag,
4249  kCheckForInexactConversion);
4250 
4251  // Deopt if the operation did not succeed (except_flag != 0).
4252  DeoptimizeIf(ne, instr->environment(), except_flag, Operand(zero_reg));
4253 
4254  // Load the result.
4255  __ mfc1(result_reg, single_scratch);
4256  }
4257 }
4258 
4259 
4260 void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
4261  LOperand* input = instr->InputAt(0);
4262  __ And(at, ToRegister(input), Operand(kSmiTagMask));
4263  DeoptimizeIf(ne, instr->environment(), at, Operand(zero_reg));
4264 }
4265 
4266 
4267 void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
4268  LOperand* input = instr->InputAt(0);
4269  __ And(at, ToRegister(input), Operand(kSmiTagMask));
4270  DeoptimizeIf(eq, instr->environment(), at, Operand(zero_reg));
4271 }
4272 
4273 
4274 void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
4275  Register input = ToRegister(instr->InputAt(0));
4276  Register scratch = scratch0();
4277 
4278  __ GetObjectType(input, scratch, scratch);
4279 
4280  if (instr->hydrogen()->is_interval_check()) {
4281  InstanceType first;
4282  InstanceType last;
4283  instr->hydrogen()->GetCheckInterval(&first, &last);
4284 
4285  // If there is only one type in the interval check for equality.
4286  if (first == last) {
4287  DeoptimizeIf(ne, instr->environment(), scratch, Operand(first));
4288  } else {
4289  DeoptimizeIf(lo, instr->environment(), scratch, Operand(first));
4290  // Omit check for the last type.
4291  if (last != LAST_TYPE) {
4292  DeoptimizeIf(hi, instr->environment(), scratch, Operand(last));
4293  }
4294  }
4295  } else {
4296  uint8_t mask;
4297  uint8_t tag;
4298  instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);
4299 
4300  if (IsPowerOf2(mask)) {
4301  ASSERT(tag == 0 || IsPowerOf2(tag));
4302  __ And(at, scratch, mask);
4303  DeoptimizeIf(tag == 0 ? ne : eq, instr->environment(),
4304  at, Operand(zero_reg));
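// When the mask is a single bit the tag can only be 0 or the mask itself,
// so testing the And result against zero (with the branch sense chosen by
// 'tag == 0') is sufficient; the general path below must compare the masked
// instance type against the full tag value.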
4305  } else {
4306  __ And(scratch, scratch, Operand(mask));
4307  DeoptimizeIf(ne, instr->environment(), scratch, Operand(tag));
4308  }
4309  }
4310 }
4311 
4312 
4313 void LCodeGen::DoCheckFunction(LCheckFunction* instr) {
4314  Register reg = ToRegister(instr->value());
4315  Handle<JSFunction> target = instr->hydrogen()->target();
4316  if (isolate()->heap()->InNewSpace(*target)) {
4317  Register reg = ToRegister(instr->value());
4318  Handle<JSGlobalPropertyCell> cell =
4319  isolate()->factory()->NewJSGlobalPropertyCell(target);
4320  __ li(at, Operand(Handle<Object>(cell)));
4321  __ lw(at, FieldMemOperand(at, JSGlobalPropertyCell::kValueOffset));
4322  DeoptimizeIf(ne, instr->environment(), reg,
4323  Operand(at));
4324  } else {
4325  DeoptimizeIf(ne, instr->environment(), reg,
4326  Operand(target));
4327  }
4328 }
4329 
4330 
4331 void LCodeGen::DoCheckMapCommon(Register reg,
4332  Register scratch,
4333  Handle<Map> map,
4334  CompareMapMode mode,
4335  LEnvironment* env) {
4336  Label success;
4337  __ CompareMapAndBranch(reg, scratch, map, &success, eq, &success, mode);
4338  DeoptimizeIf(al, env);
4339  __ bind(&success);
4340 }
4341 
4342 
4343 void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
4344  Register scratch = scratch0();
4345  LOperand* input = instr->InputAt(0);
4346  ASSERT(input->IsRegister());
4347  Register reg = ToRegister(input);
4348  Label success;
4349  SmallMapList* map_set = instr->hydrogen()->map_set();
4350  for (int i = 0; i < map_set->length() - 1; i++) {
4351  Handle<Map> map = map_set->at(i);
4352  __ CompareMapAndBranch(
4353  reg, scratch, map, &success, eq, &success, REQUIRE_EXACT_MAP);
4354  }
4355  Handle<Map> map = map_set->last();
4356  DoCheckMapCommon(reg, scratch, map, REQUIRE_EXACT_MAP, instr->environment());
4357  __ bind(&success);
4358 }
4359 
4360 
4361 void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
4362  DoubleRegister value_reg = ToDoubleRegister(instr->unclamped());
4363  Register result_reg = ToRegister(instr->result());
4364  DoubleRegister temp_reg = ToDoubleRegister(instr->TempAt(0));
4365  __ ClampDoubleToUint8(result_reg, value_reg, temp_reg);
4366 }
4367 
4368 
4369 void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
4370  Register unclamped_reg = ToRegister(instr->unclamped());
4371  Register result_reg = ToRegister(instr->result());
4372  __ ClampUint8(result_reg, unclamped_reg);
4373 }
4374 
4375 
4376 void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
4377  Register scratch = scratch0();
4378  Register input_reg = ToRegister(instr->unclamped());
4379  Register result_reg = ToRegister(instr->result());
4380  DoubleRegister temp_reg = ToDoubleRegister(instr->TempAt(0));
4381  Label is_smi, done, heap_number;
4382 
4383  // Both smi and heap number cases are handled.
4384  __ UntagAndJumpIfSmi(scratch, input_reg, &is_smi);
4385 
4386  // Check for heap number
4387  __ lw(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
4388  __ Branch(&heap_number, eq, scratch, Operand(factory()->heap_number_map()));
4389 
4390  // Check for undefined. Undefined is converted to zero for clamping
4391  // conversions.
4392  DeoptimizeIf(ne, instr->environment(), input_reg,
4393  Operand(factory()->undefined_value()));
4394  __ mov(result_reg, zero_reg);
4395  __ jmp(&done);
4396 
4397  // Heap number
4398  __ bind(&heap_number);
4399  __ ldc1(double_scratch0(), FieldMemOperand(input_reg,
4400  HeapNumber::kValueOffset));
4401  __ ClampDoubleToUint8(result_reg, double_scratch0(), temp_reg);
4402  __ jmp(&done);
4403 
4404  __ bind(&is_smi);
4405  __ ClampUint8(result_reg, scratch);
4406 
4407  __ bind(&done);
4408 }
4409 
4410 
4411 void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
4412  Register temp1 = ToRegister(instr->TempAt(0));
4413  Register temp2 = ToRegister(instr->TempAt(1));
4414 
4415  Handle<JSObject> holder = instr->holder();
4416  Handle<JSObject> current_prototype = instr->prototype();
4417 
4418  // Load prototype object.
4419  __ LoadHeapObject(temp1, current_prototype);
4420 
4421  // Check prototype maps up to the holder.
4422  while (!current_prototype.is_identical_to(holder)) {
4423  DoCheckMapCommon(temp1, temp2,
4424  Handle<Map>(current_prototype->map()),
4425  ALLOW_ELEMENT_TRANSITION_MAPS, instr->environment());
4426  current_prototype =
4427  Handle<JSObject>(JSObject::cast(current_prototype->GetPrototype()));
4428  // Load next prototype object.
4429  __ LoadHeapObject(temp1, current_prototype);
4430  }
4431 
4432  // Check the holder map.
4433  DoCheckMapCommon(temp1, temp2,
4434  Handle<Map>(current_prototype->map()),
4435  ALLOW_ELEMENT_TRANSITION_MAPS, instr->environment());
4436 }
4437 
4438 
4439 void LCodeGen::DoAllocateObject(LAllocateObject* instr) {
4440  class DeferredAllocateObject: public LDeferredCode {
4441  public:
4442  DeferredAllocateObject(LCodeGen* codegen, LAllocateObject* instr)
4443  : LDeferredCode(codegen), instr_(instr) { }
4444  virtual void Generate() { codegen()->DoDeferredAllocateObject(instr_); }
4445  virtual LInstruction* instr() { return instr_; }
4446  private:
4447  LAllocateObject* instr_;
4448  };
4449 
4450  DeferredAllocateObject* deferred =
4451  new(zone()) DeferredAllocateObject(this, instr);
4452 
4453  Register result = ToRegister(instr->result());
4454  Register scratch = ToRegister(instr->TempAt(0));
4455  Register scratch2 = ToRegister(instr->TempAt(1));
4456  Handle<JSFunction> constructor = instr->hydrogen()->constructor();
4457  Handle<Map> initial_map(constructor->initial_map());
4458  int instance_size = initial_map->instance_size();
4459  ASSERT(initial_map->pre_allocated_property_fields() +
4460  initial_map->unused_property_fields() -
4461  initial_map->inobject_properties() == 0);
4462 
4463  // Allocate memory for the object. The initial map might change when
4464  // the constructor's prototype changes, but instance size and property
4465  // counts remain unchanged (if slack tracking finished).
4466  ASSERT(!constructor->shared()->IsInobjectSlackTrackingInProgress());
4467  __ AllocateInNewSpace(instance_size,
4468  result,
4469  scratch,
4470  scratch2,
4471  deferred->entry(),
4472  TAG_OBJECT);
4473 
4474  __ bind(deferred->exit());
4475  if (FLAG_debug_code) {
4476  Label is_in_new_space;
4477  __ JumpIfInNewSpace(result, scratch, &is_in_new_space);
4478  __ Abort("Allocated object is not in new-space");
4479  __ bind(&is_in_new_space);
4480  }
4481 
4482  // Load the initial map.
4483  Register map = scratch;
4484  __ LoadHeapObject(map, constructor);
4485  __ lw(map, FieldMemOperand(map, JSFunction::kPrototypeOrInitialMapOffset));
4486 
4487  // Initialize map and fields of the newly allocated object.
4488  ASSERT(initial_map->instance_type() == JS_OBJECT_TYPE);
4489  __ sw(map, FieldMemOperand(result, JSObject::kMapOffset));
4490  __ LoadRoot(scratch, Heap::kEmptyFixedArrayRootIndex);
4491  __ sw(scratch, FieldMemOperand(result, JSObject::kElementsOffset));
4492  __ sw(scratch, FieldMemOperand(result, JSObject::kPropertiesOffset));
4493  if (initial_map->inobject_properties() != 0) {
4494  __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
4495  for (int i = 0; i < initial_map->inobject_properties(); i++) {
4496  int property_offset = JSObject::kHeaderSize + i * kPointerSize;
4497  __ sw(scratch, FieldMemOperand(result, property_offset));
4498  }
4499  }
4500 }
4501 
4502 
4503 void LCodeGen::DoDeferredAllocateObject(LAllocateObject* instr) {
4504  Register result = ToRegister(instr->result());
4505  Handle<JSFunction> constructor = instr->hydrogen()->constructor();
4506  Handle<Map> initial_map(constructor->initial_map());
4507  int instance_size = initial_map->instance_size();
4508 
4509  // TODO(3095996): Get rid of this. For now, we need to make the
4510  // result register contain a valid pointer because it is already
4511  // contained in the register pointer map.
4512  __ mov(result, zero_reg);
4513 
4514  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
4515  __ li(a0, Operand(Smi::FromInt(instance_size)));
4516  __ push(a0);
4517  CallRuntimeFromDeferred(Runtime::kAllocateInNewSpace, 1, instr);
4518  __ StoreToSafepointRegisterSlot(v0, result);
4519 }
4520 
4521 
4522 void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
4523  Heap* heap = isolate()->heap();
4524  ElementsKind boilerplate_elements_kind =
4525  instr->hydrogen()->boilerplate_elements_kind();
4526 
4527  // Deopt if the array literal boilerplate ElementsKind is of a type different
4528  // than the expected one. The check isn't necessary if the boilerplate has
4529  // already been converted to TERMINAL_FAST_ELEMENTS_KIND.
4530  if (CanTransitionToMoreGeneralFastElementsKind(
4531  boilerplate_elements_kind, true)) {
4532  __ LoadHeapObject(a1, instr->hydrogen()->boilerplate_object());
4533  // Load map into a2.
4534  __ lw(a2, FieldMemOperand(a1, HeapObject::kMapOffset));
4535  // Load the map's "bit field 2".
4536  __ lbu(a2, FieldMemOperand(a2, Map::kBitField2Offset));
4537  // Retrieve elements_kind from bit field 2.
4538  __ Ext(a2, a2, Map::kElementsKindShift, Map::kElementsKindBitCount);
4539  DeoptimizeIf(ne,
4540  instr->environment(),
4541  a2,
4542  Operand(boilerplate_elements_kind));
4543  }
4546  __ li(a2, Operand(Smi::FromInt(instr->hydrogen()->literal_index())));
4547  // Boilerplate already exists, constant elements are never accessed.
4548  // Pass an empty fixed array.
4549  __ li(a1, Operand(Handle<FixedArray>(heap->empty_fixed_array())));
4550  __ Push(a3, a2, a1);
4551 
4552  // Pick the right runtime function or stub to call.
4553  int length = instr->hydrogen()->length();
4554  if (instr->hydrogen()->IsCopyOnWrite()) {
4555  ASSERT(instr->hydrogen()->depth() == 1);
4556  FastCloneShallowArrayStub::Mode mode =
4557  FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS;
4558  FastCloneShallowArrayStub stub(mode, length);
4559  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
4560  } else if (instr->hydrogen()->depth() > 1) {
4561  CallRuntime(Runtime::kCreateArrayLiteral, 3, instr);
4562  } else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) {
4563  CallRuntime(Runtime::kCreateArrayLiteralShallow, 3, instr);
4564  } else {
4565  FastCloneShallowArrayStub::Mode mode =
4566  boilerplate_elements_kind == FAST_DOUBLE_ELEMENTS
4567  ? FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS
4568  : FastCloneShallowArrayStub::CLONE_ELEMENTS;
4569  FastCloneShallowArrayStub stub(mode, length);
4570  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
4571  }
4572 }
4573 
4574 
4575 void LCodeGen::EmitDeepCopy(Handle<JSObject> object,
4576  Register result,
4577  Register source,
4578  int* offset) {
4579  ASSERT(!source.is(a2));
4580  ASSERT(!result.is(a2));
4581 
4582  // Only elements backing stores for non-COW arrays need to be copied.
4583  Handle<FixedArrayBase> elements(object->elements());
4584  bool has_elements = elements->length() > 0 &&
4585  elements->map() != isolate()->heap()->fixed_cow_array_map();
4586 
4587  // Increase the offset so that subsequent objects end up right after
4588  // this object and its backing store.
4589  int object_offset = *offset;
4590  int object_size = object->map()->instance_size();
4591  int elements_offset = *offset + object_size;
4592  int elements_size = has_elements ? elements->Size() : 0;
4593  *offset += object_size + elements_size;
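// Everything cloned by EmitDeepCopy lives inside one pre-sized allocation:
// this object occupies [object_offset, object_offset + object_size), its
// elements store follows immediately, and bumping *offset here is what lets
// the recursive calls below place nested objects after both of them.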
4594 
4595  // Copy object header.
4596  ASSERT(object->properties()->length() == 0);
4597  int inobject_properties = object->map()->inobject_properties();
4598  int header_size = object_size - inobject_properties * kPointerSize;
4599  for (int i = 0; i < header_size; i += kPointerSize) {
4600  if (has_elements && i == JSObject::kElementsOffset) {
4601  __ Addu(a2, result, Operand(elements_offset));
4602  } else {
4603  __ lw(a2, FieldMemOperand(source, i));
4604  }
4605  __ sw(a2, FieldMemOperand(result, object_offset + i));
4606  }
4607 
4608  // Copy in-object properties.
4609  for (int i = 0; i < inobject_properties; i++) {
4610  int total_offset = object_offset + object->GetInObjectPropertyOffset(i);
4611  Handle<Object> value = Handle<Object>(object->InObjectPropertyAt(i));
4612  if (value->IsJSObject()) {
4613  Handle<JSObject> value_object = Handle<JSObject>::cast(value);
4614  __ Addu(a2, result, Operand(*offset));
4615  __ sw(a2, FieldMemOperand(result, total_offset));
4616  __ LoadHeapObject(source, value_object);
4617  EmitDeepCopy(value_object, result, source, offset);
4618  } else if (value->IsHeapObject()) {
4619  __ LoadHeapObject(a2, Handle<HeapObject>::cast(value));
4620  __ sw(a2, FieldMemOperand(result, total_offset));
4621  } else {
4622  __ li(a2, Operand(value));
4623  __ sw(a2, FieldMemOperand(result, total_offset));
4624  }
4625  }
4626 
4627 
4628  if (has_elements) {
4629  // Copy elements backing store header.
4630  __ LoadHeapObject(source, elements);
4631  for (int i = 0; i < FixedArray::kHeaderSize; i += kPointerSize) {
4632  __ lw(a2, FieldMemOperand(source, i));
4633  __ sw(a2, FieldMemOperand(result, elements_offset + i));
4634  }
4635 
4636  // Copy elements backing store content.
4637  int elements_length = has_elements ? elements->length() : 0;
4638  if (elements->IsFixedDoubleArray()) {
4639  Handle<FixedDoubleArray> double_array =
4640  Handle<FixedDoubleArray>::cast(elements);
4641  for (int i = 0; i < elements_length; i++) {
4642  int64_t value = double_array->get_representation(i);
4643  // We only support little endian mode...
4644  int32_t value_low = value & 0xFFFFFFFF;
4645  int32_t value_high = value >> 32;
4646  int total_offset =
4647  elements_offset + FixedDoubleArray::OffsetOfElementAt(i);
4648  __ li(a2, Operand(value_low));
4649  __ sw(a2, FieldMemOperand(result, total_offset));
4650  __ li(a2, Operand(value_high));
4651  __ sw(a2, FieldMemOperand(result, total_offset + 4));
4652  }
4653  } else if (elements->IsFixedArray()) {
4654  Handle<FixedArray> fast_elements = Handle<FixedArray>::cast(elements);
4655  for (int i = 0; i < elements_length; i++) {
4656  int total_offset = elements_offset + FixedArray::OffsetOfElementAt(i);
4657  Handle<Object> value(fast_elements->get(i));
4658  if (value->IsJSObject()) {
4659  Handle<JSObject> value_object = Handle<JSObject>::cast(value);
4660  __ Addu(a2, result, Operand(*offset));
4661  __ sw(a2, FieldMemOperand(result, total_offset));
4662  __ LoadHeapObject(source, value_object);
4663  EmitDeepCopy(value_object, result, source, offset);
4664  } else if (value->IsHeapObject()) {
4665  __ LoadHeapObject(a2, Handle<HeapObject>::cast(value));
4666  __ sw(a2, FieldMemOperand(result, total_offset));
4667  } else {
4668  __ li(a2, Operand(value));
4669  __ sw(a2, FieldMemOperand(result, total_offset));
4670  }
4671  }
4672  } else {
4673  UNREACHABLE();
4674  }
4675  }
4676 }
4677 
4678 
4679 void LCodeGen::DoFastLiteral(LFastLiteral* instr) {
4680  int size = instr->hydrogen()->total_size();
4681  ElementsKind boilerplate_elements_kind =
4682  instr->hydrogen()->boilerplate()->GetElementsKind();
4683 
4684  // Deopt if the array literal boilerplate ElementsKind is of a type different
4685  // than the expected one. The check isn't necessary if the boilerplate has
4686  // already been converted to TERMINAL_FAST_ELEMENTS_KIND.
4687  if (CanTransitionToMoreGeneralFastElementsKind(
4688  boilerplate_elements_kind, true)) {
4689  __ LoadHeapObject(a1, instr->hydrogen()->boilerplate());
4690  // Load map into a2.
4691  __ lw(a2, FieldMemOperand(a1, HeapObject::kMapOffset));
4692  // Load the map's "bit field 2".
4693  __ lbu(a2, FieldMemOperand(a2, Map::kBitField2Offset));
4694  // Retrieve elements_kind from bit field 2.
4695  __ Ext(a2, a2, Map::kElementsKindShift, Map::kElementsKindBitCount);
4696  DeoptimizeIf(ne, instr->environment(), a2,
4697  Operand(boilerplate_elements_kind));
4698  }
4699 
4700  // Allocate all objects that are part of the literal in one big
4701  // allocation. This avoids multiple limit checks.
4702  Label allocated, runtime_allocate;
4703  __ AllocateInNewSpace(size, v0, a2, a3, &runtime_allocate, TAG_OBJECT);
4704  __ jmp(&allocated);
4705 
4706  __ bind(&runtime_allocate);
4707  __ li(a0, Operand(Smi::FromInt(size)));
4708  __ push(a0);
4709  CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
4710 
4711  __ bind(&allocated);
4712  int offset = 0;
4713  __ LoadHeapObject(a1, instr->hydrogen()->boilerplate());
4714  EmitDeepCopy(instr->hydrogen()->boilerplate(), v0, a1, &offset);
4715  ASSERT_EQ(size, offset);
4716 }
4717 
4718 
4719 void LCodeGen::DoObjectLiteral(LObjectLiteral* instr) {
4720  ASSERT(ToRegister(instr->result()).is(v0));
4721  Handle<FixedArray> literals(instr->environment()->closure()->literals());
4722  Handle<FixedArray> constant_properties =
4723  instr->hydrogen()->constant_properties();
4724 
4725  // Set up the parameters to the stub/runtime call.
4726  __ LoadHeapObject(t0, literals);
4727  __ li(a3, Operand(Smi::FromInt(instr->hydrogen()->literal_index())));
4728  __ li(a2, Operand(constant_properties));
4729  int flags = instr->hydrogen()->fast_elements()
4730  ? ObjectLiteral::kFastElements
4731  : ObjectLiteral::kNoFlags;
4732  __ li(a1, Operand(Smi::FromInt(flags)));
4733  __ Push(t0, a3, a2, a1);
4734 
4735  // Pick the right runtime function or stub to call.
4736  int properties_count = constant_properties->length() / 2;
4737  if (instr->hydrogen()->depth() > 1) {
4738  CallRuntime(Runtime::kCreateObjectLiteral, 4, instr);
4739  } else if (flags != ObjectLiteral::kFastElements ||
4740  properties_count > FastCloneShallowObjectStub::kMaximumClonedProperties) {
4741  CallRuntime(Runtime::kCreateObjectLiteralShallow, 4, instr);
4742  } else {
4743  FastCloneShallowObjectStub stub(properties_count);
4744  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
4745  }
4746 }
4747 
4748 
4749 void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
4750  ASSERT(ToRegister(instr->InputAt(0)).is(a0));
4751  ASSERT(ToRegister(instr->result()).is(v0));
4752  __ push(a0);
4753  CallRuntime(Runtime::kToFastProperties, 1, instr);
4754 }
4755 
4756 
4757 void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
4758  Label materialized;
4759  // Registers will be used as follows:
4760  // a3 = JS function.
4761  // t3 = literals array.
4762  // a1 = regexp literal.
4763  // a0 = regexp literal clone.
4764  // a2 and t0-t2 are used as temporaries.
4765  __ lw(a3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
4766  __ lw(t3, FieldMemOperand(a3, JSFunction::kLiteralsOffset));
4767  int literal_offset = FixedArray::kHeaderSize +
4768  instr->hydrogen()->literal_index() * kPointerSize;
4769  __ lw(a1, FieldMemOperand(t3, literal_offset));
4770  __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
4771  __ Branch(&materialized, ne, a1, Operand(at));
4772 
4773  // Create regexp literal using runtime function
4774  // Result will be in v0.
4775  __ li(t2, Operand(Smi::FromInt(instr->hydrogen()->literal_index())));
4776  __ li(t1, Operand(instr->hydrogen()->pattern()));
4777  __ li(t0, Operand(instr->hydrogen()->flags()));
4778  __ Push(t3, t2, t1, t0);
4779  CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr);
4780  __ mov(a1, v0);
4781 
4782  __ bind(&materialized);
4783  int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
4784  Label allocated, runtime_allocate;
4785 
4786  __ AllocateInNewSpace(size, v0, a2, a3, &runtime_allocate, TAG_OBJECT);
4787  __ jmp(&allocated);
4788 
4789  __ bind(&runtime_allocate);
4790  __ li(a0, Operand(Smi::FromInt(size)));
4791  __ Push(a1, a0);
4792  CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
4793  __ pop(a1);
4794 
4795  __ bind(&allocated);
4796  // Copy the content into the newly allocated memory.
4797  // (Unroll copy loop once for better throughput).
4798  for (int i = 0; i < size - kPointerSize; i += 2 * kPointerSize) {
4799  __ lw(a3, FieldMemOperand(a1, i));
4800  __ lw(a2, FieldMemOperand(a1, i + kPointerSize));
4801  __ sw(a3, FieldMemOperand(v0, i));
4802  __ sw(a2, FieldMemOperand(v0, i + kPointerSize));
4803  }
4804  if ((size % (2 * kPointerSize)) != 0) {
4805  __ lw(a3, FieldMemOperand(a1, size - kPointerSize));
4806  __ sw(a3, FieldMemOperand(v0, size - kPointerSize));
4807  }
4808 }
4809 
4810 
4811 void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
4812  // Use the fast case closure allocation code that allocates in new
4813  // space for nested functions that don't need literals cloning.
4814  Handle<SharedFunctionInfo> shared_info = instr->shared_info();
4815  bool pretenure = instr->hydrogen()->pretenure();
4816  if (!pretenure && shared_info->num_literals() == 0) {
4817  FastNewClosureStub stub(shared_info->language_mode());
4818  __ li(a1, Operand(shared_info));
4819  __ push(a1);
4820  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
4821  } else {
4822  __ li(a2, Operand(shared_info));
4823  __ li(a1, Operand(pretenure
4824  ? factory()->true_value()
4825  : factory()->false_value()));
4826  __ Push(cp, a2, a1);
4827  CallRuntime(Runtime::kNewClosure, 3, instr);
4828  }
4829 }
4830 
4831 
4832 void LCodeGen::DoTypeof(LTypeof* instr) {
4833  ASSERT(ToRegister(instr->result()).is(v0));
4834  Register input = ToRegister(instr->InputAt(0));
4835  __ push(input);
4836  CallRuntime(Runtime::kTypeof, 1, instr);
4837 }
4838 
4839 
4840 void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
4841  Register input = ToRegister(instr->InputAt(0));
4842  int true_block = chunk_->LookupDestination(instr->true_block_id());
4843  int false_block = chunk_->LookupDestination(instr->false_block_id());
4844  Label* true_label = chunk_->GetAssemblyLabel(true_block);
4845  Label* false_label = chunk_->GetAssemblyLabel(false_block);
4846 
4847  Register cmp1 = no_reg;
4848  Operand cmp2 = Operand(no_reg);
4849 
4850  Condition final_branch_condition = EmitTypeofIs(true_label,
4851  false_label,
4852  input,
4853  instr->type_literal(),
4854  cmp1,
4855  cmp2);
4856 
4857  ASSERT(cmp1.is_valid());
4858  ASSERT(!cmp2.is_reg() || cmp2.rm().is_valid());
4859 
4860  if (final_branch_condition != kNoCondition) {
4861  EmitBranch(true_block, false_block, final_branch_condition, cmp1, cmp2);
4862  }
4863 }
4864 
4865 
4866 Condition LCodeGen::EmitTypeofIs(Label* true_label,
4867  Label* false_label,
4868  Register input,
4869  Handle<String> type_name,
4870  Register& cmp1,
4871  Operand& cmp2) {
4872  // This function utilizes the delay slot heavily. This is used to load
4873  // values that are always usable without depending on the type of the input
4874  // register.
4875  Condition final_branch_condition = kNoCondition;
4876  Register scratch = scratch0();
4877  if (type_name->Equals(heap()->number_symbol())) {
4878  __ JumpIfSmi(input, true_label);
4879  __ lw(input, FieldMemOperand(input, HeapObject::kMapOffset));
4880  __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
4881  cmp1 = input;
4882  cmp2 = Operand(at);
4883  final_branch_condition = eq;
4884 
4885  } else if (type_name->Equals(heap()->string_symbol())) {
4886  __ JumpIfSmi(input, false_label);
4887  __ GetObjectType(input, input, scratch);
4888  __ Branch(USE_DELAY_SLOT, false_label,
4889  ge, scratch, Operand(FIRST_NONSTRING_TYPE));
4890  // input is an object so we can load the BitFieldOffset even if we take the
4891  // other branch.
4892  __ lbu(at, FieldMemOperand(input, Map::kBitFieldOffset));
4893  __ And(at, at, 1 << Map::kIsUndetectable);
4894  cmp1 = at;
4895  cmp2 = Operand(zero_reg);
4896  final_branch_condition = eq;
4897 
4898  } else if (type_name->Equals(heap()->boolean_symbol())) {
4899  __ LoadRoot(at, Heap::kTrueValueRootIndex);
4900  __ Branch(USE_DELAY_SLOT, true_label, eq, at, Operand(input));
4901  __ LoadRoot(at, Heap::kFalseValueRootIndex);
4902  cmp1 = at;
4903  cmp2 = Operand(input);
4904  final_branch_condition = eq;
4905 
4906  } else if (FLAG_harmony_typeof && type_name->Equals(heap()->null_symbol())) {
4907  __ LoadRoot(at, Heap::kNullValueRootIndex);
4908  cmp1 = at;
4909  cmp2 = Operand(input);
4910  final_branch_condition = eq;
4911 
4912  } else if (type_name->Equals(heap()->undefined_symbol())) {
4913  __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
4914  __ Branch(USE_DELAY_SLOT, true_label, eq, at, Operand(input));
4915  // The first instruction of JumpIfSmi is an And - it is safe in the delay
4916  // slot.
4917  __ JumpIfSmi(input, false_label);
4918  // Check for undetectable objects => true.
4919  __ lw(input, FieldMemOperand(input, HeapObject::kMapOffset));
4920  __ lbu(at, FieldMemOperand(input, Map::kBitFieldOffset));
4921  __ And(at, at, 1 << Map::kIsUndetectable);
4922  cmp1 = at;
4923  cmp2 = Operand(zero_reg);
4924  final_branch_condition = ne;
4925 
4926  } else if (type_name->Equals(heap()->function_symbol())) {
4927  STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
4928  __ JumpIfSmi(input, false_label);
4929  __ GetObjectType(input, scratch, input);
4930  __ Branch(true_label, eq, input, Operand(JS_FUNCTION_TYPE));
4931  cmp1 = input;
4932  cmp2 = Operand(JS_FUNCTION_PROXY_TYPE);
4933  final_branch_condition = eq;
4934 
4935  } else if (type_name->Equals(heap()->object_symbol())) {
4936  __ JumpIfSmi(input, false_label);
4937  if (!FLAG_harmony_typeof) {
4938  __ LoadRoot(at, Heap::kNullValueRootIndex);
4939  __ Branch(USE_DELAY_SLOT, true_label, eq, at, Operand(input));
4940  }
4941  // input is an object, it is safe to use GetObjectType in the delay slot.
4942  __ GetObjectType(input, input, scratch);
4943  __ Branch(USE_DELAY_SLOT, false_label,
4944  lt, scratch, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
4945  // Still an object, so the InstanceType can be loaded.
4946  __ lbu(scratch, FieldMemOperand(input, Map::kInstanceTypeOffset));
4947  __ Branch(USE_DELAY_SLOT, false_label,
4948  gt, scratch, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
4949  // Still an object, so the BitField can be loaded.
4950  // Check for undetectable objects => false.
4951  __ lbu(at, FieldMemOperand(input, Map::kBitFieldOffset));
4952  __ And(at, at, 1 << Map::kIsUndetectable);
4953  cmp1 = at;
4954  cmp2 = Operand(zero_reg);
4955  final_branch_condition = eq;
4956 
4957  } else {
4958  cmp1 = at;
4959  cmp2 = Operand(zero_reg); // Set to valid regs, to avoid caller assertion.
4960  __ Branch(false_label);
4961  }
4962 
4963  return final_branch_condition;
4964 }
4965 
4966 
4967 void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
4968  Register temp1 = ToRegister(instr->TempAt(0));
4969  int true_block = chunk_->LookupDestination(instr->true_block_id());
4970  int false_block = chunk_->LookupDestination(instr->false_block_id());
4971 
4972  EmitIsConstructCall(temp1, scratch0());
4973 
4974  EmitBranch(true_block, false_block, eq, temp1,
4975  Operand(Smi::FromInt(StackFrame::CONSTRUCT)));
4976 }
4977 
4978 
4979 void LCodeGen::EmitIsConstructCall(Register temp1, Register temp2) {
4980  ASSERT(!temp1.is(temp2));
4981  // Get the frame pointer for the calling frame.
4982  __ lw(temp1, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
4983 
4984  // Skip the arguments adaptor frame if it exists.
4985  Label check_frame_marker;
4986  __ lw(temp2, MemOperand(temp1, StandardFrameConstants::kContextOffset));
4987  __ Branch(&check_frame_marker, ne, temp2,
4988  Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
4989  __ lw(temp1, MemOperand(temp1, StandardFrameConstants::kCallerFPOffset));
4990 
4991  // Check the marker in the calling frame.
4992  __ bind(&check_frame_marker);
4993  __ lw(temp1, MemOperand(temp1, StandardFrameConstants::kMarkerOffset));
4994 }
4995 
4996 
4997 void LCodeGen::EnsureSpaceForLazyDeopt() {
4998  // Ensure that we have enough space after the previous lazy-bailout
4999  // instruction for patching the code here.
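  // If fewer than patch_size bytes have been emitted since the last recorded
  // lazy-deopt pc, the gap is padded with nops, one Assembler::kInstrSize at
  // a time, so the call sequence can later be patched in safely.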
5000  int current_pc = masm()->pc_offset();
5001  int patch_size = Deoptimizer::patch_size();
5002  if (current_pc < last_lazy_deopt_pc_ + patch_size) {
5003  int padding_size = last_lazy_deopt_pc_ + patch_size - current_pc;
5004  ASSERT_EQ(0, padding_size % Assembler::kInstrSize);
5005  while (padding_size > 0) {
5006  __ nop();
5007  padding_size -= Assembler::kInstrSize;
5008  }
5009  }
5010  last_lazy_deopt_pc_ = masm()->pc_offset();
5011 }
5012 
5013 
5014 void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
5015  EnsureSpaceForLazyDeopt();
5016  ASSERT(instr->HasEnvironment());
5017  LEnvironment* env = instr->environment();
5018  RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
5019  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
5020 }
5021 
5022 
5023 void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
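  // The 'al' (always) condition makes this an unconditional deoptimization;
  // the zero_reg operands only satisfy DeoptimizeIf's compare-operand
  // signature and are not actually compared.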
5024  DeoptimizeIf(al, instr->environment(), zero_reg, Operand(zero_reg));
5025 }
5026 
5027 
5028 void LCodeGen::DoDeleteProperty(LDeleteProperty* instr) {
5029  Register object = ToRegister(instr->object());
5030  Register key = ToRegister(instr->key());
5031  Register strict = scratch0();
5032  __ li(strict, Operand(Smi::FromInt(strict_mode_flag())));
5033  __ Push(object, key, strict);
5034  ASSERT(instr->HasPointerMap());
5035  LPointerMap* pointers = instr->pointer_map();
5036  RecordPosition(pointers->position());
5037  SafepointGenerator safepoint_generator(
5038  this, pointers, Safepoint::kLazyDeopt);
5039  __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION, safepoint_generator);
5040 }
5041 
5042 
5043 void LCodeGen::DoIn(LIn* instr) {
5044  Register obj = ToRegister(instr->object());
5045  Register key = ToRegister(instr->key());
5046  __ Push(key, obj);
5047  ASSERT(instr->HasPointerMap());
5048  LPointerMap* pointers = instr->pointer_map();
5049  RecordPosition(pointers->position());
5050  SafepointGenerator safepoint_generator(this, pointers, Safepoint::kLazyDeopt);
5051  __ InvokeBuiltin(Builtins::IN, CALL_FUNCTION, safepoint_generator);
5052 }
5053 
5054 
5055 void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
5056  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
5057  __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
5058  RecordSafepointWithLazyDeopt(
5059  instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
5060  ASSERT(instr->HasEnvironment());
5061  LEnvironment* env = instr->environment();
5062  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
5063 }
5064 
5065 
5066 void LCodeGen::DoStackCheck(LStackCheck* instr) {
5067  class DeferredStackCheck: public LDeferredCode {
5068  public:
5069  DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
5070  : LDeferredCode(codegen), instr_(instr) { }
5071  virtual void Generate() { codegen()->DoDeferredStackCheck(instr_); }
5072  virtual LInstruction* instr() { return instr_; }
5073  private:
5074  LStackCheck* instr_;
5075  };
5076 
5077  ASSERT(instr->HasEnvironment());
5078  LEnvironment* env = instr->environment();
5079  // There is no LLazyBailout instruction for stack-checks. We have to
5080  // prepare for lazy deoptimization explicitly here.
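  // Two cases follow: at function entry the stack check calls StackCheckStub
  // inline, while at a backwards branch the call is pushed into deferred code
  // so the loop's fast path is just a compare of sp against the stack limit.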
5081  if (instr->hydrogen()->is_function_entry()) {
5082  // Perform stack overflow check.
5083  Label done;
5084  __ LoadRoot(at, Heap::kStackLimitRootIndex);
5085  __ Branch(&done, hs, sp, Operand(at));
5086  StackCheckStub stub;
5087  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
5088  EnsureSpaceForLazyDeopt();
5089  __ bind(&done);
5090  RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
5091  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
5092  } else {
5093  ASSERT(instr->hydrogen()->is_backwards_branch());
5094  // Perform stack overflow check if this goto needs it before jumping.
5095  DeferredStackCheck* deferred_stack_check =
5096  new(zone()) DeferredStackCheck(this, instr);
5097  __ LoadRoot(at, Heap::kStackLimitRootIndex);
5098  __ Branch(deferred_stack_check->entry(), lo, sp, Operand(at));
5099  EnsureSpaceForLazyDeopt();
5100  __ bind(instr->done_label());
5101  deferred_stack_check->SetExit(instr->done_label());
5102  RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
5103  // Don't record a deoptimization index for the safepoint here.
5104  // This will be done explicitly when emitting call and the safepoint in
5105  // the deferred code.
5106  }
5107 }
5108 
5109 
5110 void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
5111  // This is a pseudo-instruction that ensures that the environment here is
5112  // properly registered for deoptimization and records the assembler's PC
5113  // offset.
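  // (The recorded offset is the point where control re-enters this optimized
  // code when on-stack replacement switches over from the unoptimized frame.)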
5114  LEnvironment* environment = instr->environment();
5115  environment->SetSpilledRegisters(instr->SpilledRegisterArray(),
5116  instr->SpilledDoubleRegisterArray());
5117 
5118  // If the environment were already registered, we would have no way of
5119  // backpatching it with the spill slot operands.
5120  ASSERT(!environment->HasBeenRegistered());
5121  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
5122  ASSERT(osr_pc_offset_ == -1);
5123  osr_pc_offset_ = masm()->pc_offset();
5124 }
5125 
5126 
5127 void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
5128  Register result = ToRegister(instr->result());
5129  Register object = ToRegister(instr->object());
5130  __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
5131  DeoptimizeIf(eq, instr->environment(), object, Operand(at));
5132 
5133  Register null_value = t1;
5134  __ LoadRoot(null_value, Heap::kNullValueRootIndex);
5135  DeoptimizeIf(eq, instr->environment(), object, Operand(null_value));
5136 
5137  __ And(at, object, kSmiTagMask);
5138  DeoptimizeIf(eq, instr->environment(), at, Operand(zero_reg));
5139 
5140  STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
5141  __ GetObjectType(object, a1, a1);
5142  DeoptimizeIf(le, instr->environment(), a1, Operand(LAST_JS_PROXY_TYPE));
5143 
5144  Label use_cache, call_runtime;
5145  ASSERT(object.is(a0));
5146  __ CheckEnumCache(null_value, &call_runtime);
5147 
5148  __ lw(result, FieldMemOperand(object, HeapObject::kMapOffset));
5149  __ Branch(&use_cache);
5150 
5151  // Get the set of properties to enumerate.
5152  __ bind(&call_runtime);
5153  __ push(object);
5154  CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr);
5155 
5156  __ lw(a1, FieldMemOperand(v0, HeapObject::kMapOffset));
5157  ASSERT(result.is(v0));
5158  __ LoadRoot(at, Heap::kMetaMapRootIndex);
5159  DeoptimizeIf(ne, instr->environment(), a1, Operand(at));
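  // Runtime::kGetPropertyNamesFast returns either a map (when an enum cache
  // is available) or a FixedArray of names; checking the result's map against
  // the meta map and deoptimizing on mismatch keeps only the cached-map case
  // that the for-in cache instructions below expect.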
5160  __ bind(&use_cache);
5161 }
5162 
5163 
5164 void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
5165  Register map = ToRegister(instr->map());
5166  Register result = ToRegister(instr->result());
5167  __ LoadInstanceDescriptors(map, result);
5168  __ lw(result,
5169  FieldMemOperand(result, DescriptorArray::kEnumerationIndexOffset));
5170  __ lw(result,
5171  FieldMemOperand(result, FixedArray::SizeFor(instr->idx())));
5172  DeoptimizeIf(eq, instr->environment(), result, Operand(zero_reg));
5173 }
5174 
5175 
5176 void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
5177  Register object = ToRegister(instr->value());
5178  Register map = ToRegister(instr->map());
5179  __ lw(scratch0(), FieldMemOperand(object, HeapObject::kMapOffset));
5180  DeoptimizeIf(ne, instr->environment(), map, Operand(scratch0()));
5181 }
5182 
5183 
5184 void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
5185  Register object = ToRegister(instr->object());
5186  Register index = ToRegister(instr->index());
5187  Register result = ToRegister(instr->result());
5188  Register scratch = scratch0();
5189 
5190  Label out_of_object, done;
5191  __ Branch(USE_DELAY_SLOT, &out_of_object, lt, index, Operand(zero_reg));
5192  __ sll(scratch, index, kPointerSizeLog2 - kSmiTagSize); // In delay slot.
5193 
5194  STATIC_ASSERT(kPointerSizeLog2 > kSmiTagSize);
5195  __ Addu(scratch, object, scratch);
5196  __ lw(result, FieldMemOperand(scratch, JSObject::kHeaderSize));
5197 
5198  __ Branch(&done);
5199 
5200  __ bind(&out_of_object);
5201  __ lw(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
5202  // Index is equal to negated out of object property index plus 1.
5203  __ Subu(scratch, result, scratch);
5204  __ lw(result, FieldMemOperand(scratch,
5205  FixedArray::kHeaderSize - kPointerSize));
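  // scratch still holds the index scaled to a byte offset (set in the delay
  // slot above); since the index is negative here, result - scratch advances
  // into the properties array, and reading at kHeaderSize - kPointerSize
  // selects the element at -index - 1, matching the encoding noted above.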
5206  __ bind(&done);
5207 }
5208 
5209 
5210 #undef __
5211 
5212 } } // namespace v8::internal