v8 3.14.5 (node 0.10.28)
V8 is Google's open source JavaScript engine
lithium-codegen-mips.cc
1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
4 // met:
5 //
6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided
11 // with the distribution.
12 // * Neither the name of Google Inc. nor the names of its
13 // contributors may be used to endorse or promote products derived
14 // from this software without specific prior written permission.
15 //
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 
28 #include "v8.h"
29 
30 #include "mips/lithium-codegen-mips.h"
31 #include "mips/lithium-gap-resolver-mips.h"
32 #include "code-stubs.h"
33 #include "stub-cache.h"
34 
35 namespace v8 {
36 namespace internal {
37 
38 
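// A CallWrapper used when emitting calls from lithium code: after each call
// it records a safepoint for the instruction's pointer map with the given
// deopt mode (see AfterCall below).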
39 class SafepointGenerator : public CallWrapper {
40  public:
41  SafepointGenerator(LCodeGen* codegen,
42  LPointerMap* pointers,
43  Safepoint::DeoptMode mode)
44  : codegen_(codegen),
45  pointers_(pointers),
46  deopt_mode_(mode) { }
47  virtual ~SafepointGenerator() { }
48 
49  virtual void BeforeCall(int call_size) const { }
50 
51  virtual void AfterCall() const {
52  codegen_->RecordSafepoint(pointers_, deopt_mode_);
53  }
54 
55  private:
56  LCodeGen* codegen_;
57  LPointerMap* pointers_;
58  Safepoint::DeoptMode deopt_mode_;
59 };
60 
61 
62 #define __ masm()->
63 
64 bool LCodeGen::GenerateCode() {
65  HPhase phase("Z_Code generation", chunk());
66  ASSERT(is_unused());
67  status_ = GENERATING;
68  CpuFeatures::Scope scope(FPU);
69 
70  CodeStub::GenerateFPStubs();
71 
72  // Open a frame scope to indicate that there is a frame on the stack. The
73  // NONE indicates that the scope shouldn't actually generate code to set up
74  // the frame (that is done in GeneratePrologue).
75  FrameScope frame_scope(masm_, StackFrame::NONE);
76 
77  return GeneratePrologue() &&
78  GenerateBody() &&
79  GenerateDeferredCode() &&
80  GenerateSafepointTable();
81 }
82 
83 
84 void LCodeGen::FinishCode(Handle<Code> code) {
85  ASSERT(is_done());
86  code->set_stack_slots(GetStackSlotCount());
87  code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
88  PopulateDeoptimizationData(code);
89 }
90 
91 
92 void LChunkBuilder::Abort(const char* reason) {
93  info()->set_bailout_reason(reason);
94  status_ = ABORTED;
95 }
96 
97 
98 void LCodeGen::Comment(const char* format, ...) {
99  if (!FLAG_code_comments) return;
100  char buffer[4 * KB];
101  StringBuilder builder(buffer, ARRAY_SIZE(buffer));
102  va_list arguments;
103  va_start(arguments, format);
104  builder.AddFormattedList(format, arguments);
105  va_end(arguments);
106 
107  // Copy the string before recording it in the assembler to avoid
108  // issues when the stack allocated buffer goes out of scope.
109  size_t length = builder.position();
110  Vector<char> copy = Vector<char>::New(length + 1);
111  memcpy(copy.start(), builder.Finalize(), copy.length());
112  masm()->RecordComment(copy.start());
113 }
114 
115 
116 bool LCodeGen::GeneratePrologue() {
117  ASSERT(is_generating());
118 
120 
121 #ifdef DEBUG
122  if (strlen(FLAG_stop_at) > 0 &&
123  info_->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
124  __ stop("stop_at");
125  }
126 #endif
127 
128  // a1: Callee's JS function.
129  // cp: Callee's context.
130  // fp: Caller's frame pointer.
132  // ra: Caller's pc (return address).
132 
133  // Strict mode functions and builtins need to replace the receiver
134  // with undefined when called as functions (without an explicit
135  // receiver object). t1 is zero for method calls and non-zero for
136  // function calls.
137  if (!info_->is_classic_mode() || info_->is_native()) {
138  Label ok;
139  __ Branch(&ok, eq, t1, Operand(zero_reg));
140 
141  int receiver_offset = scope()->num_parameters() * kPointerSize;
142  __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
143  __ sw(a2, MemOperand(sp, receiver_offset));
144  __ bind(&ok);
145  }
146 
147  __ Push(ra, fp, cp, a1);
148  __ Addu(fp, sp, Operand(2 * kPointerSize)); // Adj. FP to point to saved FP.
149 
150  // Reserve space for the stack slots needed by the code.
151  int slots = GetStackSlotCount();
152  if (slots > 0) {
153  if (FLAG_debug_code) {
154  __ li(a0, Operand(slots));
155  __ li(a2, Operand(kSlotsZapValue));
156  Label loop;
157  __ bind(&loop);
158  __ push(a2);
159  __ Subu(a0, a0, 1);
160  __ Branch(&loop, ne, a0, Operand(zero_reg));
161  } else {
162  __ Subu(sp, sp, Operand(slots * kPointerSize));
163  }
164  }
165 
166  // Possibly allocate a local context.
167  int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
168  if (heap_slots > 0) {
169  Comment(";;; Allocate local context");
170  // Argument to NewContext is the function, which is in a1.
171  __ push(a1);
172  if (heap_slots <= FastNewContextStub::kMaximumSlots) {
173  FastNewContextStub stub(heap_slots);
174  __ CallStub(&stub);
175  } else {
176  __ CallRuntime(Runtime::kNewFunctionContext, 1);
177  }
178  RecordSafepoint(Safepoint::kNoLazyDeopt);
179  // Context is returned in both v0 and cp. It replaces the context
180  // passed to us. It's saved in the stack and kept live in cp.
181  __ sw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
182  // Copy any necessary parameters into the context.
183  int num_parameters = scope()->num_parameters();
184  for (int i = 0; i < num_parameters; i++) {
185  Variable* var = scope()->parameter(i);
186  if (var->IsContextSlot()) {
187  int parameter_offset = StandardFrameConstants::kCallerSPOffset +
188  (num_parameters - 1 - i) * kPointerSize;
189  // Load parameter from stack.
190  __ lw(a0, MemOperand(fp, parameter_offset));
191  // Store it in the context.
192  MemOperand target = ContextOperand(cp, var->index());
193  __ sw(a0, target);
194  // Update the write barrier. This clobbers a3 and a0.
195  __ RecordWriteContextSlot(
196  cp, target.offset(), a0, a3, kRAHasBeenSaved, kSaveFPRegs);
197  }
198  }
199  Comment(";;; End allocate local context");
200  }
201 
202  // Trace the call.
203  if (FLAG_trace) {
204  __ CallRuntime(Runtime::kTraceEnter, 0);
205  }
206  EnsureSpaceForLazyDeopt();
207  return !is_aborted();
208 }
209 
210 
211 bool LCodeGen::GenerateBody() {
212  ASSERT(is_generating());
213  bool emit_instructions = true;
214  for (current_instruction_ = 0;
215  !is_aborted() && current_instruction_ < instructions_->length();
216  current_instruction_++) {
217  LInstruction* instr = instructions_->at(current_instruction_);
218  if (instr->IsLabel()) {
219  LLabel* label = LLabel::cast(instr);
220  emit_instructions = !label->HasReplacement();
221  }
222 
223  if (emit_instructions) {
224  Comment(";;; @%d: %s.", current_instruction_, instr->Mnemonic());
225  instr->CompileToNative(this);
226  }
227  }
228  return !is_aborted();
229 }
230 
231 
232 bool LCodeGen::GenerateDeferredCode() {
233  ASSERT(is_generating());
234  if (deferred_.length() > 0) {
235  for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
236  LDeferredCode* code = deferred_[i];
237  __ bind(code->entry());
238  Comment(";;; Deferred code @%d: %s.",
239  code->instruction_index(),
240  code->instr()->Mnemonic());
241  code->Generate();
242  __ jmp(code->exit());
243  }
244  }
245  // Deferred code is the last part of the instruction sequence. Mark
246  // the generated code as done unless we bailed out.
247  if (!is_aborted()) status_ = DONE;
248  return !is_aborted();
249 }
250 
251 
252 bool LCodeGen::GenerateDeoptJumpTable() {
253 // TODO(plind): not clear that this will have an advantage for MIPS.
254  // Skipping it for now. Raised issue #100 for this.
255  Abort("Unimplemented: GenerateDeoptJumpTable");
256  return false;
257 }
258 
259 
260 bool LCodeGen::GenerateSafepointTable() {
261  ASSERT(is_done());
262  safepoints_.Emit(masm(), GetStackSlotCount());
263  return !is_aborted();
264 }
265 
266 
267 Register LCodeGen::ToRegister(int index) const {
268  return Register::FromAllocationIndex(index);
269 }
270 
271 
272 DoubleRegister LCodeGen::ToDoubleRegister(int index) const {
273  return DoubleRegister::FromAllocationIndex(index);
274 }
275 
276 
277 Register LCodeGen::ToRegister(LOperand* op) const {
278  ASSERT(op->IsRegister());
279  return ToRegister(op->index());
280 }
281 
282 
283 Register LCodeGen::EmitLoadRegister(LOperand* op, Register scratch) {
284  if (op->IsRegister()) {
285  return ToRegister(op->index());
286  } else if (op->IsConstantOperand()) {
287  LConstantOperand* const_op = LConstantOperand::cast(op);
288  HConstant* constant = chunk_->LookupConstant(const_op);
289  Handle<Object> literal = constant->handle();
290  Representation r = chunk_->LookupLiteralRepresentation(const_op);
291  if (r.IsInteger32()) {
292  ASSERT(literal->IsNumber());
293  __ li(scratch, Operand(static_cast<int32_t>(literal->Number())));
294  } else if (r.IsDouble()) {
295  Abort("EmitLoadRegister: Unsupported double immediate.");
296  } else {
297  ASSERT(r.IsTagged());
298  if (literal->IsSmi()) {
299  __ li(scratch, Operand(literal));
300  } else {
301  __ LoadHeapObject(scratch, Handle<HeapObject>::cast(literal));
302  }
303  }
304  return scratch;
305  } else if (op->IsStackSlot() || op->IsArgument()) {
306  __ lw(scratch, ToMemOperand(op));
307  return scratch;
308  }
309  UNREACHABLE();
310  return scratch;
311 }
312 
313 
314 DoubleRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
315  ASSERT(op->IsDoubleRegister());
316  return ToDoubleRegister(op->index());
317 }
318 
319 
320 DoubleRegister LCodeGen::EmitLoadDoubleRegister(LOperand* op,
321  FloatRegister flt_scratch,
322  DoubleRegister dbl_scratch) {
323  if (op->IsDoubleRegister()) {
324  return ToDoubleRegister(op->index());
325  } else if (op->IsConstantOperand()) {
326  LConstantOperand* const_op = LConstantOperand::cast(op);
327  HConstant* constant = chunk_->LookupConstant(const_op);
328  Handle<Object> literal = constant->handle();
329  Representation r = chunk_->LookupLiteralRepresentation(const_op);
330  if (r.IsInteger32()) {
331  ASSERT(literal->IsNumber());
332  __ li(at, Operand(static_cast<int32_t>(literal->Number())));
333  __ mtc1(at, flt_scratch);
334  __ cvt_d_w(dbl_scratch, flt_scratch);
335  return dbl_scratch;
336  } else if (r.IsDouble()) {
337  Abort("unsupported double immediate");
338  } else if (r.IsTagged()) {
339  Abort("unsupported tagged immediate");
340  }
341  } else if (op->IsStackSlot() || op->IsArgument()) {
342  MemOperand mem_op = ToMemOperand(op);
343  __ ldc1(dbl_scratch, mem_op);
344  return dbl_scratch;
345  }
346  UNREACHABLE();
347  return dbl_scratch;
348 }
349 
350 
351 Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
352  HConstant* constant = chunk_->LookupConstant(op);
353  ASSERT(chunk_->LookupLiteralRepresentation(op).IsTagged());
354  return constant->handle();
355 }
356 
357 
358 bool LCodeGen::IsInteger32(LConstantOperand* op) const {
359  return chunk_->LookupLiteralRepresentation(op).IsInteger32();
360 }
361 
362 
363 int LCodeGen::ToInteger32(LConstantOperand* op) const {
364  HConstant* constant = chunk_->LookupConstant(op);
365  ASSERT(chunk_->LookupLiteralRepresentation(op).IsInteger32());
366  ASSERT(constant->HasInteger32Value());
367  return constant->Integer32Value();
368 }
369 
370 
371 double LCodeGen::ToDouble(LConstantOperand* op) const {
372  HConstant* constant = chunk_->LookupConstant(op);
373  ASSERT(constant->HasDoubleValue());
374  return constant->DoubleValue();
375 }
376 
377 
378 Operand LCodeGen::ToOperand(LOperand* op) {
379  if (op->IsConstantOperand()) {
380  LConstantOperand* const_op = LConstantOperand::cast(op);
381  HConstant* constant = chunk()->LookupConstant(const_op);
382  Representation r = chunk_->LookupLiteralRepresentation(const_op);
383  if (r.IsInteger32()) {
384  ASSERT(constant->HasInteger32Value());
385  return Operand(constant->Integer32Value());
386  } else if (r.IsDouble()) {
387  Abort("ToOperand Unsupported double immediate.");
388  }
389  ASSERT(r.IsTagged());
390  return Operand(constant->handle());
391  } else if (op->IsRegister()) {
392  return Operand(ToRegister(op));
393  } else if (op->IsDoubleRegister()) {
394  Abort("ToOperand IsDoubleRegister unimplemented");
395  return Operand(0);
396  }
397  // Stack slots not implemented, use ToMemOperand instead.
398  UNREACHABLE();
399  return Operand(0);
400 }
401 
402 
403 MemOperand LCodeGen::ToMemOperand(LOperand* op) const {
404  ASSERT(!op->IsRegister());
405  ASSERT(!op->IsDoubleRegister());
406  ASSERT(op->IsStackSlot() || op->IsDoubleStackSlot());
407  int index = op->index();
408  if (index >= 0) {
409  // Local or spill slot. Skip the frame pointer, function, and
410  // context in the fixed part of the frame.
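 // (With kPointerSize == 4 on 32-bit MIPS this places slot 0 at fp - 12,
 // just below the cp and function slots pushed in GeneratePrologue.)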
411  return MemOperand(fp, -(index + 3) * kPointerSize);
412  } else {
413  // Incoming parameter. Skip the return address.
414  return MemOperand(fp, -(index - 1) * kPointerSize);
415  }
416 }
417 
418 
419 MemOperand LCodeGen::ToHighMemOperand(LOperand* op) const {
420  ASSERT(op->IsDoubleStackSlot());
421  int index = op->index();
422  if (index >= 0) {
423  // Local or spill slot. Skip the frame pointer, function, context,
424  // and the first word of the double in the fixed part of the frame.
425  return MemOperand(fp, -(index + 3) * kPointerSize + kPointerSize);
426  } else {
427  // Incoming parameter. Skip the return address and the first word of
428  // the double.
429  return MemOperand(fp, -(index - 1) * kPointerSize + kPointerSize);
430  }
431 }
432 
433 
434 void LCodeGen::WriteTranslation(LEnvironment* environment,
435  Translation* translation,
436  int* arguments_index,
437  int* arguments_count) {
438  if (environment == NULL) return;
439 
440  // The translation includes one command per value in the environment.
441  int translation_size = environment->values()->length();
442  // The output frame height does not include the parameters.
443  int height = translation_size - environment->parameter_count();
444 
445  // Function parameters are arguments to the outermost environment. The
446  // arguments index points to the first element of a sequence of tagged
447  // values on the stack that represent the arguments. This needs to be
448  // kept in sync with the LArgumentsElements implementation.
449  *arguments_index = -environment->parameter_count();
450  *arguments_count = environment->parameter_count();
451 
452  WriteTranslation(environment->outer(),
453  translation,
454  arguments_index,
455  arguments_count);
456  int closure_id = *info()->closure() != *environment->closure()
457  ? DefineDeoptimizationLiteral(environment->closure())
458  : Translation::kSelfLiteralId;
459 
460  switch (environment->frame_type()) {
461  case JS_FUNCTION:
462  translation->BeginJSFrame(environment->ast_id(), closure_id, height);
463  break;
464  case JS_CONSTRUCT:
465  translation->BeginConstructStubFrame(closure_id, translation_size);
466  break;
467  case JS_GETTER:
468  ASSERT(translation_size == 1);
469  ASSERT(height == 0);
470  translation->BeginGetterStubFrame(closure_id);
471  break;
472  case JS_SETTER:
473  ASSERT(translation_size == 2);
474  ASSERT(height == 0);
475  translation->BeginSetterStubFrame(closure_id);
476  break;
477  case ARGUMENTS_ADAPTOR:
478  translation->BeginArgumentsAdaptorFrame(closure_id, translation_size);
479  break;
480  }
481 
482  // Inlined frames which push their arguments cause the index to be
483  // bumped and a new stack area to be used for materialization.
484  if (environment->entry() != NULL &&
485  environment->entry()->arguments_pushed()) {
486  *arguments_index = *arguments_index < 0
487  ? GetStackSlotCount()
488  : *arguments_index + *arguments_count;
489  *arguments_count = environment->entry()->arguments_count() + 1;
490  }
491 
492  for (int i = 0; i < translation_size; ++i) {
493  LOperand* value = environment->values()->at(i);
494  // spilled_registers_ and spilled_double_registers_ are either
495  // both NULL or both set.
496  if (environment->spilled_registers() != NULL && value != NULL) {
497  if (value->IsRegister() &&
498  environment->spilled_registers()[value->index()] != NULL) {
499  translation->MarkDuplicate();
500  AddToTranslation(translation,
501  environment->spilled_registers()[value->index()],
502  environment->HasTaggedValueAt(i),
503  environment->HasUint32ValueAt(i),
504  *arguments_index,
505  *arguments_count);
506  } else if (
507  value->IsDoubleRegister() &&
508  environment->spilled_double_registers()[value->index()] != NULL) {
509  translation->MarkDuplicate();
510  AddToTranslation(
511  translation,
512  environment->spilled_double_registers()[value->index()],
513  false,
514  false,
515  *arguments_index,
516  *arguments_count);
517  }
518  }
519 
520  AddToTranslation(translation,
521  value,
522  environment->HasTaggedValueAt(i),
523  environment->HasUint32ValueAt(i),
524  *arguments_index,
525  *arguments_count);
526  }
527 }
528 
529 
530 void LCodeGen::AddToTranslation(Translation* translation,
531  LOperand* op,
532  bool is_tagged,
533  bool is_uint32,
534  int arguments_index,
535  int arguments_count) {
536  if (op == NULL) {
537  // TODO(twuerthinger): Introduce marker operands to indicate that this value
538  // is not present and must be reconstructed from the deoptimizer. Currently
539  // this is only used for the arguments object.
540  translation->StoreArgumentsObject(arguments_index, arguments_count);
541  } else if (op->IsStackSlot()) {
542  if (is_tagged) {
543  translation->StoreStackSlot(op->index());
544  } else if (is_uint32) {
545  translation->StoreUint32StackSlot(op->index());
546  } else {
547  translation->StoreInt32StackSlot(op->index());
548  }
549  } else if (op->IsDoubleStackSlot()) {
550  translation->StoreDoubleStackSlot(op->index());
551  } else if (op->IsArgument()) {
552  ASSERT(is_tagged);
553  int src_index = GetStackSlotCount() + op->index();
554  translation->StoreStackSlot(src_index);
555  } else if (op->IsRegister()) {
556  Register reg = ToRegister(op);
557  if (is_tagged) {
558  translation->StoreRegister(reg);
559  } else if (is_uint32) {
560  translation->StoreUint32Register(reg);
561  } else {
562  translation->StoreInt32Register(reg);
563  }
564  } else if (op->IsDoubleRegister()) {
565  DoubleRegister reg = ToDoubleRegister(op);
566  translation->StoreDoubleRegister(reg);
567  } else if (op->IsConstantOperand()) {
568  HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op));
569  int src_index = DefineDeoptimizationLiteral(constant->handle());
570  translation->StoreLiteral(src_index);
571  } else {
572  UNREACHABLE();
573  }
574 }
575 
576 
577 void LCodeGen::CallCode(Handle<Code> code,
578  RelocInfo::Mode mode,
579  LInstruction* instr) {
580  CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT);
581 }
582 
583 
584 void LCodeGen::CallCodeGeneric(Handle<Code> code,
585  RelocInfo::Mode mode,
586  LInstruction* instr,
587  SafepointMode safepoint_mode) {
588  ASSERT(instr != NULL);
589  LPointerMap* pointers = instr->pointer_map();
590  RecordPosition(pointers->position());
591  __ Call(code, mode);
592  RecordSafepointWithLazyDeopt(instr, safepoint_mode);
593 }
594 
595 
596 void LCodeGen::CallRuntime(const Runtime::Function* function,
597  int num_arguments,
598  LInstruction* instr) {
599  ASSERT(instr != NULL);
600  LPointerMap* pointers = instr->pointer_map();
601  ASSERT(pointers != NULL);
602  RecordPosition(pointers->position());
603 
604  __ CallRuntime(function, num_arguments);
605  RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
606 }
607 
608 
609 void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
610  int argc,
611  LInstruction* instr) {
612  __ CallRuntimeSaveDoubles(id);
613  RecordSafepointWithRegisters(
614  instr->pointer_map(), argc, Safepoint::kNoLazyDeopt);
615 }
616 
617 
618 void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
619  Safepoint::DeoptMode mode) {
620  if (!environment->HasBeenRegistered()) {
621  // Physical stack frame layout:
622  // -x ............. -4 0 ..................................... y
623  // [incoming arguments] [spill slots] [pushed outgoing arguments]
624 
625  // Layout of the environment:
626  // 0 ..................................................... size-1
627  // [parameters] [locals] [expression stack including arguments]
628 
629  // Layout of the translation:
630  // 0 ........................................................ size - 1 + 4
631  // [expression stack including arguments] [locals] [4 words] [parameters]
632  // |>------------ translation_size ------------<|
633 
634  int frame_count = 0;
635  int jsframe_count = 0;
636  int args_index = 0;
637  int args_count = 0;
638  for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
639  ++frame_count;
640  if (e->frame_type() == JS_FUNCTION) {
641  ++jsframe_count;
642  }
643  }
644  Translation translation(&translations_, frame_count, jsframe_count, zone());
645  WriteTranslation(environment, &translation, &args_index, &args_count);
646  int deoptimization_index = deoptimizations_.length();
647  int pc_offset = masm()->pc_offset();
648  environment->Register(deoptimization_index,
649  translation.index(),
650  (mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
651  deoptimizations_.Add(environment, zone());
652  }
653 }
654 
655 
656 void LCodeGen::DeoptimizeIf(Condition cc,
657  LEnvironment* environment,
658  Register src1,
659  const Operand& src2) {
660  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
661  ASSERT(environment->HasBeenRegistered());
662  int id = environment->deoptimization_index();
663  Address entry = Deoptimizer::GetDeoptimizationEntry(id, Deoptimizer::EAGER);
664  if (entry == NULL) {
665  Abort("bailout was not prepared");
666  return;
667  }
668 
669  ASSERT(FLAG_deopt_every_n_times < 2); // Other values not supported on MIPS.
670 
671  if (FLAG_deopt_every_n_times == 1 &&
672  info_->shared_info()->opt_count() == id) {
673  __ Jump(entry, RelocInfo::RUNTIME_ENTRY);
674  return;
675  }
676 
677  if (FLAG_trap_on_deopt) {
678  Label skip;
679  if (cc != al) {
680  __ Branch(&skip, NegateCondition(cc), src1, src2);
681  }
682  __ stop("trap_on_deopt");
683  __ bind(&skip);
684  }
685 
686  // TODO(plind): The Arm port is a little different here, due to their
687  // DeOpt jump table, which is not used for Mips yet.
688  __ Jump(entry, RelocInfo::RUNTIME_ENTRY, cc, src1, src2);
689 }
690 
691 
692 void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
693  int length = deoptimizations_.length();
694  if (length == 0) return;
695  Handle<DeoptimizationInputData> data =
696  factory()->NewDeoptimizationInputData(length, TENURED);
697 
698  Handle<ByteArray> translations = translations_.CreateByteArray();
699  data->SetTranslationByteArray(*translations);
700  data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));
701 
702  Handle<FixedArray> literals =
703  factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
704  for (int i = 0; i < deoptimization_literals_.length(); i++) {
705  literals->set(i, *deoptimization_literals_[i]);
706  }
707  data->SetLiteralArray(*literals);
708 
709  data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id().ToInt()));
710  data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_));
711 
712  // Populate the deoptimization entries.
713  for (int i = 0; i < length; i++) {
714  LEnvironment* env = deoptimizations_[i];
715  data->SetAstId(i, env->ast_id());
716  data->SetTranslationIndex(i, Smi::FromInt(env->translation_index()));
717  data->SetArgumentsStackHeight(i,
718  Smi::FromInt(env->arguments_stack_height()));
719  data->SetPc(i, Smi::FromInt(env->pc_offset()));
720  }
721  code->set_deoptimization_data(*data);
722 }
723 
724 
725 int LCodeGen::DefineDeoptimizationLiteral(Handle<Object> literal) {
726  int result = deoptimization_literals_.length();
727  for (int i = 0; i < deoptimization_literals_.length(); ++i) {
728  if (deoptimization_literals_[i].is_identical_to(literal)) return i;
729  }
730  deoptimization_literals_.Add(literal, zone());
731  return result;
732 }
733 
734 
735 void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
736  ASSERT(deoptimization_literals_.length() == 0);
737 
738  const ZoneList<Handle<JSFunction> >* inlined_closures =
739  chunk()->inlined_closures();
740 
741  for (int i = 0, length = inlined_closures->length();
742  i < length;
743  i++) {
744  DefineDeoptimizationLiteral(inlined_closures->at(i));
745  }
746 
747  inlined_function_count_ = deoptimization_literals_.length();
748 }
749 
750 
751 void LCodeGen::RecordSafepointWithLazyDeopt(
752  LInstruction* instr, SafepointMode safepoint_mode) {
753  if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
754  RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt);
755  } else {
756  ASSERT(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
757  RecordSafepointWithRegisters(
758  instr->pointer_map(), 0, Safepoint::kLazyDeopt);
759  }
760 }
761 
762 
763 void LCodeGen::RecordSafepoint(
764  LPointerMap* pointers,
765  Safepoint::Kind kind,
766  int arguments,
767  Safepoint::DeoptMode deopt_mode) {
768  ASSERT(expected_safepoint_kind_ == kind);
769 
770  const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();
771  Safepoint safepoint = safepoints_.DefineSafepoint(masm(),
772  kind, arguments, deopt_mode);
773  for (int i = 0; i < operands->length(); i++) {
774  LOperand* pointer = operands->at(i);
775  if (pointer->IsStackSlot()) {
776  safepoint.DefinePointerSlot(pointer->index(), zone());
777  } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
778  safepoint.DefinePointerRegister(ToRegister(pointer), zone());
779  }
780  }
781  if (kind & Safepoint::kWithRegisters) {
782  // Register cp always contains a pointer to the context.
783  safepoint.DefinePointerRegister(cp, zone());
784  }
785 }
786 
787 
788 void LCodeGen::RecordSafepoint(LPointerMap* pointers,
789  Safepoint::DeoptMode deopt_mode) {
790  RecordSafepoint(pointers, Safepoint::kSimple, 0, deopt_mode);
791 }
792 
793 
794 void LCodeGen::RecordSafepoint(Safepoint::DeoptMode deopt_mode) {
795  LPointerMap empty_pointers(RelocInfo::kNoPosition, zone());
796  RecordSafepoint(&empty_pointers, deopt_mode);
797 }
798 
799 
800 void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
801  int arguments,
802  Safepoint::DeoptMode deopt_mode) {
803  RecordSafepoint(
804  pointers, Safepoint::kWithRegisters, arguments, deopt_mode);
805 }
806 
807 
808 void LCodeGen::RecordSafepointWithRegistersAndDoubles(
809  LPointerMap* pointers,
810  int arguments,
811  Safepoint::DeoptMode deopt_mode) {
812  RecordSafepoint(
813  pointers, Safepoint::kWithRegistersAndDoubles, arguments, deopt_mode);
814 }
815 
816 
817 void LCodeGen::RecordPosition(int position) {
818  if (position == RelocInfo::kNoPosition) return;
819  masm()->positions_recorder()->RecordPosition(position);
820 }
821 
822 
823 void LCodeGen::DoLabel(LLabel* label) {
824  if (label->is_loop_header()) {
825  Comment(";;; B%d - LOOP entry", label->block_id());
826  } else {
827  Comment(";;; B%d", label->block_id());
828  }
829  __ bind(label->label());
830  current_block_ = label->block_id();
831  DoGap(label);
832 }
833 
834 
835 void LCodeGen::DoParallelMove(LParallelMove* move) {
836  resolver_.Resolve(move);
837 }
838 
839 
840 void LCodeGen::DoGap(LGap* gap) {
841  for (int i = LGap::FIRST_INNER_POSITION;
842  i <= LGap::LAST_INNER_POSITION;
843  i++) {
844  LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i);
845  LParallelMove* move = gap->GetParallelMove(inner_pos);
846  if (move != NULL) DoParallelMove(move);
847  }
848 }
849 
850 
851 void LCodeGen::DoInstructionGap(LInstructionGap* instr) {
852  DoGap(instr);
853 }
854 
855 
856 void LCodeGen::DoParameter(LParameter* instr) {
857  // Nothing to do.
858 }
859 
860 
861 void LCodeGen::DoCallStub(LCallStub* instr) {
862  ASSERT(ToRegister(instr->result()).is(v0));
863  switch (instr->hydrogen()->major_key()) {
864  case CodeStub::RegExpConstructResult: {
865  RegExpConstructResultStub stub;
866  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
867  break;
868  }
869  case CodeStub::RegExpExec: {
870  RegExpExecStub stub;
871  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
872  break;
873  }
874  case CodeStub::SubString: {
875  SubStringStub stub;
876  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
877  break;
878  }
879  case CodeStub::NumberToString: {
880  NumberToStringStub stub;
881  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
882  break;
883  }
884  case CodeStub::StringAdd: {
885  StringAddStub stub(NO_STRING_ADD_FLAGS);
886  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
887  break;
888  }
889  case CodeStub::StringCompare: {
890  StringCompareStub stub;
891  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
892  break;
893  }
894  case CodeStub::TranscendentalCache: {
895  __ lw(a0, MemOperand(sp, 0));
896  TranscendentalCacheStub stub(instr->transcendental_type(),
897  TranscendentalCacheStub::TAGGED);
898  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
899  break;
900  }
901  default:
902  UNREACHABLE();
903  }
904 }
905 
906 
907 void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
908  // Nothing to do.
909 }
910 
911 
912 void LCodeGen::DoModI(LModI* instr) {
913  Register scratch = scratch0();
914  const Register left = ToRegister(instr->left());
915  const Register result = ToRegister(instr->result());
916 
917  Label done;
918 
919  if (instr->hydrogen()->HasPowerOf2Divisor()) {
920  Register scratch = scratch0();
921  ASSERT(!left.is(scratch));
922  __ mov(scratch, left);
923  int32_t p2constant = HConstant::cast(
924  instr->hydrogen()->right())->Integer32Value();
925  ASSERT(p2constant != 0);
926  // Result always takes the sign of the dividend (left).
927  p2constant = abs(p2constant);
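 // For a negative dividend the code below computes -((-left) & (2^k - 1)),
 // matching the truncating semantics of % where the remainder takes the
 // sign of the dividend (e.g. -7 % 4 == -3).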
928 
929  Label positive_dividend;
930  __ Branch(USE_DELAY_SLOT, &positive_dividend, ge, left, Operand(zero_reg));
931  __ subu(result, zero_reg, left);
932  __ And(result, result, p2constant - 1);
933  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
934  DeoptimizeIf(eq, instr->environment(), result, Operand(zero_reg));
935  }
936  __ Branch(USE_DELAY_SLOT, &done);
937  __ subu(result, zero_reg, result);
938  __ bind(&positive_dividend);
939  __ And(result, scratch, p2constant - 1);
940  } else {
941  // div runs in the background while we check for special cases.
942  Register right = EmitLoadRegister(instr->right(), scratch);
943  __ div(left, right);
944 
945  // Check for x % 0.
946  if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
947  DeoptimizeIf(eq, instr->environment(), right, Operand(zero_reg));
948  }
949 
950  __ Branch(USE_DELAY_SLOT, &done, ge, left, Operand(zero_reg));
951  __ mfhi(result);
952 
953  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
954  DeoptimizeIf(eq, instr->environment(), result, Operand(zero_reg));
955  }
956  }
957  __ bind(&done);
958 }
959 
960 
961 void LCodeGen::DoDivI(LDivI* instr) {
962  const Register left = ToRegister(instr->left());
963  const Register right = ToRegister(instr->right());
964  const Register result = ToRegister(instr->result());
965 
966  // On MIPS div is asynchronous - it will run in the background while we
967  // check for special cases.
968  __ div(left, right);
969 
970  // Check for x / 0.
971  if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
972  DeoptimizeIf(eq, instr->environment(), right, Operand(zero_reg));
973  }
974 
975  // Check for (0 / -x) that will produce negative zero.
976  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
977  Label left_not_zero;
978  __ Branch(&left_not_zero, ne, left, Operand(zero_reg));
979  DeoptimizeIf(lt, instr->environment(), right, Operand(zero_reg));
980  __ bind(&left_not_zero);
981  }
982 
983  // Check for (-kMinInt / -1).
984  if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
985  Label left_not_min_int;
986  __ Branch(&left_not_min_int, ne, left, Operand(kMinInt));
987  DeoptimizeIf(eq, instr->environment(), right, Operand(-1));
988  __ bind(&left_not_min_int);
989  }
990 
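 // hi holds the remainder of the division. A non-zero remainder means the
 // quotient is not an integer, so deoptimize; otherwise read the quotient
 // from lo.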
991  __ mfhi(result);
992  DeoptimizeIf(ne, instr->environment(), result, Operand(zero_reg));
993  __ mflo(result);
994 }
995 
996 
997 void LCodeGen::DoMulI(LMulI* instr) {
998  Register scratch = scratch0();
999  Register result = ToRegister(instr->result());
1000  // Note that result may alias left.
1001  Register left = ToRegister(instr->left());
1002  LOperand* right_op = instr->right();
1003 
1004  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
1005  bool bailout_on_minus_zero =
1006  instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero);
1007 
1008  if (right_op->IsConstantOperand() && !can_overflow) {
1009  // Use optimized code for specific constants.
1010  int32_t constant = ToInteger32(LConstantOperand::cast(right_op));
1011 
1012  if (bailout_on_minus_zero && (constant < 0)) {
1013  // The case of a zero constant will be handled separately.
1014  // If the constant is negative and left is zero, the result should be -0.
1015  DeoptimizeIf(eq, instr->environment(), left, Operand(zero_reg));
1016  }
1017 
1018  switch (constant) {
1019  case -1:
1020  __ Subu(result, zero_reg, left);
1021  break;
1022  case 0:
1023  if (bailout_on_minus_zero) {
1024  // If left is strictly negative and the constant is zero, the
1025  // result is -0. Deoptimize if required, otherwise return 0.
1026  DeoptimizeIf(lt, instr->environment(), left, Operand(zero_reg));
1027  }
1028  __ mov(result, zero_reg);
1029  break;
1030  case 1:
1031  // Nothing to do.
1032  __ Move(result, left);
1033  break;
1034  default:
1035  // Multiplying by powers of two and powers of two plus or minus
1036  // one can be done faster with shifted operands.
1037  // For other constants we emit standard code.
1038  int32_t mask = constant >> 31;
1039  uint32_t constant_abs = (constant + mask) ^ mask;
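 // mask is 0 for a non-negative constant and -1 for a negative one, so
 // (constant + mask) ^ mask computes abs(constant) without a branch.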
1040 
1041  if (IsPowerOf2(constant_abs) ||
1042  IsPowerOf2(constant_abs - 1) ||
1043  IsPowerOf2(constant_abs + 1)) {
1044  if (IsPowerOf2(constant_abs)) {
1045  int32_t shift = WhichPowerOf2(constant_abs);
1046  __ sll(result, left, shift);
1047  } else if (IsPowerOf2(constant_abs - 1)) {
1048  int32_t shift = WhichPowerOf2(constant_abs - 1);
1049  __ sll(result, left, shift);
1050  __ Addu(result, result, left);
1051  } else if (IsPowerOf2(constant_abs + 1)) {
1052  int32_t shift = WhichPowerOf2(constant_abs + 1);
1053  __ sll(result, left, shift);
1054  __ Subu(result, result, left);
1055  }
1056 
1057  // Correct the sign of the result if the constant is negative.
1058  if (constant < 0) {
1059  __ Subu(result, zero_reg, result);
1060  }
1061 
1062  } else {
1063  // Generate standard code.
1064  __ li(at, constant);
1065  __ Mul(result, left, at);
1066  }
1067  }
1068 
1069  } else {
1070  Register right = EmitLoadRegister(right_op, scratch);
1071  if (bailout_on_minus_zero) {
1072  __ Or(ToRegister(instr->temp()), left, right);
1073  }
1074 
1075  if (can_overflow) {
1076  // hi:lo = left * right.
1077  __ mult(left, right);
1078  __ mfhi(scratch);
1079  __ mflo(result);
1080  __ sra(at, result, 31);
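 // If the product fits in 32 bits, hi (now in scratch) equals the sign
 // extension of lo (in result); otherwise the multiplication overflowed.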
1081  DeoptimizeIf(ne, instr->environment(), scratch, Operand(at));
1082  } else {
1083  __ Mul(result, left, right);
1084  }
1085 
1086  if (bailout_on_minus_zero) {
1087  // Bail out if the result is supposed to be negative zero.
1088  Label done;
1089  __ Branch(&done, ne, result, Operand(zero_reg));
1090  DeoptimizeIf(lt,
1091  instr->environment(),
1092  ToRegister(instr->temp()),
1093  Operand(zero_reg));
1094  __ bind(&done);
1095  }
1096  }
1097 }
1098 
1099 
1100 void LCodeGen::DoBitI(LBitI* instr) {
1101  LOperand* left_op = instr->left();
1102  LOperand* right_op = instr->right();
1103  ASSERT(left_op->IsRegister());
1104  Register left = ToRegister(left_op);
1105  Register result = ToRegister(instr->result());
1106  Operand right(no_reg);
1107 
1108  if (right_op->IsStackSlot() || right_op->IsArgument()) {
1109  right = Operand(EmitLoadRegister(right_op, at));
1110  } else {
1111  ASSERT(right_op->IsRegister() || right_op->IsConstantOperand());
1112  right = ToOperand(right_op);
1113  }
1114 
1115  switch (instr->op()) {
1116  case Token::BIT_AND:
1117  __ And(result, left, right);
1118  break;
1119  case Token::BIT_OR:
1120  __ Or(result, left, right);
1121  break;
1122  case Token::BIT_XOR:
1123  __ Xor(result, left, right);
1124  break;
1125  default:
1126  UNREACHABLE();
1127  break;
1128  }
1129 }
1130 
1131 
1132 void LCodeGen::DoShiftI(LShiftI* instr) {
1133  // Both 'left' and 'right' are "used at start" (see LCodeGen::DoShift), so
1134  // result may alias either of them.
1135  LOperand* right_op = instr->right();
1136  Register left = ToRegister(instr->left());
1137  Register result = ToRegister(instr->result());
1138 
1139  if (right_op->IsRegister()) {
1140  // No need to mask the right operand on MIPS, it is built into the variable
1141  // shift instructions.
1142  switch (instr->op()) {
1143  case Token::SAR:
1144  __ srav(result, left, ToRegister(right_op));
1145  break;
1146  case Token::SHR:
1147  __ srlv(result, left, ToRegister(right_op));
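 // A logical right shift result is used as a signed int32 here; if the sign
 // bit can end up set (only possible for a shift amount of zero), deoptimize.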
1148  if (instr->can_deopt()) {
1149  DeoptimizeIf(lt, instr->environment(), result, Operand(zero_reg));
1150  }
1151  break;
1152  case Token::SHL:
1153  __ sllv(result, left, ToRegister(right_op));
1154  break;
1155  default:
1156  UNREACHABLE();
1157  break;
1158  }
1159  } else {
1160  // Mask the right_op operand.
1161  int value = ToInteger32(LConstantOperand::cast(right_op));
1162  uint8_t shift_count = static_cast<uint8_t>(value & 0x1F);
1163  switch (instr->op()) {
1164  case Token::SAR:
1165  if (shift_count != 0) {
1166  __ sra(result, left, shift_count);
1167  } else {
1168  __ Move(result, left);
1169  }
1170  break;
1171  case Token::SHR:
1172  if (shift_count != 0) {
1173  __ srl(result, left, shift_count);
1174  } else {
1175  if (instr->can_deopt()) {
1176  __ And(at, left, Operand(0x80000000));
1177  DeoptimizeIf(ne, instr->environment(), at, Operand(zero_reg));
1178  }
1179  __ Move(result, left);
1180  }
1181  break;
1182  case Token::SHL:
1183  if (shift_count != 0) {
1184  __ sll(result, left, shift_count);
1185  } else {
1186  __ Move(result, left);
1187  }
1188  break;
1189  default:
1190  UNREACHABLE();
1191  break;
1192  }
1193  }
1194 }
1195 
1196 
1197 void LCodeGen::DoSubI(LSubI* instr) {
1198  LOperand* left = instr->left();
1199  LOperand* right = instr->right();
1200  LOperand* result = instr->result();
1201  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
1202 
1203  if (!can_overflow) {
1204  if (right->IsStackSlot() || right->IsArgument()) {
1205  Register right_reg = EmitLoadRegister(right, at);
1206  __ Subu(ToRegister(result), ToRegister(left), Operand(right_reg));
1207  } else {
1208  ASSERT(right->IsRegister() || right->IsConstantOperand());
1209  __ Subu(ToRegister(result), ToRegister(left), ToOperand(right));
1210  }
1211  } else { // can_overflow.
1212  Register overflow = scratch0();
1213  Register scratch = scratch1();
1214  if (right->IsStackSlot() ||
1215  right->IsArgument() ||
1216  right->IsConstantOperand()) {
1217  Register right_reg = EmitLoadRegister(right, scratch);
1218  __ SubuAndCheckForOverflow(ToRegister(result),
1219  ToRegister(left),
1220  right_reg,
1221  overflow); // Reg at also used as scratch.
1222  } else {
1223  ASSERT(right->IsRegister());
1224  // The overflow-check macros do not support constant operands, so the
1225  // IsConstantOperand case is handled in the previous clause.
1226  __ SubuAndCheckForOverflow(ToRegister(result),
1227  ToRegister(left),
1228  ToRegister(right),
1229  overflow); // Reg at also used as scratch.
1230  }
1231  DeoptimizeIf(lt, instr->environment(), overflow, Operand(zero_reg));
1232  }
1233 }
1234 
1235 
1236 void LCodeGen::DoConstantI(LConstantI* instr) {
1237  ASSERT(instr->result()->IsRegister());
1238  __ li(ToRegister(instr->result()), Operand(instr->value()));
1239 }
1240 
1241 
1242 void LCodeGen::DoConstantD(LConstantD* instr) {
1243  ASSERT(instr->result()->IsDoubleRegister());
1244  DoubleRegister result = ToDoubleRegister(instr->result());
1245  double v = instr->value();
1246  __ Move(result, v);
1247 }
1248 
1249 
1250 void LCodeGen::DoConstantT(LConstantT* instr) {
1251  Handle<Object> value = instr->value();
1252  if (value->IsSmi()) {
1253  __ li(ToRegister(instr->result()), Operand(value));
1254  } else {
1255  __ LoadHeapObject(ToRegister(instr->result()),
1256  Handle<HeapObject>::cast(value));
1257  }
1258 }
1259 
1260 
1261 void LCodeGen::DoJSArrayLength(LJSArrayLength* instr) {
1262  Register result = ToRegister(instr->result());
1263  Register array = ToRegister(instr->value());
1264  __ lw(result, FieldMemOperand(array, JSArray::kLengthOffset));
1265 }
1266 
1267 
1268 void LCodeGen::DoFixedArrayBaseLength(LFixedArrayBaseLength* instr) {
1269  Register result = ToRegister(instr->result());
1270  Register array = ToRegister(instr->value());
1271  __ lw(result, FieldMemOperand(array, FixedArrayBase::kLengthOffset));
1272 }
1273 
1274 
1275 void LCodeGen::DoMapEnumLength(LMapEnumLength* instr) {
1276  Register result = ToRegister(instr->result());
1277  Register map = ToRegister(instr->value());
1278  __ EnumLength(result, map);
1279 }
1280 
1281 
1282 void LCodeGen::DoElementsKind(LElementsKind* instr) {
1283  Register result = ToRegister(instr->result());
1284  Register input = ToRegister(instr->value());
1285 
1286  // Load map into |result|.
1287  __ lw(result, FieldMemOperand(input, HeapObject::kMapOffset));
1288  // Load the map's "bit field 2" into |result|. We only need the first byte,
1289  // but the following bit field extraction takes care of that anyway.
1290  __ lbu(result, FieldMemOperand(result, Map::kBitField2Offset));
1291  // Retrieve elements_kind from bit field 2.
1292  __ Ext(result, result, Map::kElementsKindShift, Map::kElementsKindBitCount);
1293 }
1294 
1295 
1296 void LCodeGen::DoValueOf(LValueOf* instr) {
1297  Register input = ToRegister(instr->value());
1298  Register result = ToRegister(instr->result());
1299  Register map = ToRegister(instr->temp());
1300  Label done;
1301 
1302  // If the object is a smi return the object.
1303  __ Move(result, input);
1304  __ JumpIfSmi(input, &done);
1305 
1306  // If the object is not a value type, return the object.
1307  __ GetObjectType(input, map, map);
1308  __ Branch(&done, ne, map, Operand(JS_VALUE_TYPE));
1309  __ lw(result, FieldMemOperand(input, JSValue::kValueOffset));
1310 
1311  __ bind(&done);
1312 }
1313 
1314 
1315 void LCodeGen::DoDateField(LDateField* instr) {
1316  Register object = ToRegister(instr->date());
1317  Register result = ToRegister(instr->result());
1318  Register scratch = ToRegister(instr->temp());
1319  Smi* index = instr->index();
1320  Label runtime, done;
1321  ASSERT(object.is(a0));
1322  ASSERT(result.is(v0));
1323  ASSERT(!scratch.is(scratch0()));
1324  ASSERT(!scratch.is(object));
1325 
1326  __ And(at, object, Operand(kSmiTagMask));
1327  DeoptimizeIf(eq, instr->environment(), at, Operand(zero_reg));
1328  __ GetObjectType(object, scratch, scratch);
1329  DeoptimizeIf(ne, instr->environment(), scratch, Operand(JS_DATE_TYPE));
1330 
1331  if (index->value() == 0) {
1332  __ lw(result, FieldMemOperand(object, JSDate::kValueOffset));
1333  } else {
1334  if (index->value() < JSDate::kFirstUncachedField) {
1335  ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
1336  __ li(scratch, Operand(stamp));
1337  __ lw(scratch, MemOperand(scratch));
1338  __ lw(scratch0(), FieldMemOperand(object, JSDate::kCacheStampOffset));
1339  __ Branch(&runtime, ne, scratch, Operand(scratch0()));
1340  __ lw(result, FieldMemOperand(object, JSDate::kValueOffset +
1341  kPointerSize * index->value()));
1342  __ jmp(&done);
1343  }
1344  __ bind(&runtime);
1345  __ PrepareCallCFunction(2, scratch);
1346  __ li(a1, Operand(index));
1347  __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
1348  __ bind(&done);
1349  }
1350 }
1351 
1352 
1353 void LCodeGen::DoBitNotI(LBitNotI* instr) {
1354  Register input = ToRegister(instr->value());
1355  Register result = ToRegister(instr->result());
1356  __ Nor(result, zero_reg, Operand(input));
1357 }
1358 
1359 
1360 void LCodeGen::DoThrow(LThrow* instr) {
1361  Register input_reg = EmitLoadRegister(instr->value(), at);
1362  __ push(input_reg);
1363  CallRuntime(Runtime::kThrow, 1, instr);
1364 
1365  if (FLAG_debug_code) {
1366  __ stop("Unreachable code.");
1367  }
1368 }
1369 
1370 
1371 void LCodeGen::DoAddI(LAddI* instr) {
1372  LOperand* left = instr->left();
1373  LOperand* right = instr->right();
1374  LOperand* result = instr->result();
1375  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
1376 
1377  if (!can_overflow) {
1378  if (right->IsStackSlot() || right->IsArgument()) {
1379  Register right_reg = EmitLoadRegister(right, at);
1380  __ Addu(ToRegister(result), ToRegister(left), Operand(right_reg));
1381  } else {
1382  ASSERT(right->IsRegister() || right->IsConstantOperand());
1383  __ Addu(ToRegister(result), ToRegister(left), ToOperand(right));
1384  }
1385  } else { // can_overflow.
1386  Register overflow = scratch0();
1387  Register scratch = scratch1();
1388  if (right->IsStackSlot() ||
1389  right->IsArgument() ||
1390  right->IsConstantOperand()) {
1391  Register right_reg = EmitLoadRegister(right, scratch);
1392  __ AdduAndCheckForOverflow(ToRegister(result),
1393  ToRegister(left),
1394  right_reg,
1395  overflow); // Reg at also used as scratch.
1396  } else {
1397  ASSERT(right->IsRegister());
1398  // The overflow-check macros do not support constant operands, so the
1399  // IsConstantOperand case is handled in the previous clause.
1400  __ AdduAndCheckForOverflow(ToRegister(result),
1401  ToRegister(left),
1402  ToRegister(right),
1403  overflow); // Reg at also used as scratch.
1404  }
1405  DeoptimizeIf(lt, instr->environment(), overflow, Operand(zero_reg));
1406  }
1407 }
1408 
1409 
1410 void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
1411  LOperand* left = instr->left();
1412  LOperand* right = instr->right();
1413  HMathMinMax::Operation operation = instr->hydrogen()->operation();
1414  Condition condition = (operation == HMathMinMax::kMathMin) ? le : ge;
1415  if (instr->hydrogen()->representation().IsInteger32()) {
1416  Register left_reg = ToRegister(left);
1417  Operand right_op = (right->IsRegister() || right->IsConstantOperand())
1418  ? ToOperand(right)
1419  : Operand(EmitLoadRegister(right, at));
1420  Register result_reg = ToRegister(instr->result());
1421  Label return_right, done;
1422  if (!result_reg.is(left_reg)) {
1423  __ Branch(&return_right, NegateCondition(condition), left_reg, right_op);
1424  __ mov(result_reg, left_reg);
1425  __ Branch(&done);
1426  }
1427  __ Branch(&done, condition, left_reg, right_op);
1428  __ bind(&return_right);
1429  __ Addu(result_reg, zero_reg, right_op);
1430  __ bind(&done);
1431  } else {
1432  ASSERT(instr->hydrogen()->representation().IsDouble());
1433  FPURegister left_reg = ToDoubleRegister(left);
1434  FPURegister right_reg = ToDoubleRegister(right);
1435  FPURegister result_reg = ToDoubleRegister(instr->result());
1436  Label check_nan_left, check_zero, return_left, return_right, done;
1437  __ BranchF(&check_zero, &check_nan_left, eq, left_reg, right_reg);
1438  __ BranchF(&return_left, NULL, condition, left_reg, right_reg);
1439  __ Branch(&return_right);
1440 
1441  __ bind(&check_zero);
1442  // left == right != 0.
1443  __ BranchF(&return_left, NULL, ne, left_reg, kDoubleRegZero);
1444  // At this point, both left and right are either 0 or -0.
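 // IEEE 754 addition returns -0 only when both inputs are -0, which is the
 // correct max of signed zeros. For min, -((-left) - right) is computed
 // instead, which is -0 whenever either input is -0.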
1445  if (operation == HMathMinMax::kMathMin) {
1446  __ neg_d(left_reg, left_reg);
1447  __ sub_d(result_reg, left_reg, right_reg);
1448  __ neg_d(result_reg, result_reg);
1449  } else {
1450  __ add_d(result_reg, left_reg, right_reg);
1451  }
1452  __ Branch(&done);
1453 
1454  __ bind(&check_nan_left);
1455  // left == NaN.
1456  __ BranchF(NULL, &return_left, eq, left_reg, left_reg);
1457  __ bind(&return_right);
1458  if (!right_reg.is(result_reg)) {
1459  __ mov_d(result_reg, right_reg);
1460  }
1461  __ Branch(&done);
1462 
1463  __ bind(&return_left);
1464  if (!left_reg.is(result_reg)) {
1465  __ mov_d(result_reg, left_reg);
1466  }
1467  __ bind(&done);
1468  }
1469 }
1470 
1471 
1472 void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
1473  DoubleRegister left = ToDoubleRegister(instr->left());
1474  DoubleRegister right = ToDoubleRegister(instr->right());
1475  DoubleRegister result = ToDoubleRegister(instr->result());
1476  switch (instr->op()) {
1477  case Token::ADD:
1478  __ add_d(result, left, right);
1479  break;
1480  case Token::SUB:
1481  __ sub_d(result, left, right);
1482  break;
1483  case Token::MUL:
1484  __ mul_d(result, left, right);
1485  break;
1486  case Token::DIV:
1487  __ div_d(result, left, right);
1488  break;
1489  case Token::MOD: {
1490  // Save a0-a3 on the stack.
1491  RegList saved_regs = a0.bit() | a1.bit() | a2.bit() | a3.bit();
1492  __ MultiPush(saved_regs);
1493 
1494  __ PrepareCallCFunction(0, 2, scratch0());
1495  __ SetCallCDoubleArguments(left, right);
1496  __ CallCFunction(
1497  ExternalReference::double_fp_operation(Token::MOD, isolate()),
1498  0, 2);
1499  // Move the result in the double result register.
1500  __ GetCFunctionDoubleResult(result);
1501 
1502  // Restore saved register.
1503  __ MultiPop(saved_regs);
1504  break;
1505  }
1506  default:
1507  UNREACHABLE();
1508  break;
1509  }
1510 }
1511 
1512 
1513 void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
1514  ASSERT(ToRegister(instr->left()).is(a1));
1515  ASSERT(ToRegister(instr->right()).is(a0));
1516  ASSERT(ToRegister(instr->result()).is(v0));
1517 
1518  BinaryOpStub stub(instr->op(), NO_OVERWRITE);
1519  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
1520  // Other architectures use a nop here to signal that there is no inlined
1521  // patchable code. MIPS does not need the nop, since our marker
1522  // instruction (andi zero_reg) will never be used in normal code.
1523 }
1524 
1525 
1526 int LCodeGen::GetNextEmittedBlock(int block) {
1527  for (int i = block + 1; i < graph()->blocks()->length(); ++i) {
1528  LLabel* label = chunk_->GetLabel(i);
1529  if (!label->HasReplacement()) return i;
1530  }
1531  return -1;
1532 }
1533 
1534 
1535 void LCodeGen::EmitBranch(int left_block, int right_block,
1536  Condition cc, Register src1, const Operand& src2) {
1537  int next_block = GetNextEmittedBlock(current_block_);
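 // If one target is the block emitted immediately after this one, fall
 // through to it and emit a single branch for the other target.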
1538  right_block = chunk_->LookupDestination(right_block);
1539  left_block = chunk_->LookupDestination(left_block);
1540  if (right_block == left_block) {
1541  EmitGoto(left_block);
1542  } else if (left_block == next_block) {
1543  __ Branch(chunk_->GetAssemblyLabel(right_block),
1544  NegateCondition(cc), src1, src2);
1545  } else if (right_block == next_block) {
1546  __ Branch(chunk_->GetAssemblyLabel(left_block), cc, src1, src2);
1547  } else {
1548  __ Branch(chunk_->GetAssemblyLabel(left_block), cc, src1, src2);
1549  __ Branch(chunk_->GetAssemblyLabel(right_block));
1550  }
1551 }
1552 
1553 
1554 void LCodeGen::EmitBranchF(int left_block, int right_block,
1555  Condition cc, FPURegister src1, FPURegister src2) {
1556  int next_block = GetNextEmittedBlock(current_block_);
1557  right_block = chunk_->LookupDestination(right_block);
1558  left_block = chunk_->LookupDestination(left_block);
1559  if (right_block == left_block) {
1560  EmitGoto(left_block);
1561  } else if (left_block == next_block) {
1562  __ BranchF(chunk_->GetAssemblyLabel(right_block), NULL,
1563  NegateCondition(cc), src1, src2);
1564  } else if (right_block == next_block) {
1565  __ BranchF(chunk_->GetAssemblyLabel(left_block), NULL, cc, src1, src2);
1566  } else {
1567  __ BranchF(chunk_->GetAssemblyLabel(left_block), NULL, cc, src1, src2);
1568  __ Branch(chunk_->GetAssemblyLabel(right_block));
1569  }
1570 }
1571 
1572 
1573 void LCodeGen::DoBranch(LBranch* instr) {
1574  int true_block = chunk_->LookupDestination(instr->true_block_id());
1575  int false_block = chunk_->LookupDestination(instr->false_block_id());
1576 
1577  Representation r = instr->hydrogen()->value()->representation();
1578  if (r.IsInteger32()) {
1579  Register reg = ToRegister(instr->value());
1580  EmitBranch(true_block, false_block, ne, reg, Operand(zero_reg));
1581  } else if (r.IsDouble()) {
1582  DoubleRegister reg = ToDoubleRegister(instr->value());
1583  // Test the double value. Zero and NaN are false.
1584  EmitBranchF(true_block, false_block, ne, reg, kDoubleRegZero);
1585  } else {
1586  ASSERT(r.IsTagged());
1587  Register reg = ToRegister(instr->value());
1588  HType type = instr->hydrogen()->value()->type();
1589  if (type.IsBoolean()) {
1590  __ LoadRoot(at, Heap::kTrueValueRootIndex);
1591  EmitBranch(true_block, false_block, eq, reg, Operand(at));
1592  } else if (type.IsSmi()) {
1593  EmitBranch(true_block, false_block, ne, reg, Operand(zero_reg));
1594  } else {
1595  Label* true_label = chunk_->GetAssemblyLabel(true_block);
1596  Label* false_label = chunk_->GetAssemblyLabel(false_block);
1597 
1598  ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types();
1599  // Avoid deopts in the case where we've never executed this path before.
1600  if (expected.IsEmpty()) expected = ToBooleanStub::all_types();
1601 
1602  if (expected.Contains(ToBooleanStub::UNDEFINED)) {
1603  // undefined -> false.
1604  __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
1605  __ Branch(false_label, eq, reg, Operand(at));
1606  }
1607  if (expected.Contains(ToBooleanStub::BOOLEAN)) {
1608  // Boolean -> its value.
1609  __ LoadRoot(at, Heap::kTrueValueRootIndex);
1610  __ Branch(true_label, eq, reg, Operand(at));
1611  __ LoadRoot(at, Heap::kFalseValueRootIndex);
1612  __ Branch(false_label, eq, reg, Operand(at));
1613  }
1614  if (expected.Contains(ToBooleanStub::NULL_TYPE)) {
1615  // 'null' -> false.
1616  __ LoadRoot(at, Heap::kNullValueRootIndex);
1617  __ Branch(false_label, eq, reg, Operand(at));
1618  }
1619 
1620  if (expected.Contains(ToBooleanStub::SMI)) {
1621  // Smis: 0 -> false, all other -> true.
1622  __ Branch(false_label, eq, reg, Operand(zero_reg));
1623  __ JumpIfSmi(reg, true_label);
1624  } else if (expected.NeedsMap()) {
1625  // If we need a map later and have a Smi -> deopt.
1626  __ And(at, reg, Operand(kSmiTagMask));
1627  DeoptimizeIf(eq, instr->environment(), at, Operand(zero_reg));
1628  }
1629 
1630  const Register map = scratch0();
1631  if (expected.NeedsMap()) {
1632  __ lw(map, FieldMemOperand(reg, HeapObject::kMapOffset));
1633  if (expected.CanBeUndetectable()) {
1634  // Undetectable -> false.
1635  __ lbu(at, FieldMemOperand(map, Map::kBitFieldOffset));
1636  __ And(at, at, Operand(1 << Map::kIsUndetectable));
1637  __ Branch(false_label, ne, at, Operand(zero_reg));
1638  }
1639  }
1640 
1641  if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) {
1642  // spec object -> true.
1643  __ GetObjectType(reg, map, at);
1644  __ Branch(true_label, ge, at, Operand(FIRST_SPEC_OBJECT_TYPE));
1645  }
1646 
1647  if (expected.Contains(ToBooleanStub::STRING)) {
1648  // String value -> false iff empty.
1649  Label not_string;
1650  __ GetObjectType(reg, map, at);
1651  __ Branch(&not_string, ge, at, Operand(FIRST_NONSTRING_TYPE));
1652  __ lw(at, FieldMemOperand(reg, String::kLengthOffset));
1653  __ Branch(true_label, ne, at, Operand(zero_reg));
1654  __ Branch(false_label);
1655  __ bind(&not_string);
1656  }
1657 
1658  if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
1659  // heap number -> false iff +0, -0, or NaN.
1660  DoubleRegister dbl_scratch = double_scratch0();
1661  Label not_heap_number;
1662  __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
1663  __ Branch(&not_heap_number, ne, map, Operand(at));
1664  __ ldc1(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset));
1665  __ BranchF(true_label, false_label, ne, dbl_scratch, kDoubleRegZero);
1666  // Falls through if dbl_scratch == 0.
1667  __ Branch(false_label);
1668  __ bind(&not_heap_number);
1669  }
1670 
1671  // We've seen something for the first time -> deopt.
1672  DeoptimizeIf(al, instr->environment(), zero_reg, Operand(zero_reg));
1673  }
1674  }
1675 }
1676 
1677 
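// EmitGoto only emits a jump when the target is not the next block in
// emission order; otherwise control simply falls through.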
1678 void LCodeGen::EmitGoto(int block) {
1679  block = chunk_->LookupDestination(block);
1680  int next_block = GetNextEmittedBlock(current_block_);
1681  if (block != next_block) {
1682  __ jmp(chunk_->GetAssemblyLabel(block));
1683  }
1684 }
1685 
1686 
1687 void LCodeGen::DoGoto(LGoto* instr) {
1688  EmitGoto(instr->block_id());
1689 }
1690 
1691 
1692 Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
1693  Condition cond = kNoCondition;
1694  switch (op) {
1695  case Token::EQ:
1696  case Token::EQ_STRICT:
1697  cond = eq;
1698  break;
1699  case Token::LT:
1700  cond = is_unsigned ? lo : lt;
1701  break;
1702  case Token::GT:
1703  cond = is_unsigned ? hi : gt;
1704  break;
1705  case Token::LTE:
1706  cond = is_unsigned ? ls : le;
1707  break;
1708  case Token::GTE:
1709  cond = is_unsigned ? hs : ge;
1710  break;
1711  case Token::IN:
1712  case Token::INSTANCEOF:
1713  default:
1714  UNREACHABLE();
1715  }
1716  return cond;
1717 }
1718 
1719 
1720 void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) {
1721  LOperand* left = instr->left();
1722  LOperand* right = instr->right();
1723  int false_block = chunk_->LookupDestination(instr->false_block_id());
1724  int true_block = chunk_->LookupDestination(instr->true_block_id());
1725 
1726  Condition cond = TokenToCondition(instr->op(), false);
1727 
1728  if (left->IsConstantOperand() && right->IsConstantOperand()) {
1729  // We can statically evaluate the comparison.
1730  double left_val = ToDouble(LConstantOperand::cast(left));
1731  double right_val = ToDouble(LConstantOperand::cast(right));
1732  int next_block =
1733  EvalComparison(instr->op(), left_val, right_val) ? true_block
1734  : false_block;
1735  EmitGoto(next_block);
1736  } else {
1737  if (instr->is_double()) {
1738  // Compare left and right as doubles and load the
1739  // resulting flags into the normal status register.
1740  FPURegister left_reg = ToDoubleRegister(left);
1741  FPURegister right_reg = ToDoubleRegister(right);
1742 
1743  // If a NaN is involved, i.e. the result is unordered,
1744  // jump to false block label.
1745  __ BranchF(NULL, chunk_->GetAssemblyLabel(false_block), eq,
1746  left_reg, right_reg);
1747 
1748  EmitBranchF(true_block, false_block, cond, left_reg, right_reg);
1749  } else {
1750  Register cmp_left;
1751  Operand cmp_right = Operand(0);
1752 
1753  if (right->IsConstantOperand()) {
1754  cmp_left = ToRegister(left);
1755  cmp_right = Operand(ToInteger32(LConstantOperand::cast(right)));
1756  } else if (left->IsConstantOperand()) {
1757  cmp_left = ToRegister(right);
1758  cmp_right = Operand(ToInteger32(LConstantOperand::cast(left)));
1759  // We transposed the operands. Reverse the condition.
1760  cond = ReverseCondition(cond);
1761  } else {
1762  cmp_left = ToRegister(left);
1763  cmp_right = Operand(ToRegister(right));
1764  }
1765 
1766  EmitBranch(true_block, false_block, cond, cmp_left, cmp_right);
1767  }
1768  }
1769 }
1770 
1771 
1772 void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
1773  Register left = ToRegister(instr->left());
1774  Register right = ToRegister(instr->right());
1775  int false_block = chunk_->LookupDestination(instr->false_block_id());
1776  int true_block = chunk_->LookupDestination(instr->true_block_id());
1777 
1778  EmitBranch(true_block, false_block, eq, left, Operand(right));
1779 }
1780 
1781 
1782 void LCodeGen::DoCmpConstantEqAndBranch(LCmpConstantEqAndBranch* instr) {
1783  Register left = ToRegister(instr->left());
1784  int true_block = chunk_->LookupDestination(instr->true_block_id());
1785  int false_block = chunk_->LookupDestination(instr->false_block_id());
1786 
1787  EmitBranch(true_block, false_block, eq, left,
1788  Operand(instr->hydrogen()->right()));
1789 }
1790 
1791 
1792 
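// Note on USE_DELAY_SLOT in the sequences below: on MIPS the instruction
// emitted right after a branch occupies the branch delay slot and executes
// whether or not the branch is taken, so an independent instruction is
// placed there instead of a nop.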
1793 void LCodeGen::DoIsNilAndBranch(LIsNilAndBranch* instr) {
1794  Register scratch = scratch0();
1795  Register reg = ToRegister(instr->value());
1796  int false_block = chunk_->LookupDestination(instr->false_block_id());
1797 
1798  // If the expression is known to be untagged or a smi, then it's definitely
1799  // not null, and it can't be an undetectable object.
1800  if (instr->hydrogen()->representation().IsSpecialization() ||
1801  instr->hydrogen()->type().IsSmi()) {
1802  EmitGoto(false_block);
1803  return;
1804  }
1805 
1806  int true_block = chunk_->LookupDestination(instr->true_block_id());
1807 
1808  Heap::RootListIndex nil_value = instr->nil() == kNullValue ?
1809  Heap::kNullValueRootIndex :
1810  Heap::kUndefinedValueRootIndex;
1811  __ LoadRoot(at, nil_value);
1812  if (instr->kind() == kStrictEquality) {
1813  EmitBranch(true_block, false_block, eq, reg, Operand(at));
1814  } else {
1815  Heap::RootListIndex other_nil_value = instr->nil() == kNullValue ?
1816  Heap::kUndefinedValueRootIndex :
1817  Heap::kNullValueRootIndex;
1818  Label* true_label = chunk_->GetAssemblyLabel(true_block);
1819  Label* false_label = chunk_->GetAssemblyLabel(false_block);
1820  __ Branch(USE_DELAY_SLOT, true_label, eq, reg, Operand(at));
1821  __ LoadRoot(at, other_nil_value); // In the delay slot.
1822  __ Branch(USE_DELAY_SLOT, true_label, eq, reg, Operand(at));
1823  __ JumpIfSmi(reg, false_label); // In the delay slot.
1824  // Check for undetectable objects by looking in the bit field in
1825  // the map. The object has already been smi checked.
1826  __ lw(scratch, FieldMemOperand(reg, HeapObject::kMapOffset));
1827  __ lbu(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
1828  __ And(scratch, scratch, 1 << Map::kIsUndetectable);
1829  EmitBranch(true_block, false_block, ne, scratch, Operand(zero_reg));
1830  }
1831 }
1832 
1833 
1834 Condition LCodeGen::EmitIsObject(Register input,
1835  Register temp1,
1836  Register temp2,
1837  Label* is_not_object,
1838  Label* is_object) {
1839  __ JumpIfSmi(input, is_not_object);
1840 
1841  __ LoadRoot(temp2, Heap::kNullValueRootIndex);
1842  __ Branch(is_object, eq, input, Operand(temp2));
1843 
1844  // Load map.
1845  __ lw(temp1, FieldMemOperand(input, HeapObject::kMapOffset));
1846  // Undetectable objects behave like undefined.
1847  __ lbu(temp2, FieldMemOperand(temp1, Map::kBitFieldOffset));
1848  __ And(temp2, temp2, Operand(1 << Map::kIsUndetectable));
1849  __ Branch(is_not_object, ne, temp2, Operand(zero_reg));
1850 
1851  // Load instance type and check that it is in object type range.
1852  __ lbu(temp2, FieldMemOperand(temp1, Map::kInstanceTypeOffset));
1853  __ Branch(is_not_object,
1854  lt, temp2, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
1855 
1856  return le;
1857 }
1858 
1859 
1860 void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
1861  Register reg = ToRegister(instr->value());
1862  Register temp1 = ToRegister(instr->temp());
1863  Register temp2 = scratch0();
1864 
1865  int true_block = chunk_->LookupDestination(instr->true_block_id());
1866  int false_block = chunk_->LookupDestination(instr->false_block_id());
1867  Label* true_label = chunk_->GetAssemblyLabel(true_block);
1868  Label* false_label = chunk_->GetAssemblyLabel(false_block);
1869 
1870  Condition true_cond =
1871  EmitIsObject(reg, temp1, temp2, false_label, true_label);
1872 
1873  EmitBranch(true_block, false_block, true_cond, temp2,
1874  Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
1875 }
1876 
1877 
1878 Condition LCodeGen::EmitIsString(Register input,
1879  Register temp1,
1880  Label* is_not_string) {
1881  __ JumpIfSmi(input, is_not_string);
1882  __ GetObjectType(input, temp1, temp1);
1883 
1884  return lt;
1885 }
1886 
1887 
1888 void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
1889  Register reg = ToRegister(instr->value());
1890  Register temp1 = ToRegister(instr->temp());
1891 
1892  int true_block = chunk_->LookupDestination(instr->true_block_id());
1893  int false_block = chunk_->LookupDestination(instr->false_block_id());
1894  Label* false_label = chunk_->GetAssemblyLabel(false_block);
1895 
1896  Condition true_cond =
1897  EmitIsString(reg, temp1, false_label);
1898 
1899  EmitBranch(true_block, false_block, true_cond, temp1,
1900  Operand(FIRST_NONSTRING_TYPE));
1901 }
1902 
1903 
1904 void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
1905  int true_block = chunk_->LookupDestination(instr->true_block_id());
1906  int false_block = chunk_->LookupDestination(instr->false_block_id());
1907 
1908  Register input_reg = EmitLoadRegister(instr->value(), at);
1909  __ And(at, input_reg, kSmiTagMask);
1910  EmitBranch(true_block, false_block, eq, at, Operand(zero_reg));
1911 }
1912 
1913 
1914 void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
1915  Register input = ToRegister(instr->value());
1916  Register temp = ToRegister(instr->temp());
1917 
1918  int true_block = chunk_->LookupDestination(instr->true_block_id());
1919  int false_block = chunk_->LookupDestination(instr->false_block_id());
1920 
1921  __ JumpIfSmi(input, chunk_->GetAssemblyLabel(false_block));
1922  __ lw(temp, FieldMemOperand(input, HeapObject::kMapOffset));
1923  __ lbu(temp, FieldMemOperand(temp, Map::kBitFieldOffset));
1924  __ And(at, temp, Operand(1 << Map::kIsUndetectable));
1925  EmitBranch(true_block, false_block, ne, at, Operand(zero_reg));
1926 }
1927 
1928 
1929 static Condition ComputeCompareCondition(Token::Value op) {
1930  switch (op) {
1931  case Token::EQ_STRICT:
1932  case Token::EQ:
1933  return eq;
1934  case Token::LT:
1935  return lt;
1936  case Token::GT:
1937  return gt;
1938  case Token::LTE:
1939  return le;
1940  case Token::GTE:
1941  return ge;
1942  default:
1943  UNREACHABLE();
1944  return kNoCondition;
1945  }
1946 }
1947 
1948 
1949 void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
1950  Token::Value op = instr->op();
1951  int true_block = chunk_->LookupDestination(instr->true_block_id());
1952  int false_block = chunk_->LookupDestination(instr->false_block_id());
1953 
1954  Handle<Code> ic = CompareIC::GetUninitialized(op);
1955  CallCode(ic, RelocInfo::CODE_TARGET, instr);
1956 
1957  Condition condition = ComputeCompareCondition(op);
1958 
1959  EmitBranch(true_block, false_block, condition, v0, Operand(zero_reg));
1960 }
1961 
1962 
1963 static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
1964  InstanceType from = instr->from();
1965  InstanceType to = instr->to();
1966  if (from == FIRST_TYPE) return to;
1967  ASSERT(from == to || to == LAST_TYPE);
1968  return from;
1969 }
1970 
1971 
1972 static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
1973  InstanceType from = instr->from();
1974  InstanceType to = instr->to();
1975  if (from == to) return eq;
1976  if (to == LAST_TYPE) return hs;
1977  if (from == FIRST_TYPE) return ls;
1978  UNREACHABLE();
1979  return eq;
1980 }
1981 
1982 
1983 void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
1984  Register scratch = scratch0();
1985  Register input = ToRegister(instr->value());
1986 
1987  int true_block = chunk_->LookupDestination(instr->true_block_id());
1988  int false_block = chunk_->LookupDestination(instr->false_block_id());
1989 
1990  Label* false_label = chunk_->GetAssemblyLabel(false_block);
1991 
1992  __ JumpIfSmi(input, false_label);
1993 
1994  __ GetObjectType(input, scratch, scratch);
1995  EmitBranch(true_block,
1996  false_block,
1997  BranchCondition(instr->hydrogen()),
1998  scratch,
1999  Operand(TestType(instr->hydrogen())));
2000 }
2001 
2002 
2003 void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
2004  Register input = ToRegister(instr->value());
2005  Register result = ToRegister(instr->result());
2006 
2007  __ AssertString(input);
2008 
2009  __ lw(result, FieldMemOperand(input, String::kHashFieldOffset));
2010  __ IndexFromHash(result, result);
2011 }
2012 
2013 
2014 void LCodeGen::DoHasCachedArrayIndexAndBranch(
2015  LHasCachedArrayIndexAndBranch* instr) {
2016  Register input = ToRegister(instr->value());
2017  Register scratch = scratch0();
2018 
2019  int true_block = chunk_->LookupDestination(instr->true_block_id());
2020  int false_block = chunk_->LookupDestination(instr->false_block_id());
2021 
2022  __ lw(scratch,
2023  FieldMemOperand(input, String::kHashFieldOffset));
2024  __ And(at, scratch, Operand(String::kContainsCachedArrayIndexMask));
2025  EmitBranch(true_block, false_block, eq, at, Operand(zero_reg));
2026 }
2027 
2028 
2029 // Branches to a label or falls through with the answer in flags. Trashes
2030 // the temp registers, but not the input.
2031 void LCodeGen::EmitClassOfTest(Label* is_true,
2032  Label* is_false,
2033  Handle<String> class_name,
2034  Register input,
2035  Register temp,
2036  Register temp2) {
2037  ASSERT(!input.is(temp));
2038  ASSERT(!input.is(temp2));
2039  ASSERT(!temp.is(temp2));
2040 
2041  __ JumpIfSmi(input, is_false);
2042 
2043  if (class_name->IsEqualTo(CStrVector("Function"))) {
2044  // Assuming the following assertions, we can use the same compares to test
2045  // for both being a function type and being in the object type range.
2046  STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
2047  STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
2048  FIRST_SPEC_OBJECT_TYPE + 1);
2049  STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
2050  LAST_SPEC_OBJECT_TYPE - 1);
2051  STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
2052 
2053  __ GetObjectType(input, temp, temp2);
2054  __ Branch(is_false, lt, temp2, Operand(FIRST_SPEC_OBJECT_TYPE));
2055  __ Branch(is_true, eq, temp2, Operand(FIRST_SPEC_OBJECT_TYPE));
2056  __ Branch(is_true, eq, temp2, Operand(LAST_SPEC_OBJECT_TYPE));
2057  } else {
2058  // Faster code path to avoid two compares: subtract lower bound from the
2059  // actual type and do a signed compare with the width of the type range.
2060  __ GetObjectType(input, temp, temp2);
2061  __ Subu(temp2, temp2, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
2062  __ Branch(is_false, gt, temp2, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE -
2063  FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
2064  }
2065 
2066  // Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range.
2067  // Check if the constructor in the map is a function.
2068  __ lw(temp, FieldMemOperand(temp, Map::kConstructorOffset));
2069 
2070  // Objects with a non-function constructor have class 'Object'.
2071  __ GetObjectType(temp, temp2, temp2);
2072  if (class_name->IsEqualTo(CStrVector("Object"))) {
2073  __ Branch(is_true, ne, temp2, Operand(JS_FUNCTION_TYPE));
2074  } else {
2075  __ Branch(is_false, ne, temp2, Operand(JS_FUNCTION_TYPE));
2076  }
2077 
2078  // temp now contains the constructor function. Grab the
2079  // instance class name from there.
2080  __ lw(temp, FieldMemOperand(temp, JSFunction::kSharedFunctionInfoOffset));
2081  __ lw(temp, FieldMemOperand(temp,
2082  SharedFunctionInfo::kInstanceClassNameOffset));
2083  // The class name we are testing against is a symbol because it's a literal.
2084  // The name in the constructor is a symbol because of the way the context is
2085  // booted. This routine isn't expected to work for random API-created
2086  // classes and it doesn't have to because you can't access it with natives
2087  // syntax. Since both sides are symbols it is sufficient to use an identity
2088  // comparison.
2089 
2090  // End with the address of this class_name instance in temp register.
2091  // On MIPS, the caller must do the comparison with Handle<String> class_name.
2092 }
2093 
2094 
2095 void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
2096  Register input = ToRegister(instr->value());
2097  Register temp = scratch0();
2098  Register temp2 = ToRegister(instr->temp());
2099  Handle<String> class_name = instr->hydrogen()->class_name();
2100 
2101  int true_block = chunk_->LookupDestination(instr->true_block_id());
2102  int false_block = chunk_->LookupDestination(instr->false_block_id());
2103 
2104  Label* true_label = chunk_->GetAssemblyLabel(true_block);
2105  Label* false_label = chunk_->GetAssemblyLabel(false_block);
2106 
2107  EmitClassOfTest(true_label, false_label, class_name, input, temp, temp2);
2108 
2109  EmitBranch(true_block, false_block, eq, temp, Operand(class_name));
2110 }
2111 
2112 
2113 void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
2114  Register reg = ToRegister(instr->value());
2115  Register temp = ToRegister(instr->temp());
2116  int true_block = instr->true_block_id();
2117  int false_block = instr->false_block_id();
2118 
2119  __ lw(temp, FieldMemOperand(reg, HeapObject::kMapOffset));
2120  EmitBranch(true_block, false_block, eq, temp, Operand(instr->map()));
2121 }
2122 
2123 
2124 void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
2125  Label true_label, done;
2126  ASSERT(ToRegister(instr->left()).is(a0)); // Object is in a0.
2127  ASSERT(ToRegister(instr->right()).is(a1)); // Function is in a1.
2128  Register result = ToRegister(instr->result());
2129  ASSERT(result.is(v0));
2130 
2131  InstanceofStub stub(InstanceofStub::kArgsInRegisters);
2132  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
2133 
2134  __ Branch(&true_label, eq, result, Operand(zero_reg));
2135  __ li(result, Operand(factory()->false_value()));
2136  __ Branch(&done);
2137  __ bind(&true_label);
2138  __ li(result, Operand(factory()->true_value()));
2139  __ bind(&done);
2140 }
2141 
2142 
2143 void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
2144  class DeferredInstanceOfKnownGlobal: public LDeferredCode {
2145  public:
2146  DeferredInstanceOfKnownGlobal(LCodeGen* codegen,
2147  LInstanceOfKnownGlobal* instr)
2148  : LDeferredCode(codegen), instr_(instr) { }
2149  virtual void Generate() {
2150  codegen()->DoDeferredInstanceOfKnownGlobal(instr_, &map_check_);
2151  }
2152  virtual LInstruction* instr() { return instr_; }
2153  Label* map_check() { return &map_check_; }
2154 
2155  private:
2156  LInstanceOfKnownGlobal* instr_;
2157  Label map_check_;
2158  };
2159 
2160  DeferredInstanceOfKnownGlobal* deferred;
2161  deferred = new(zone()) DeferredInstanceOfKnownGlobal(this, instr);
2162 
2163  Label done, false_result;
2164  Register object = ToRegister(instr->value());
2165  Register temp = ToRegister(instr->temp());
2166  Register result = ToRegister(instr->result());
2167 
2168  ASSERT(object.is(a0));
2169  ASSERT(result.is(v0));
2170 
2171  // A Smi is not an instance of anything.
2172  __ JumpIfSmi(object, &false_result);
2173 
2174  // This is the inlined call site instanceof cache. The two occurrences of the
2175  // hole value will be patched to the last map/result pair generated by the
2176  // instanceof stub.
2177  Label cache_miss;
2178  Register map = temp;
2179  __ lw(map, FieldMemOperand(object, HeapObject::kMapOffset));
2180 
2181  Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
2182  __ bind(deferred->map_check()); // Label for calculating code patching.
2183  // We use Factory::the_hole_value() on purpose instead of loading from the
2184  // root array to force relocation to be able to later patch with
2185  // the cached map.
2186  Handle<JSGlobalPropertyCell> cell =
2187  factory()->NewJSGlobalPropertyCell(factory()->the_hole_value());
2188  __ li(at, Operand(Handle<Object>(cell)));
2189  __ lw(at, FieldMemOperand(at, JSGlobalPropertyCell::kValueOffset));
2190  __ Branch(&cache_miss, ne, map, Operand(at));
2191  // We use Factory::the_hole_value() on purpose instead of loading from the
2192  // root array to force relocation to be able to later patch
2193  // with true or false.
2194  __ li(result, Operand(factory()->the_hole_value()), CONSTANT_SIZE);
2195  __ Branch(&done);
2196 
2197  // The inlined call site cache did not match. Check null and string before
2198  // calling the deferred code.
2199  __ bind(&cache_miss);
2200  // Null is not an instance of anything.
2201  __ LoadRoot(temp, Heap::kNullValueRootIndex);
2202  __ Branch(&false_result, eq, object, Operand(temp));
2203 
2204  // String values are not instances of anything.
2205  Condition cc = __ IsObjectStringType(object, temp, temp);
2206  __ Branch(&false_result, cc, temp, Operand(zero_reg));
2207 
2208  // Go to the deferred code.
2209  __ Branch(deferred->entry());
2210 
2211  __ bind(&false_result);
2212  __ LoadRoot(result, Heap::kFalseValueRootIndex);
2213 
2214  // Here result holds either true or false. The deferred code also produces a
2215  // true or false object.
2216  __ bind(deferred->exit());
2217  __ bind(&done);
2218 }
2219 
2220 
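// Deferred path for DoInstanceOfKnownGlobal above. The instruction count
// from the map-check label (plus kAdditionalDelta), scaled by kPointerSize,
// is stored into t0's safepoint slot; the InstanceofStub built with
// kCallSiteInlineCheck uses that offset to locate and patch the inlined
// map and result at the call site.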
2221 void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
2222  Label* map_check) {
2223  Register result = ToRegister(instr->result());
2224  ASSERT(result.is(v0));
2225 
2226  InstanceofStub::Flags flags = InstanceofStub::kNoFlags;
2227  flags = static_cast<InstanceofStub::Flags>(
2228  flags | InstanceofStub::kArgsInRegisters);
2229  flags = static_cast<InstanceofStub::Flags>(
2230  flags | InstanceofStub::kCallSiteInlineCheck);
2231  flags = static_cast<InstanceofStub::Flags>(
2232  flags | InstanceofStub::kReturnTrueFalseObject);
2233  InstanceofStub stub(flags);
2234 
2235  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
2236 
2237  // Get the temp register reserved by the instruction. This needs to be t0 as
2238  // its slot among the pushed safepoint registers is used to communicate the
2239  // offset to the location of the map check.
2240  Register temp = ToRegister(instr->temp());
2241  ASSERT(temp.is(t0));
2242  __ LoadHeapObject(InstanceofStub::right(), instr->function());
2243  static const int kAdditionalDelta = 7;
2244  int delta = masm_->InstructionsGeneratedSince(map_check) + kAdditionalDelta;
2245  Label before_push_delta;
2246  __ bind(&before_push_delta);
2247  {
2248  Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
2249  __ li(temp, Operand(delta * kPointerSize), CONSTANT_SIZE);
2250  __ StoreToSafepointRegisterSlot(temp, temp);
2251  }
2252  CallCodeGeneric(stub.GetCode(),
2253  RelocInfo::CODE_TARGET,
2254  instr,
2255  RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
2256  LEnvironment* env = instr->GetDeferredLazyDeoptimizationEnvironment();
2257  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
2258  // Put the result value into the result register slot and
2259  // restore all registers.
2260  __ StoreToSafepointRegisterSlot(result, result);
2261 }
2262 
2263 
2264 void LCodeGen::DoCmpT(LCmpT* instr) {
2265  Token::Value op = instr->op();
2266 
2267  Handle<Code> ic = CompareIC::GetUninitialized(op);
2268  CallCode(ic, RelocInfo::CODE_TARGET, instr);
2269  // On MIPS there is no need for a "no inlined smi code" marker (nop).
2270 
2271  Condition condition = ComputeCompareCondition(op);
2272  // A minor optimization that relies on LoadRoot always emitting one
2273  // instruction.
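// The true-value load below sits in the branch delay slot and always
// executes; if the branch is taken the false-value load is skipped,
// otherwise it overwrites the result with false.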
2274  Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm());
2275  Label done;
2276  __ Branch(USE_DELAY_SLOT, &done, condition, v0, Operand(zero_reg));
2277  __ LoadRoot(ToRegister(instr->result()), Heap::kTrueValueRootIndex);
2278  __ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex);
2279  ASSERT_EQ(3, masm()->InstructionsGeneratedSince(&done));
2280  __ bind(&done);
2281 }
2282 
2283 
2284 void LCodeGen::DoReturn(LReturn* instr) {
2285  if (FLAG_trace) {
2286  // Push the return value on the stack as the parameter.
2287  // Runtime::TraceExit returns its parameter in v0.
2288  __ push(v0);
2289  __ CallRuntime(Runtime::kTraceExit, 1);
2290  }
2291  int32_t sp_delta = (GetParameterCount() + 1) * kPointerSize;
2292  __ mov(sp, fp);
2293  __ Pop(ra, fp);
2294  __ Addu(sp, sp, Operand(sp_delta));
2295  __ Jump(ra);
2296 }
2297 
2298 
2299 void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
2300  Register result = ToRegister(instr->result());
2301  __ li(at, Operand(Handle<Object>(instr->hydrogen()->cell())));
2302  __ lw(result, FieldMemOperand(at, JSGlobalPropertyCell::kValueOffset));
2303  if (instr->hydrogen()->RequiresHoleCheck()) {
2304  __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
2305  DeoptimizeIf(eq, instr->environment(), result, Operand(at));
2306  }
2307 }
2308 
2309 
2310 void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
2311  ASSERT(ToRegister(instr->global_object()).is(a0));
2312  ASSERT(ToRegister(instr->result()).is(v0));
2313 
2314  __ li(a2, Operand(instr->name()));
2315  RelocInfo::Mode mode = instr->for_typeof() ? RelocInfo::CODE_TARGET
2316  : RelocInfo::CODE_TARGET_CONTEXT;
2317  Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
2318  CallCode(ic, mode, instr);
2319 }
2320 
2321 
2322 void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
2323  Register value = ToRegister(instr->value());
2324  Register cell = scratch0();
2325 
2326  // Load the cell.
2327  __ li(cell, Operand(instr->hydrogen()->cell()));
2328 
2329  // If the cell we are storing to contains the hole it could have
2330  // been deleted from the property dictionary. In that case, we need
2331  // to update the property details in the property dictionary to mark
2332  // it as no longer deleted.
2333  if (instr->hydrogen()->RequiresHoleCheck()) {
2334  // We use a temp to check the payload.
2335  Register payload = ToRegister(instr->temp());
2336  __ lw(payload, FieldMemOperand(cell, JSGlobalPropertyCell::kValueOffset));
2337  __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
2338  DeoptimizeIf(eq, instr->environment(), payload, Operand(at));
2339  }
2340 
2341  // Store the value.
2342  __ sw(value, FieldMemOperand(cell, JSGlobalPropertyCell::kValueOffset));
2343  // Cells are always rescanned, so no write barrier here.
2344 }
2345 
2346 
2347 void LCodeGen::DoStoreGlobalGeneric(LStoreGlobalGeneric* instr) {
2348  ASSERT(ToRegister(instr->global_object()).is(a1));
2349  ASSERT(ToRegister(instr->value()).is(a0));
2350 
2351  __ li(a2, Operand(instr->name()));
2352  Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
2353  ? isolate()->builtins()->StoreIC_Initialize_Strict()
2354  : isolate()->builtins()->StoreIC_Initialize();
2355  CallCode(ic, RelocInfo::CODE_TARGET_CONTEXT, instr);
2356 }
2357 
2358 
2359 void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
2360  Register context = ToRegister(instr->context());
2361  Register result = ToRegister(instr->result());
2362 
2363  __ lw(result, ContextOperand(context, instr->slot_index()));
2364  if (instr->hydrogen()->RequiresHoleCheck()) {
2365  __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
2366 
2367  if (instr->hydrogen()->DeoptimizesOnHole()) {
2368  DeoptimizeIf(eq, instr->environment(), result, Operand(at));
2369  } else {
2370  Label is_not_hole;
2371  __ Branch(&is_not_hole, ne, result, Operand(at));
2372  __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
2373  __ bind(&is_not_hole);
2374  }
2375  }
2376 }
2377 
2378 
2379 void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
2380  Register context = ToRegister(instr->context());
2381  Register value = ToRegister(instr->value());
2382  Register scratch = scratch0();
2383  MemOperand target = ContextOperand(context, instr->slot_index());
2384 
2385  Label skip_assignment;
2386 
2387  if (instr->hydrogen()->RequiresHoleCheck()) {
2388  __ lw(scratch, target);
2389  __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
2390 
2391  if (instr->hydrogen()->DeoptimizesOnHole()) {
2392  DeoptimizeIf(eq, instr->environment(), scratch, Operand(at));
2393  } else {
2394  __ Branch(&skip_assignment, ne, scratch, Operand(at));
2395  }
2396  }
2397 
2398  __ sw(value, target);
2399  if (instr->hydrogen()->NeedsWriteBarrier()) {
2400  HType type = instr->hydrogen()->value()->type();
2401  SmiCheck check_needed =
2402  type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
2403  __ RecordWriteContextSlot(context,
2404  target.offset(),
2405  value,
2406  scratch0(),
2407  kRAHasBeenSaved,
2408  kSaveFPRegs,
2409  EMIT_REMEMBERED_SET,
2410  check_needed);
2411  }
2412 
2413  __ bind(&skip_assignment);
2414 }
2415 
2416 
2417 void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
2418  Register object = ToRegister(instr->object());
2419  Register result = ToRegister(instr->result());
2420  if (instr->hydrogen()->is_in_object()) {
2421  __ lw(result, FieldMemOperand(object, instr->hydrogen()->offset()));
2422  } else {
2423  __ lw(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
2424  __ lw(result, FieldMemOperand(result, instr->hydrogen()->offset()));
2425  }
2426 }
2427 
2428 
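// Loads a named property whose location is known from the map: either an
// in-object field (negative index), a slot in the properties array, or a
// constant function. For a negative lookup the maps along the prototype
// chain are checked (deoptimizing on a mismatch) and undefined is loaded.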
2429 void LCodeGen::EmitLoadFieldOrConstantFunction(Register result,
2430  Register object,
2431  Handle<Map> type,
2432  Handle<String> name,
2433  LEnvironment* env) {
2434  LookupResult lookup(isolate());
2435  type->LookupDescriptor(NULL, *name, &lookup);
2436  ASSERT(lookup.IsFound() || lookup.IsCacheable());
2437  if (lookup.IsField()) {
2438  int index = lookup.GetLocalFieldIndexFromMap(*type);
2439  int offset = index * kPointerSize;
2440  if (index < 0) {
2441  // Negative property indices are in-object properties, indexed
2442  // from the end of the fixed part of the object.
2443  __ lw(result, FieldMemOperand(object, offset + type->instance_size()));
2444  } else {
2445  // Non-negative property indices are in the properties array.
2446  __ lw(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
2447  __ lw(result, FieldMemOperand(result, offset + FixedArray::kHeaderSize));
2448  }
2449  } else if (lookup.IsConstantFunction()) {
2450  Handle<JSFunction> function(lookup.GetConstantFunctionFromMap(*type));
2451  __ LoadHeapObject(result, function);
2452  } else {
2453  // Negative lookup.
2454  // Check prototypes.
2455  Handle<HeapObject> current(HeapObject::cast((*type)->prototype()));
2456  Heap* heap = type->GetHeap();
2457  while (*current != heap->null_value()) {
2458  __ LoadHeapObject(result, current);
2459  __ lw(result, FieldMemOperand(result, HeapObject::kMapOffset));
2460  DeoptimizeIf(ne, env, result, Operand(Handle<Map>(current->map())));
2461  current =
2462  Handle<HeapObject>(HeapObject::cast(current->map()->prototype()));
2463  }
2464  __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
2465  }
2466 }
2467 
2468 
2469 void LCodeGen::DoLoadNamedFieldPolymorphic(LLoadNamedFieldPolymorphic* instr) {
2470  Register object = ToRegister(instr->object());
2471  Register result = ToRegister(instr->result());
2472  Register object_map = scratch0();
2473 
2474  int map_count = instr->hydrogen()->types()->length();
2475  bool need_generic = instr->hydrogen()->need_generic();
2476 
2477  if (map_count == 0 && !need_generic) {
2478  DeoptimizeIf(al, instr->environment());
2479  return;
2480  }
2481  Handle<String> name = instr->hydrogen()->name();
2482  Label done;
2483  __ lw(object_map, FieldMemOperand(object, HeapObject::kMapOffset));
2484  for (int i = 0; i < map_count; ++i) {
2485  bool last = (i == map_count - 1);
2486  Handle<Map> map = instr->hydrogen()->types()->at(i);
2487  Label check_passed;
2488  __ CompareMapAndBranch(
2489  object_map, map, &check_passed,
2490  eq, &check_passed, ALLOW_ELEMENT_TRANSITION_MAPS);
2491  if (last && !need_generic) {
2492  DeoptimizeIf(al, instr->environment());
2493  __ bind(&check_passed);
2494  EmitLoadFieldOrConstantFunction(
2495  result, object, map, name, instr->environment());
2496  } else {
2497  Label next;
2498  __ Branch(&next);
2499  __ bind(&check_passed);
2500  EmitLoadFieldOrConstantFunction(
2501  result, object, map, name, instr->environment());
2502  __ Branch(&done);
2503  __ bind(&next);
2504  }
2505  }
2506  if (need_generic) {
2507  __ li(a2, Operand(name));
2508  Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
2509  CallCode(ic, RelocInfo::CODE_TARGET, instr);
2510  }
2511  __ bind(&done);
2512 }
2513 
2514 
2515 void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
2516  ASSERT(ToRegister(instr->object()).is(a0));
2517  ASSERT(ToRegister(instr->result()).is(v0));
2518 
2519  // Name is always in a2.
2520  __ li(a2, Operand(instr->name()));
2521  Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
2522  CallCode(ic, RelocInfo::CODE_TARGET, instr);
2523 }
2524 
2525 
2526 void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
2527  Register scratch = scratch0();
2528  Register function = ToRegister(instr->function());
2529  Register result = ToRegister(instr->result());
2530 
2531  // Check that the function really is a function. Load map into the
2532  // result register.
2533  __ GetObjectType(function, result, scratch);
2534  DeoptimizeIf(ne, instr->environment(), scratch, Operand(JS_FUNCTION_TYPE));
2535 
2536  // Make sure that the function has an instance prototype.
2537  Label non_instance;
2538  __ lbu(scratch, FieldMemOperand(result, Map::kBitFieldOffset));
2539  __ And(scratch, scratch, Operand(1 << Map::kHasNonInstancePrototype));
2540  __ Branch(&non_instance, ne, scratch, Operand(zero_reg));
2541 
2542  // Get the prototype or initial map from the function.
2543  __ lw(result,
2544  FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
2545 
2546  // Check that the function has a prototype or an initial map.
2547  __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
2548  DeoptimizeIf(eq, instr->environment(), result, Operand(at));
2549 
2550  // If the function does not have an initial map, we're done.
2551  Label done;
2552  __ GetObjectType(result, scratch, scratch);
2553  __ Branch(&done, ne, scratch, Operand(MAP_TYPE));
2554 
2555  // Get the prototype from the initial map.
2556  __ lw(result, FieldMemOperand(result, Map::kPrototypeOffset));
2557  __ Branch(&done);
2558 
2559  // Non-instance prototype: Fetch prototype from constructor field
2560  // in initial map.
2561  __ bind(&non_instance);
2562  __ lw(result, FieldMemOperand(result, Map::kConstructorOffset));
2563 
2564  // All done.
2565  __ bind(&done);
2566 }
2567 
2568 
2569 void LCodeGen::DoLoadElements(LLoadElements* instr) {
2570  Register result = ToRegister(instr->result());
2571  Register input = ToRegister(instr->object());
2572  Register scratch = scratch0();
2573 
2574  __ lw(result, FieldMemOperand(input, JSObject::kElementsOffset));
2575  if (FLAG_debug_code) {
2576  Label done, fail;
2577  __ lw(scratch, FieldMemOperand(result, HeapObject::kMapOffset));
2578  __ LoadRoot(at, Heap::kFixedArrayMapRootIndex);
2579  __ Branch(USE_DELAY_SLOT, &done, eq, scratch, Operand(at));
2580  __ LoadRoot(at, Heap::kFixedCOWArrayMapRootIndex); // In the delay slot.
2581  __ Branch(&done, eq, scratch, Operand(at));
2582  // |scratch| still contains |input|'s map.
2583  __ lbu(scratch, FieldMemOperand(scratch, Map::kBitField2Offset));
2584  __ Ext(scratch, scratch, Map::kElementsKindShift,
2585  Map::kElementsKindBitCount);
2586  __ Branch(&fail, lt, scratch,
2587  Operand(GetInitialFastElementsKind()));
2588  __ Branch(&done, le, scratch,
2589  Operand(TERMINAL_FAST_ELEMENTS_KIND));
2590  __ Branch(&fail, lt, scratch,
2591  Operand(FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND));
2592  __ Branch(&done, le, scratch,
2593  Operand(LAST_EXTERNAL_ARRAY_ELEMENTS_KIND));
2594  __ bind(&fail);
2595  __ Abort("Check for fast or external elements failed.");
2596  __ bind(&done);
2597  }
2598 }
2599 
2600 
2601 void LCodeGen::DoLoadExternalArrayPointer(
2602  LLoadExternalArrayPointer* instr) {
2603  Register to_reg = ToRegister(instr->result());
2604  Register from_reg = ToRegister(instr->object());
2605  __ lw(to_reg, FieldMemOperand(from_reg,
2606  ExternalArray::kExternalPointerOffset));
2607 }
2608 
2609 
2610 void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
2611  Register arguments = ToRegister(instr->arguments());
2612  Register length = ToRegister(instr->length());
2613  Register index = ToRegister(instr->index());
2614  Register result = ToRegister(instr->result());
2615  // There are two words between the frame pointer and the last argument.
2617  // Subtracting from length accounts for one of them; add one more.
2617  __ subu(length, length, index);
2618  __ Addu(length, length, Operand(1));
2619  __ sll(length, length, kPointerSizeLog2);
2620  __ Addu(at, arguments, Operand(length));
2621  __ lw(result, MemOperand(at, 0));
2622 }
2623 
2624 
2625 void LCodeGen::DoLoadKeyedFastElement(LLoadKeyedFastElement* instr) {
2626  Register elements = ToRegister(instr->elements());
2627  Register result = ToRegister(instr->result());
2628  Register scratch = scratch0();
2629  Register store_base = scratch;
2630  int offset = 0;
2631 
2632  if (instr->key()->IsConstantOperand()) {
2633  LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
2634  offset = FixedArray::OffsetOfElementAt(ToInteger32(const_operand) +
2635  instr->additional_index());
2636  store_base = elements;
2637  } else {
2638  Register key = EmitLoadRegister(instr->key(), scratch);
2639  // Even though the HLoadKeyedFastElement instruction forces the input
2640  // representation for the key to be an integer, the input gets replaced
2641  // during bound check elimination with the index argument to the bounds
2642  // check, which can be tagged, so that case must be handled here, too.
2643  if (instr->hydrogen()->key()->representation().IsTagged()) {
2644  __ sll(scratch, key, kPointerSizeLog2 - kSmiTagSize);
2645  __ addu(scratch, elements, scratch);
2646  } else {
2647  __ sll(scratch, key, kPointerSizeLog2);
2648  __ addu(scratch, elements, scratch);
2649  }
2650  offset = FixedArray::OffsetOfElementAt(instr->additional_index());
2651  }
2652  __ lw(result, FieldMemOperand(store_base, offset));
2653 
2654  // Check for the hole value.
2655  if (instr->hydrogen()->RequiresHoleCheck()) {
2656  if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
2657  __ And(scratch, result, Operand(kSmiTagMask));
2658  DeoptimizeIf(ne, instr->environment(), scratch, Operand(zero_reg));
2659  } else {
2660  __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
2661  DeoptimizeIf(eq, instr->environment(), result, Operand(scratch));
2662  }
2663  }
2664 }
2665 
2666 
2667 void LCodeGen::DoLoadKeyedFastDoubleElement(
2668  LLoadKeyedFastDoubleElement* instr) {
2669  Register elements = ToRegister(instr->elements());
2670  bool key_is_constant = instr->key()->IsConstantOperand();
2671  Register key = no_reg;
2672  DoubleRegister result = ToDoubleRegister(instr->result());
2673  Register scratch = scratch0();
2674 
2675  int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
2676  int shift_size = (instr->hydrogen()->key()->representation().IsTagged())
2677  ? (element_size_shift - kSmiTagSize) : element_size_shift;
2678  int constant_key = 0;
2679  if (key_is_constant) {
2680  constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
2681  if (constant_key & 0xF0000000) {
2682  Abort("array index constant value too big.");
2683  }
2684  } else {
2685  key = ToRegister(instr->key());
2686  }
2687 
2688  if (key_is_constant) {
2689  __ Addu(elements, elements,
2690  Operand(((constant_key + instr->additional_index()) <<
2691  element_size_shift) +
2692  FixedDoubleArray::kHeaderSize - kHeapObjectTag));
2693  } else {
2694  __ sll(scratch, key, shift_size);
2695  __ Addu(elements, elements, Operand(scratch));
2696  __ Addu(elements, elements,
2697  Operand((FixedDoubleArray::kHeaderSize - kHeapObjectTag) +
2698  (instr->additional_index() << element_size_shift)));
2699  }
2700 
2701  if (instr->hydrogen()->RequiresHoleCheck()) {
2702  __ lw(scratch, MemOperand(elements, sizeof(kHoleNanLower32)));
2703  DeoptimizeIf(eq, instr->environment(), scratch, Operand(kHoleNanUpper32));
2704  }
2705 
2706  __ ldc1(result, MemOperand(elements));
2707 }
2708 
2709 
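// Builds the MemOperand for a keyed external-array access. For a constant
// key, the scaled key plus additional_offset is folded directly into the
// operand. For a register key, any additional_index is pre-scaled and added
// to the key in scratch0() first; the key is then shifted left by
// shift_size (or right by one when shift_size is -1, the tagged smi key
// with byte-sized elements case) and added to the base register.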
2710 MemOperand LCodeGen::PrepareKeyedOperand(Register key,
2711  Register base,
2712  bool key_is_constant,
2713  int constant_key,
2714  int element_size,
2715  int shift_size,
2716  int additional_index,
2717  int additional_offset) {
2718  if (additional_index != 0 && !key_is_constant) {
2719  additional_index *= 1 << (element_size - shift_size);
2720  __ Addu(scratch0(), key, Operand(additional_index));
2721  }
2722 
2723  if (key_is_constant) {
2724  return MemOperand(base,
2725  (constant_key << element_size) + additional_offset);
2726  }
2727 
2728  if (additional_index == 0) {
2729  if (shift_size >= 0) {
2730  __ sll(scratch0(), key, shift_size);
2731  __ Addu(scratch0(), base, scratch0());
2732  return MemOperand(scratch0());
2733  } else {
2734  ASSERT_EQ(-1, shift_size);
2735  __ srl(scratch0(), key, 1);
2736  __ Addu(scratch0(), base, scratch0());
2737  return MemOperand(scratch0());
2738  }
2739  }
2740 
2741  if (shift_size >= 0) {
2742  __ sll(scratch0(), scratch0(), shift_size);
2743  __ Addu(scratch0(), base, scratch0());
2744  return MemOperand(scratch0());
2745  } else {
2746  ASSERT_EQ(-1, shift_size);
2747  __ srl(scratch0(), scratch0(), 1);
2748  __ Addu(scratch0(), base, scratch0());
2749  return MemOperand(scratch0());
2750  }
2751 }
2752 
2753 
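// Loads an element from an external (typed) array. Float and double kinds
// go through an FPU load (with a single-to-double conversion for float
// elements); the integer kinds pick the matching lb/lbu/lh/lhu/lw
// instruction. Unsigned 32-bit loads deoptimize when the value does not
// fit in a signed 32-bit integer, unless the result is known to be used
// as a uint32.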
2754 void LCodeGen::DoLoadKeyedSpecializedArrayElement(
2755  LLoadKeyedSpecializedArrayElement* instr) {
2756  Register external_pointer = ToRegister(instr->external_pointer());
2757  Register key = no_reg;
2758  ElementsKind elements_kind = instr->elements_kind();
2759  bool key_is_constant = instr->key()->IsConstantOperand();
2760  int constant_key = 0;
2761  if (key_is_constant) {
2762  constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
2763  if (constant_key & 0xF0000000) {
2764  Abort("array index constant value too big.");
2765  }
2766  } else {
2767  key = ToRegister(instr->key());
2768  }
2769  int element_size_shift = ElementsKindToShiftSize(elements_kind);
2770  int shift_size = (instr->hydrogen()->key()->representation().IsTagged())
2771  ? (element_size_shift - kSmiTagSize) : element_size_shift;
2772  int additional_offset = instr->additional_index() << element_size_shift;
2773 
2774  if (elements_kind == EXTERNAL_FLOAT_ELEMENTS ||
2775  elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
2776  FPURegister result = ToDoubleRegister(instr->result());
2777  if (key_is_constant) {
2778  __ Addu(scratch0(), external_pointer, constant_key << element_size_shift);
2779  } else {
2780  __ sll(scratch0(), key, shift_size);
2781  __ Addu(scratch0(), scratch0(), external_pointer);
2782  }
2783 
2784  if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
2785  __ lwc1(result, MemOperand(scratch0(), additional_offset));
2786  __ cvt_d_s(result, result);
2787  } else { // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS
2788  __ ldc1(result, MemOperand(scratch0(), additional_offset));
2789  }
2790  } else {
2791  Register result = ToRegister(instr->result());
2792  MemOperand mem_operand = PrepareKeyedOperand(
2793  key, external_pointer, key_is_constant, constant_key,
2794  element_size_shift, shift_size,
2795  instr->additional_index(), additional_offset);
2796  switch (elements_kind) {
2797  case EXTERNAL_BYTE_ELEMENTS:
2798  __ lb(result, mem_operand);
2799  break;
2800  case EXTERNAL_PIXEL_ELEMENTS:
2801  case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
2802  __ lbu(result, mem_operand);
2803  break;
2804  case EXTERNAL_SHORT_ELEMENTS:
2805  __ lh(result, mem_operand);
2806  break;
2807  case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
2808  __ lhu(result, mem_operand);
2809  break;
2810  case EXTERNAL_INT_ELEMENTS:
2811  __ lw(result, mem_operand);
2812  break;
2813  case EXTERNAL_UNSIGNED_INT_ELEMENTS:
2814  __ lw(result, mem_operand);
2815  if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
2816  DeoptimizeIf(Ugreater_equal, instr->environment(),
2817  result, Operand(0x80000000));
2818  }
2819  break;
2820  case EXTERNAL_FLOAT_ELEMENTS:
2821  case EXTERNAL_DOUBLE_ELEMENTS:
2822  case FAST_DOUBLE_ELEMENTS:
2823  case FAST_ELEMENTS:
2824  case FAST_SMI_ELEMENTS:
2825  case FAST_HOLEY_DOUBLE_ELEMENTS:
2826  case FAST_HOLEY_ELEMENTS:
2827  case FAST_HOLEY_SMI_ELEMENTS:
2828  case DICTIONARY_ELEMENTS:
2829  case NON_STRICT_ARGUMENTS_ELEMENTS:
2830  UNREACHABLE();
2831  break;
2832  }
2833  }
2834 }
2835 
2836 
2837 void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
2838  ASSERT(ToRegister(instr->object()).is(a1));
2839  ASSERT(ToRegister(instr->key()).is(a0));
2840 
2841  Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
2842  CallCode(ic, RelocInfo::CODE_TARGET, instr);
2843 }
2844 
2845 
2846 void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
2847  Register scratch = scratch0();
2848  Register temp = scratch1();
2849  Register result = ToRegister(instr->result());
2850 
2851  if (instr->hydrogen()->from_inlined()) {
2852  __ Subu(result, sp, 2 * kPointerSize);
2853  } else {
2854  // Check if the calling frame is an arguments adaptor frame.
2855  Label done, adapted;
2856  __ lw(scratch, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
2857  __ lw(result, MemOperand(scratch, StandardFrameConstants::kContextOffset));
2858  __ Xor(temp, result, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
2859 
2860  // Result is the frame pointer for the frame if not adapted and for the real
2861  // frame below the adaptor frame if adapted.
2862  __ Movn(result, fp, temp); // Move only if temp is not equal to zero (ne).
2863  __ Movz(result, scratch, temp); // Move only if temp is equal to zero (eq).
2864  }
2865 }
2866 
2867 
2868 void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
2869  Register elem = ToRegister(instr->elements());
2870  Register result = ToRegister(instr->result());
2871 
2872  Label done;
2873 
2874  // If no arguments adaptor frame the number of arguments is fixed.
2875  __ Addu(result, zero_reg, Operand(scope()->num_parameters()));
2876  __ Branch(&done, eq, fp, Operand(elem));
2877 
2878  // Arguments adaptor frame present. Get argument length from there.
2879  __ lw(result, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
2880  __ lw(result,
2881  MemOperand(result, ArgumentsAdaptorFrameConstants::kLengthOffset));
2882  __ SmiUntag(result);
2883 
2884  // Argument length is in result register.
2885  __ bind(&done);
2886 }
2887 
2888 
2889 void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
2890  Register receiver = ToRegister(instr->receiver());
2891  Register function = ToRegister(instr->function());
2892  Register scratch = scratch0();
2893 
2894  // If the receiver is null or undefined, we have to pass the global
2895  // object as a receiver to normal functions. Values have to be
2896  // passed unchanged to builtins and strict-mode functions.
2897  Label global_object, receiver_ok;
2898 
2899  // Do not transform the receiver to object for strict mode
2900  // functions.
2901  __ lw(scratch,
2902  FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
2903  __ lw(scratch,
2904  FieldMemOperand(scratch, SharedFunctionInfo::kCompilerHintsOffset));
2905 
2906  // Do not transform the receiver to object for builtins.
2907  int32_t strict_mode_function_mask =
2908  1 << (SharedFunctionInfo::kStrictModeFunction + kSmiTagSize);
2909  int32_t native_mask = 1 << (SharedFunctionInfo::kNative + kSmiTagSize);
2910  __ And(scratch, scratch, Operand(strict_mode_function_mask | native_mask));
2911  __ Branch(&receiver_ok, ne, scratch, Operand(zero_reg));
2912 
2913  // Normal function. Replace undefined or null with global receiver.
2914  __ LoadRoot(scratch, Heap::kNullValueRootIndex);
2915  __ Branch(&global_object, eq, receiver, Operand(scratch));
2916  __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
2917  __ Branch(&global_object, eq, receiver, Operand(scratch));
2918 
2919  // Deoptimize if the receiver is not a JS object.
2920  __ And(scratch, receiver, Operand(kSmiTagMask));
2921  DeoptimizeIf(eq, instr->environment(), scratch, Operand(zero_reg));
2922 
2923  __ GetObjectType(receiver, scratch, scratch);
2924  DeoptimizeIf(lt, instr->environment(),
2925  scratch, Operand(FIRST_SPEC_OBJECT_TYPE));
2926  __ Branch(&receiver_ok);
2927 
2928  __ bind(&global_object);
2929  __ lw(receiver, GlobalObjectOperand());
2930  __ lw(receiver,
2931  FieldMemOperand(receiver, JSGlobalObject::kGlobalReceiverOffset));
2932  __ bind(&receiver_ok);
2933 }
2934 
2935 void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
2936  Register receiver = ToRegister(instr->receiver());
2937  Register function = ToRegister(instr->function());
2938  Register length = ToRegister(instr->length());
2939  Register elements = ToRegister(instr->elements());
2940  Register scratch = scratch0();
2941  ASSERT(receiver.is(a0)); // Used for parameter count.
2942  ASSERT(function.is(a1)); // Required by InvokeFunction.
2943  ASSERT(ToRegister(instr->result()).is(v0));
2944 
2945  // Copy the arguments to this function possibly from the
2946  // adaptor frame below it.
2947  const uint32_t kArgumentsLimit = 1 * KB;
2948  DeoptimizeIf(hi, instr->environment(), length, Operand(kArgumentsLimit));
2949 
2950  // Push the receiver and use the register to keep the original
2951  // number of arguments.
2952  __ push(receiver);
2953  __ Move(receiver, length);
2954  // The arguments are at a one pointer size offset from elements.
2955  __ Addu(elements, elements, Operand(1 * kPointerSize));
2956 
2957  // Loop through the arguments pushing them onto the execution
2958  // stack.
2959  Label invoke, loop;
2960  // length is a small non-negative integer, due to the test above.
2961  __ Branch(USE_DELAY_SLOT, &invoke, eq, length, Operand(zero_reg));
2962  __ sll(scratch, length, 2);
2963  __ bind(&loop);
2964  __ Addu(scratch, elements, scratch);
2965  __ lw(scratch, MemOperand(scratch));
2966  __ push(scratch);
2967  __ Subu(length, length, Operand(1));
2968  __ Branch(USE_DELAY_SLOT, &loop, ne, length, Operand(zero_reg));
2969  __ sll(scratch, length, 2);
2970 
2971  __ bind(&invoke);
2972  ASSERT(instr->HasPointerMap());
2973  LPointerMap* pointers = instr->pointer_map();
2974  RecordPosition(pointers->position());
2975  SafepointGenerator safepoint_generator(
2976  this, pointers, Safepoint::kLazyDeopt);
2977  // The number of arguments is stored in receiver which is a0, as expected
2978  // by InvokeFunction.
2979  ParameterCount actual(receiver);
2980  __ InvokeFunction(function, actual, CALL_FUNCTION,
2981  safepoint_generator, CALL_AS_METHOD);
2982  __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
2983 }
2984 
2985 
2986 void LCodeGen::DoPushArgument(LPushArgument* instr) {
2987  LOperand* argument = instr->value();
2988  if (argument->IsDoubleRegister() || argument->IsDoubleStackSlot()) {
2989  Abort("DoPushArgument not implemented for double type.");
2990  } else {
2991  Register argument_reg = EmitLoadRegister(argument, at);
2992  __ push(argument_reg);
2993  }
2994 }
2995 
2996 
2997 void LCodeGen::DoDrop(LDrop* instr) {
2998  __ Drop(instr->count());
2999 }
3000 
3001 
3002 void LCodeGen::DoThisFunction(LThisFunction* instr) {
3003  Register result = ToRegister(instr->result());
3004  __ lw(result, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
3005 }
3006 
3007 
3008 void LCodeGen::DoContext(LContext* instr) {
3009  Register result = ToRegister(instr->result());
3010  __ mov(result, cp);
3011 }
3012 
3013 
3014 void LCodeGen::DoOuterContext(LOuterContext* instr) {
3015  Register context = ToRegister(instr->context());
3016  Register result = ToRegister(instr->result());
3017  __ lw(result,
3018  MemOperand(context, Context::SlotOffset(Context::PREVIOUS_INDEX)));
3019 }
3020 
3021 
3022 void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
3023  __ LoadHeapObject(scratch0(), instr->hydrogen()->pairs());
3024  __ li(scratch1(), Operand(Smi::FromInt(instr->hydrogen()->flags())));
3025  // The context is the first argument.
3026  __ Push(cp, scratch0(), scratch1());
3027  CallRuntime(Runtime::kDeclareGlobals, 3, instr);
3028 }
3029 
3030 
3031 void LCodeGen::DoGlobalObject(LGlobalObject* instr) {
3032  Register result = ToRegister(instr->result());
3033  __ lw(result, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
3034 }
3035 
3036 
3037 void LCodeGen::DoGlobalReceiver(LGlobalReceiver* instr) {
3038  Register global = ToRegister(instr->global_object());
3039  Register result = ToRegister(instr->result());
3040  __ lw(result, FieldMemOperand(global, GlobalObject::kGlobalReceiverOffset));
3041 }
3042 
3043 
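// Invokes a function known at compile time. If no arguments adaptation is
// needed (the call arity matches the formal parameter count), the function
// is called directly through its code entry with a0 holding the argument
// count and cp switched to the callee's context; otherwise the generic
// InvokeFunction path builds an arguments adaptor frame.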
3044 void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
3045  int arity,
3046  LInstruction* instr,
3047  CallKind call_kind,
3048  A1State a1_state) {
3049  bool can_invoke_directly = !function->NeedsArgumentsAdaption() ||
3050  function->shared()->formal_parameter_count() == arity;
3051 
3052  LPointerMap* pointers = instr->pointer_map();
3053  RecordPosition(pointers->position());
3054 
3055  if (can_invoke_directly) {
3056  if (a1_state == A1_UNINITIALIZED) {
3057  __ LoadHeapObject(a1, function);
3058  }
3059 
3060  // Change context.
3061  __ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
3062 
3063  // Set a0 to arguments count if adaption is not needed. Assumes that a0
3064  // is available to write to at this point.
3065  if (!function->NeedsArgumentsAdaption()) {
3066  __ li(a0, Operand(arity));
3067  }
3068 
3069  // Invoke function.
3070  __ SetCallKind(t1, call_kind);
3071  __ lw(at, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
3072  __ Call(at);
3073 
3074  // Set up deoptimization.
3075  RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
3076  } else {
3077  SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
3078  ParameterCount count(arity);
3079  __ InvokeFunction(function, count, CALL_FUNCTION, generator, call_kind);
3080  }
3081 
3082  // Restore context.
3083  __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
3084 }
3085 
3086 
3087 void LCodeGen::DoCallConstantFunction(LCallConstantFunction* instr) {
3088  ASSERT(ToRegister(instr->result()).is(v0));
3089  __ mov(a0, v0);
3090  CallKnownFunction(instr->function(),
3091  instr->arity(),
3092  instr,
3093  CALL_AS_METHOD,
3094  A1_UNINITIALIZED);
3095 }
3096 
3097 
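// Deferred (non-smi) case of Math.abs: the input is a tagged heap number.
// For a negative value a new heap number is allocated (via the runtime if
// inline allocation fails) holding the input's bits with the sign bit
// cleared; registers are preserved across the allocation through the
// safepoint mechanism.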
3098 void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr) {
3099  Register input = ToRegister(instr->value());
3100  Register result = ToRegister(instr->result());
3101  Register scratch = scratch0();
3102 
3103  // Deoptimize if not a heap number.
3104  __ lw(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
3105  __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
3106  DeoptimizeIf(ne, instr->environment(), scratch, Operand(at));
3107 
3108  Label done;
3109  Register exponent = scratch0();
3110  scratch = no_reg;
3111  __ lw(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));
3112  // Check the sign of the argument. If the argument is positive, just
3113  // return it.
3114  __ Move(result, input);
3115  __ And(at, exponent, Operand(HeapNumber::kSignMask));
3116  __ Branch(&done, eq, at, Operand(zero_reg));
3117 
3118  // Input is negative. Reverse its sign.
3119  // Preserve the value of all registers.
3120  {
3121  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
3122 
3123  // Registers were saved at the safepoint, so we can use
3124  // many scratch registers.
3125  Register tmp1 = input.is(a1) ? a0 : a1;
3126  Register tmp2 = input.is(a2) ? a0 : a2;
3127  Register tmp3 = input.is(a3) ? a0 : a3;
3128  Register tmp4 = input.is(t0) ? a0 : t0;
3129 
3130  // exponent: floating point exponent value.
3131 
3132  Label allocated, slow;
3133  __ LoadRoot(tmp4, Heap::kHeapNumberMapRootIndex);
3134  __ AllocateHeapNumber(tmp1, tmp2, tmp3, tmp4, &slow);
3135  __ Branch(&allocated);
3136 
3137  // Slow case: Call the runtime system to do the number allocation.
3138  __ bind(&slow);
3139 
3140  CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
3141  // Set the pointer to the new heap number in tmp.
3142  if (!tmp1.is(v0))
3143  __ mov(tmp1, v0);
3144  // Restore input_reg after call to runtime.
3145  __ LoadFromSafepointRegisterSlot(input, input);
3146  __ lw(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));
3147 
3148  __ bind(&allocated);
3149  // exponent: floating point exponent value.
3150  // tmp1: allocated heap number.
3151  __ And(exponent, exponent, Operand(~HeapNumber::kSignMask));
3152  __ sw(exponent, FieldMemOperand(tmp1, HeapNumber::kExponentOffset));
3153  __ lw(tmp2, FieldMemOperand(input, HeapNumber::kMantissaOffset));
3154  __ sw(tmp2, FieldMemOperand(tmp1, HeapNumber::kMantissaOffset));
3155 
3156  __ StoreToSafepointRegisterSlot(tmp1, result);
3157  }
3158 
3159  __ bind(&done);
3160 }
3161 
3162 
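// Integer Math.abs: the move into the result register is in the branch
// delay slot, so non-negative inputs fall through unchanged; negative
// inputs are negated, and kMinInt (0x80000000), whose negation is still
// negative, deoptimizes.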
3163 void LCodeGen::EmitIntegerMathAbs(LUnaryMathOperation* instr) {
3164  Register input = ToRegister(instr->value());
3165  Register result = ToRegister(instr->result());
3166  Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
3167  Label done;
3168  __ Branch(USE_DELAY_SLOT, &done, ge, input, Operand(zero_reg));
3169  __ mov(result, input);
3170  ASSERT_EQ(2, masm()->InstructionsGeneratedSince(&done));
3171  __ subu(result, zero_reg, input);
3172  // Overflow if result is still negative, i.e. 0x80000000.
3173  DeoptimizeIf(lt, instr->environment(), result, Operand(zero_reg));
3174  __ bind(&done);
3175 }
3176 
3177 
3178 void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) {
3179  // Class for deferred case.
3180  class DeferredMathAbsTaggedHeapNumber: public LDeferredCode {
3181  public:
3182  DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen,
3183  LUnaryMathOperation* instr)
3184  : LDeferredCode(codegen), instr_(instr) { }
3185  virtual void Generate() {
3186  codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
3187  }
3188  virtual LInstruction* instr() { return instr_; }
3189  private:
3190  LUnaryMathOperation* instr_;
3191  };
3192 
3193  Representation r = instr->hydrogen()->value()->representation();
3194  if (r.IsDouble()) {
3195  FPURegister input = ToDoubleRegister(instr->value());
3196  FPURegister result = ToDoubleRegister(instr->result());
3197  __ abs_d(result, input);
3198  } else if (r.IsInteger32()) {
3199  EmitIntegerMathAbs(instr);
3200  } else {
3201  // Representation is tagged.
3202  DeferredMathAbsTaggedHeapNumber* deferred =
3203  new(zone()) DeferredMathAbsTaggedHeapNumber(this, instr);
3204  Register input = ToRegister(instr->value());
3205  // Smi check.
3206  __ JumpIfNotSmi(input, deferred->entry());
3207  // If smi, handle it directly.
3208  EmitIntegerMathAbs(instr);
3209  __ bind(deferred->exit());
3210  }
3211 }
3212 
3213 
3214 void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) {
3215  DoubleRegister input = ToDoubleRegister(instr->value());
3216  Register result = ToRegister(instr->result());
3217  FPURegister single_scratch = double_scratch0().low();
3218  Register scratch1 = scratch0();
3219  Register except_flag = ToRegister(instr->temp());
3220 
3221  __ EmitFPUTruncate(kRoundToMinusInf,
3222  single_scratch,
3223  input,
3224  scratch1,
3225  except_flag);
3226 
3227  // Deopt if the operation did not succeed.
3228  DeoptimizeIf(ne, instr->environment(), except_flag, Operand(zero_reg));
3229 
3230  // Load the result.
3231  __ mfc1(result, single_scratch);
3232 
3233  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3234  // Test for -0.
3235  Label done;
3236  __ Branch(&done, ne, result, Operand(zero_reg));
3237  __ mfc1(scratch1, input.high());
3238  __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
3239  DeoptimizeIf(ne, instr->environment(), scratch1, Operand(zero_reg));
3240  __ bind(&done);
3241  }
3242 }
3243 
3244 
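// Math.round is implemented as "add 0.5, then truncate toward minus
// infinity", with explicit handling of the corner cases: magnitudes below
// 0.5 produce +/-0, values outside ]-2^32, 2^32[ deoptimize, and -0 results
// deoptimize when the instruction bails out on minus zero.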
3245 void LCodeGen::DoMathRound(LUnaryMathOperation* instr) {
3246  DoubleRegister input = ToDoubleRegister(instr->value());
3247  Register result = ToRegister(instr->result());
3248  Register scratch = scratch0();
3249  Label done, check_sign_on_zero;
3250 
3251  // Extract exponent bits.
3252  __ mfc1(result, input.high());
3253  __ Ext(scratch,
3254  result,
3255  HeapNumber::kExponentShift,
3256  HeapNumber::kExponentBits);
3257 
3258  // If the number is in ]-0.5, +0.5[, the result is +/- 0.
3259  Label skip1;
3260  __ Branch(&skip1, gt, scratch, Operand(HeapNumber::kExponentBias - 2));
3261  __ mov(result, zero_reg);
3262  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3263  __ Branch(&check_sign_on_zero);
3264  } else {
3265  __ Branch(&done);
3266  }
3267  __ bind(&skip1);
3268 
3269  // The following conversion will not work with numbers
3270  // outside of ]-2^32, 2^32[.
3271  DeoptimizeIf(ge, instr->environment(), scratch,
3272  Operand(HeapNumber::kExponentBias + 32));
3273 
3274  // Save the original sign for later comparison.
3275  __ And(scratch, result, Operand(HeapNumber::kSignMask));
3276 
3277  __ Move(double_scratch0(), 0.5);
3278  __ add_d(double_scratch0(), input, double_scratch0());
3279 
3280  // Check sign of the result: if the sign changed, the input
3281  // value was in [-0.5, -0[ and the result should be -0.
3282  __ mfc1(result, double_scratch0().high());
3283  __ Xor(result, result, Operand(scratch));
3284  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3285  // ARM uses 'mi' here, which is 'lt'
3286  DeoptimizeIf(lt, instr->environment(), result,
3287  Operand(zero_reg));
3288  } else {
3289  Label skip2;
3290  // ARM uses 'mi' here, which is 'lt'
3291  // Negating it results in 'ge'
3292  __ Branch(&skip2, ge, result, Operand(zero_reg));
3293  __ mov(result, zero_reg);
3294  __ Branch(&done);
3295  __ bind(&skip2);
3296  }
3297 
3298  Register except_flag = scratch;
3299 
3300  __ EmitFPUTruncate(kRoundToMinusInf,
3301  double_scratch0().low(),
3302  double_scratch0(),
3303  result,
3304  except_flag);
3305 
3306  DeoptimizeIf(ne, instr->environment(), except_flag, Operand(zero_reg));
3307 
3308  __ mfc1(result, double_scratch0().low());
3309 
3310  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3311  // Test for -0.
3312  __ Branch(&done, ne, result, Operand(zero_reg));
3313  __ bind(&check_sign_on_zero);
3314  __ mfc1(scratch, input.high());
3315  __ And(scratch, scratch, Operand(HeapNumber::kSignMask));
3316  DeoptimizeIf(ne, instr->environment(), scratch, Operand(zero_reg));
3317  }
3318  __ bind(&done);
3319 }
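// A minimal scalar sketch of the rounding behavior implemented above for
// inputs inside the ]-2^32, 2^32[ fast-path range: Math.round adds 0.5 and
// floors, so ties round toward +Infinity, and inputs in [-0.5, -0] must
// produce -0. The helper name is hypothetical.
#include <cmath>

static double JsMathRoundSketch(double x) {
  if (x >= -0.5 && x < 0.5) {
    return std::signbit(x) ? -0.0 : 0.0;  // Keep the sign for -0 results.
  }
  return std::floor(x + 0.5);
}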
3320 
3321 
3322 void LCodeGen::DoMathSqrt(LUnaryMathOperation* instr) {
3323  DoubleRegister input = ToDoubleRegister(instr->value());
3324  DoubleRegister result = ToDoubleRegister(instr->result());
3325  __ sqrt_d(result, input);
3326 }
3327 
3328 
3329 void LCodeGen::DoMathPowHalf(LUnaryMathOperation* instr) {
3330  DoubleRegister input = ToDoubleRegister(instr->value());
3331  DoubleRegister result = ToDoubleRegister(instr->result());
3332  DoubleRegister temp = ToDoubleRegister(instr->temp());
3333 
3334  ASSERT(!input.is(result));
3335 
3336  // Note that according to ECMA-262 15.8.2.13:
3337  // Math.pow(-Infinity, 0.5) == Infinity
3338  // Math.sqrt(-Infinity) == NaN
3339  Label done;
3340  __ Move(temp, -V8_INFINITY);
3341  __ BranchF(USE_DELAY_SLOT, &done, NULL, eq, temp, input);
3342  // Set up Infinity in the delay slot.
3343  // result is overwritten if the branch is not taken.
3344  __ neg_d(result, temp);
3345 
3346  // Add +0 to convert -0 to +0.
3347  __ add_d(result, input, kDoubleRegZero);
3348  __ sqrt_d(result, result);
3349  __ bind(&done);
3350 }
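// Sketch of the special case noted above (hypothetical helper): Math.pow(x,
// 0.5) must return +Infinity for x == -Infinity even though sqrt(-Infinity)
// is NaN, and the "+0" addition maps -0 to +0 before the square root.
#include <cmath>
#include <limits>

static double JsPowHalfSketch(double x) {
  if (x == -std::numeric_limits<double>::infinity()) {
    return std::numeric_limits<double>::infinity();
  }
  return std::sqrt(x + 0.0);  // -0 + 0 == +0, so sqrt never sees -0 here.
}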
3351 
3352 
3353 void LCodeGen::DoPower(LPower* instr) {
3354  Representation exponent_type = instr->hydrogen()->right()->representation();
3355  // Having marked this as a call, we can use any registers.
3356  // Just make sure that the input/output registers are the expected ones.
3357  ASSERT(!instr->right()->IsDoubleRegister() ||
3358  ToDoubleRegister(instr->right()).is(f4));
3359  ASSERT(!instr->right()->IsRegister() ||
3360  ToRegister(instr->right()).is(a2));
3361  ASSERT(ToDoubleRegister(instr->left()).is(f2));
3362  ASSERT(ToDoubleRegister(instr->result()).is(f0));
3363 
3364  if (exponent_type.IsTagged()) {
3365  Label no_deopt;
3366  __ JumpIfSmi(a2, &no_deopt);
3368  DeoptimizeIf(ne, instr->environment(), t3, Operand(at));
3369  __ bind(&no_deopt);
3370  MathPowStub stub(MathPowStub::TAGGED);
3371  __ CallStub(&stub);
3372  } else if (exponent_type.IsInteger32()) {
3373  MathPowStub stub(MathPowStub::INTEGER);
3374  __ CallStub(&stub);
3375  } else {
3376  ASSERT(exponent_type.IsDouble());
3377  MathPowStub stub(MathPowStub::DOUBLE);
3378  __ CallStub(&stub);
3379  }
3380 }
3381 
3382 
3383 void LCodeGen::DoRandom(LRandom* instr) {
3384  class DeferredDoRandom: public LDeferredCode {
3385  public:
3386  DeferredDoRandom(LCodeGen* codegen, LRandom* instr)
3387  : LDeferredCode(codegen), instr_(instr) { }
3388  virtual void Generate() { codegen()->DoDeferredRandom(instr_); }
3389  virtual LInstruction* instr() { return instr_; }
3390  private:
3391  LRandom* instr_;
3392  };
3393 
3394  DeferredDoRandom* deferred = new(zone()) DeferredDoRandom(this, instr);
3395  // Having marked this instruction as a call we can use any
3396  // registers.
3397  ASSERT(ToDoubleRegister(instr->result()).is(f0));
3398  ASSERT(ToRegister(instr->global_object()).is(a0));
3399 
3400  static const int kSeedSize = sizeof(uint32_t);
3401  STATIC_ASSERT(kPointerSize == kSeedSize);
3402 
3403  __ lw(a0, FieldMemOperand(a0, GlobalObject::kNativeContextOffset));
3404  static const int kRandomSeedOffset =
3405  FixedArray::kHeaderSize + Context::RANDOM_SEED_INDEX * kPointerSize;
3406  __ lw(a2, FieldMemOperand(a0, kRandomSeedOffset));
3407  // a2: FixedArray of the native context's random seeds
3408 
3409  // Load state[0].
3410  __ lw(a1, FieldMemOperand(a2, ByteArray::kHeaderSize));
3411  __ Branch(deferred->entry(), eq, a1, Operand(zero_reg));
3412  // Load state[1].
3413  __ lw(a0, FieldMemOperand(a2, ByteArray::kHeaderSize + kSeedSize));
3414  // a1: state[0].
3415  // a0: state[1].
3416 
3417  // state[0] = 18273 * (state[0] & 0xFFFF) + (state[0] >> 16)
3418  __ And(a3, a1, Operand(0xFFFF));
3419  __ li(t0, Operand(18273));
3420  __ Mul(a3, a3, t0);
3421  __ srl(a1, a1, 16);
3422  __ Addu(a1, a3, a1);
3423  // Save state[0].
3424  __ sw(a1, FieldMemOperand(a2, ByteArray::kHeaderSize));
3425 
3426  // state[1] = 36969 * (state[1] & 0xFFFF) + (state[1] >> 16)
3427  __ And(a3, a0, Operand(0xFFFF));
3428  __ li(t0, Operand(36969));
3429  __ Mul(a3, a3, t0);
3430  __ srl(a0, a0, 16);
3431  __ Addu(a0, a3, a0);
3432  // Save state[1].
3433  __ sw(a0, FieldMemOperand(a2, ByteArray::kHeaderSize + kSeedSize));
3434 
3435  // Random bit pattern = (state[0] << 14) + (state[1] & 0x3FFFF)
3436  __ And(a0, a0, Operand(0x3FFFF));
3437  __ sll(a1, a1, 14);
3438  __ Addu(v0, a0, a1);
3439 
3440  __ bind(deferred->exit());
3441 
3442  // 0x41300000 is the top half of 1.0 x 2^20 as a double.
3443  __ li(a2, Operand(0x41300000));
3444  // Move 0x41300000xxxxxxxx (x = random bits in v0) to FPU.
3445  __ Move(f12, v0, a2);
3446  // Move 0x4130000000000000 to FPU.
3447  __ Move(f14, zero_reg, a2);
3448  // Subtract to get the result.
3449  __ sub_d(f0, f12, f14);
3450 }
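// Sketch of the inline fast path above: two 16-bit multiply-with-carry seeds
// are advanced, 32 random bits are formed, and the bits become the low
// mantissa word of a double with high word 0x41300000 (the value 2^20), so
// subtracting 2^20 leaves a value in [0, 1). Names here are hypothetical.
#include <cstdint>
#include <cstring>

static double RandomFromSeedsSketch(uint32_t* state0, uint32_t* state1) {
  *state0 = 18273u * (*state0 & 0xFFFFu) + (*state0 >> 16);
  *state1 = 36969u * (*state1 & 0xFFFFu) + (*state1 >> 16);
  uint32_t random_bits = (*state0 << 14) + (*state1 & 0x3FFFFu);

  uint64_t bits = (static_cast<uint64_t>(0x41300000u) << 32) | random_bits;
  double glued;                          // 2^20 + random_bits * 2^-32
  std::memcpy(&glued, &bits, sizeof(glued));
  return glued - 1048576.0;              // Subtract 2^20.
}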
3451 
3452 void LCodeGen::DoDeferredRandom(LRandom* instr) {
3453  __ PrepareCallCFunction(1, scratch0());
3454  __ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1);
3455  // Return value is in v0.
3456 }
3457 
3458 
3459 void LCodeGen::DoMathLog(LUnaryMathOperation* instr) {
3460  ASSERT(ToDoubleRegister(instr->result()).is(f4));
3461  TranscendentalCacheStub stub(TranscendentalCache::LOG,
3462  TranscendentalCacheStub::UNTAGGED);
3463  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
3464 }
3465 
3466 
3467 void LCodeGen::DoMathTan(LUnaryMathOperation* instr) {
3468  ASSERT(ToDoubleRegister(instr->result()).is(f4));
3469  TranscendentalCacheStub stub(TranscendentalCache::TAN,
3470  TranscendentalCacheStub::UNTAGGED);
3471  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
3472 }
3473 
3474 
3475 void LCodeGen::DoMathCos(LUnaryMathOperation* instr) {
3476  ASSERT(ToDoubleRegister(instr->result()).is(f4));
3477  TranscendentalCacheStub stub(TranscendentalCache::COS,
3478  TranscendentalCacheStub::UNTAGGED);
3479  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
3480 }
3481 
3482 
3483 void LCodeGen::DoMathSin(LUnaryMathOperation* instr) {
3484  ASSERT(ToDoubleRegister(instr->result()).is(f4));
3485  TranscendentalCacheStub stub(TranscendentalCache::SIN,
3486  TranscendentalCacheStub::UNTAGGED);
3487  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
3488 }
3489 
3490 
3491 void LCodeGen::DoUnaryMathOperation(LUnaryMathOperation* instr) {
3492  switch (instr->op()) {
3493  case kMathAbs:
3494  DoMathAbs(instr);
3495  break;
3496  case kMathFloor:
3497  DoMathFloor(instr);
3498  break;
3499  case kMathRound:
3500  DoMathRound(instr);
3501  break;
3502  case kMathSqrt:
3503  DoMathSqrt(instr);
3504  break;
3505  case kMathPowHalf:
3506  DoMathPowHalf(instr);
3507  break;
3508  case kMathCos:
3509  DoMathCos(instr);
3510  break;
3511  case kMathSin:
3512  DoMathSin(instr);
3513  break;
3514  case kMathTan:
3515  DoMathTan(instr);
3516  break;
3517  case kMathLog:
3518  DoMathLog(instr);
3519  break;
3520  default:
3521  Abort("Unimplemented type of LUnaryMathOperation.");
3522  UNREACHABLE();
3523  }
3524 }
3525 
3526 
3527 void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
3528  ASSERT(ToRegister(instr->function()).is(a1));
3529  ASSERT(instr->HasPointerMap());
3530 
3531  if (instr->known_function().is_null()) {
3532  LPointerMap* pointers = instr->pointer_map();
3533  RecordPosition(pointers->position());
3534  SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
3535  ParameterCount count(instr->arity());
3536  __ InvokeFunction(a1, count, CALL_FUNCTION, generator, CALL_AS_METHOD);
3538  } else {
3539  CallKnownFunction(instr->known_function(),
3540  instr->arity(),
3541  instr,
3543  A1_CONTAINS_TARGET);
3544  }
3545 }
3546 
3547 
3548 void LCodeGen::DoCallKeyed(LCallKeyed* instr) {
3549  ASSERT(ToRegister(instr->result()).is(v0));
3550 
3551  int arity = instr->arity();
3552  Handle<Code> ic =
3553  isolate()->stub_cache()->ComputeKeyedCallInitialize(arity);
3554  CallCode(ic, RelocInfo::CODE_TARGET, instr);
3556 }
3557 
3558 
3559 void LCodeGen::DoCallNamed(LCallNamed* instr) {
3560  ASSERT(ToRegister(instr->result()).is(v0));
3561 
3562  int arity = instr->arity();
3563  RelocInfo::Mode mode = RelocInfo::CODE_TARGET;
3564  Handle<Code> ic =
3565  isolate()->stub_cache()->ComputeCallInitialize(arity, mode);
3566  __ li(a2, Operand(instr->name()));
3567  CallCode(ic, mode, instr);
3568  // Restore context register.
3570 }
3571 
3572 
3573 void LCodeGen::DoCallFunction(LCallFunction* instr) {
3574  ASSERT(ToRegister(instr->function()).is(a1));
3575  ASSERT(ToRegister(instr->result()).is(v0));
3576 
3577  int arity = instr->arity();
3578  CallFunctionStub stub(arity, NO_CALL_FUNCTION_FLAGS);
3579  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
3581 }
3582 
3583 
3584 void LCodeGen::DoCallGlobal(LCallGlobal* instr) {
3585  ASSERT(ToRegister(instr->result()).is(v0));
3586 
3587  int arity = instr->arity();
3588  RelocInfo::Mode mode = RelocInfo::CODE_TARGET_CONTEXT;
3589  Handle<Code> ic =
3590  isolate()->stub_cache()->ComputeCallInitialize(arity, mode);
3591  __ li(a2, Operand(instr->name()));
3592  CallCode(ic, mode, instr);
3594 }
3595 
3596 
3597 void LCodeGen::DoCallKnownGlobal(LCallKnownGlobal* instr) {
3598  ASSERT(ToRegister(instr->result()).is(v0));
3599  CallKnownFunction(instr->target(),
3600  instr->arity(),
3601  instr,
3603  A1_UNINITIALIZED);
3604 }
3605 
3606 
3607 void LCodeGen::DoCallNew(LCallNew* instr) {
3608  ASSERT(ToRegister(instr->constructor()).is(a1));
3609  ASSERT(ToRegister(instr->result()).is(v0));
3610 
3611  CallConstructStub stub(NO_CALL_FUNCTION_FLAGS);
3612  __ li(a0, Operand(instr->arity()));
3613  CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
3614 }
3615 
3616 
3617 void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
3618  CallRuntime(instr->function(), instr->arity(), instr);
3619 }
3620 
3621 
3622 void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
3623  Register object = ToRegister(instr->object());
3624  Register value = ToRegister(instr->value());
3625  Register scratch = scratch0();
3626  int offset = instr->offset();
3627 
3628  ASSERT(!object.is(value));
3629 
3630  if (!instr->transition().is_null()) {
3631  __ li(scratch, Operand(instr->transition()));
3632  __ sw(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
3633  if (instr->hydrogen()->NeedsWriteBarrierForMap()) {
3634  Register temp = ToRegister(instr->temp());
3635  // Update the write barrier for the map field.
3636  __ RecordWriteField(object,
3638  scratch,
3639  temp,
3641  kSaveFPRegs,
3643  OMIT_SMI_CHECK);
3644  }
3645  }
3646 
3647  // Do the store.
3648  HType type = instr->hydrogen()->value()->type();
3649  SmiCheck check_needed =
3650  type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
3651  if (instr->is_in_object()) {
3652  __ sw(value, FieldMemOperand(object, offset));
3653  if (instr->hydrogen()->NeedsWriteBarrier()) {
3654  // Update the write barrier for the object for in-object properties.
3655  __ RecordWriteField(object,
3656  offset,
3657  value,
3658  scratch,
3660  kSaveFPRegs,
3662  check_needed);
3663  }
3664  } else {
3665  __ lw(scratch, FieldMemOperand(object, JSObject::kPropertiesOffset));
3666  __ sw(value, FieldMemOperand(scratch, offset));
3667  if (instr->hydrogen()->NeedsWriteBarrier()) {
3668  // Update the write barrier for the properties array.
3669  // object is used as a scratch register.
3670  __ RecordWriteField(scratch,
3671  offset,
3672  value,
3673  object,
3675  kSaveFPRegs,
3677  check_needed);
3678  }
3679  }
3680 }
3681 
3682 
3683 void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
3684  ASSERT(ToRegister(instr->object()).is(a1));
3685  ASSERT(ToRegister(instr->value()).is(a0));
3686 
3687  // Name is always in a2.
3688  __ li(a2, Operand(instr->name()));
3689  Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
3690  ? isolate()->builtins()->StoreIC_Initialize_Strict()
3691  : isolate()->builtins()->StoreIC_Initialize();
3692  CallCode(ic, RelocInfo::CODE_TARGET, instr);
3693 }
3694 
3695 
3696 void LCodeGen::DeoptIfTaggedButNotSmi(LEnvironment* environment,
3697  HValue* value,
3698  LOperand* operand) {
3699  if (value->representation().IsTagged() && !value->type().IsSmi()) {
3700  if (operand->IsRegister()) {
3701  __ And(at, ToRegister(operand), Operand(kSmiTagMask));
3702  DeoptimizeIf(ne, environment, at, Operand(zero_reg));
3703  } else {
3704  __ li(at, ToOperand(operand));
3705  __ And(at, at, Operand(kSmiTagMask));
3706  DeoptimizeIf(ne, environment, at, Operand(zero_reg));
3707  }
3708  }
3709 }
3710 
3711 
3712 void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
3713  DeoptIfTaggedButNotSmi(instr->environment(),
3714  instr->hydrogen()->length(),
3715  instr->length());
3716  DeoptIfTaggedButNotSmi(instr->environment(),
3717  instr->hydrogen()->index(),
3718  instr->index());
3719  if (instr->index()->IsConstantOperand()) {
3720  int constant_index =
3721  ToInteger32(LConstantOperand::cast(instr->index()));
3722  if (instr->hydrogen()->length()->representation().IsTagged()) {
3723  __ li(at, Operand(Smi::FromInt(constant_index)));
3724  } else {
3725  __ li(at, Operand(constant_index));
3726  }
3727  DeoptimizeIf(hs,
3728  instr->environment(),
3729  at,
3730  Operand(ToRegister(instr->length())));
3731  } else {
3732  DeoptimizeIf(hs,
3733  instr->environment(),
3734  ToRegister(instr->index()),
3735  Operand(ToRegister(instr->length())));
3736  }
3737 }
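// Sketch of the comparison above: "hs" is an unsigned >=, so interpreting a
// possibly negative index as unsigned rejects index < 0 and index >= length
// with one test. Hypothetical helper.
#include <cstdint>

static bool BoundsCheckFailsSketch(int32_t index, int32_t length) {
  return static_cast<uint32_t>(index) >= static_cast<uint32_t>(length);
}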
3738 
3739 
3740 void LCodeGen::DoStoreKeyedFastElement(LStoreKeyedFastElement* instr) {
3741  Register value = ToRegister(instr->value());
3742  Register elements = ToRegister(instr->object());
3743  Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) : no_reg;
3744  Register scratch = scratch0();
3745  Register store_base = scratch;
3746  int offset = 0;
3747 
3748  // Do the store.
3749  if (instr->key()->IsConstantOperand()) {
3750  ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
3751  LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
3752  offset = FixedArray::OffsetOfElementAt(ToInteger32(const_operand) +
3753  instr->additional_index());
3754  store_base = elements;
3755  } else {
3756  // Even though the HLoadKeyedFastElement instruction forces the input
3757  // representation for the key to be an integer, the input gets replaced
3758  // during bound check elimination with the index argument to the bounds
3759  // check, which can be tagged, so that case must be handled here, too.
3760  if (instr->hydrogen()->key()->representation().IsTagged()) {
3761  __ sll(scratch, key, kPointerSizeLog2 - kSmiTagSize);
3762  __ addu(scratch, elements, scratch);
3763  } else {
3764  __ sll(scratch, key, kPointerSizeLog2);
3765  __ addu(scratch, elements, scratch);
3766  }
3767  offset = FixedArray::OffsetOfElementAt(instr->additional_index());
3768  }
3769  __ sw(value, FieldMemOperand(store_base, offset));
3770 
3771  if (instr->hydrogen()->NeedsWriteBarrier()) {
3772  HType type = instr->hydrogen()->value()->type();
3773  SmiCheck check_needed =
3774  type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
3775  // Compute address of modified element and store it into key register.
3776  __ Addu(key, store_base, Operand(offset - kHeapObjectTag));
3777  __ RecordWrite(elements,
3778  key,
3779  value,
3781  kSaveFPRegs,
3783  check_needed);
3784  }
3785 }
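// Sketch of the address arithmetic above, assuming 32-bit pointers and V8's
// one-bit smi tag: a smi key already carries a factor of two, so it is
// scaled by kPointerSizeLog2 - kSmiTagSize instead of kPointerSizeLog2.
// Constants and the helper name are illustrative.
#include <cstdint>

static uint32_t FastElementAddressSketch(uint32_t elements, int32_t key,
                                         bool key_is_tagged, int base_offset) {
  const int kPointerSizeLog2 = 2;  // 4-byte pointers on 32-bit MIPS.
  const int kSmiTagSize = 1;       // A smi stores its value as (value << 1).
  int shift = key_is_tagged ? kPointerSizeLog2 - kSmiTagSize : kPointerSizeLog2;
  return elements + (static_cast<uint32_t>(key) << shift) + base_offset;
}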
3786 
3787 
3788 void LCodeGen::DoStoreKeyedFastDoubleElement(
3789  LStoreKeyedFastDoubleElement* instr) {
3790  DoubleRegister value = ToDoubleRegister(instr->value());
3791  Register elements = ToRegister(instr->elements());
3792  Register key = no_reg;
3793  Register scratch = scratch0();
3794  bool key_is_constant = instr->key()->IsConstantOperand();
3795  int constant_key = 0;
3796  Label not_nan;
3797 
3798  // Calculate the effective address of the slot in the array to store the
3799  // double value.
3800  if (key_is_constant) {
3801  constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
3802  if (constant_key & 0xF0000000) {
3803  Abort("array index constant value too big.");
3804  }
3805  } else {
3806  key = ToRegister(instr->key());
3807  }
3808  int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
3809  int shift_size = (instr->hydrogen()->key()->representation().IsTagged())
3810  ? (element_size_shift - kSmiTagSize) : element_size_shift;
3811  if (key_is_constant) {
3812  __ Addu(scratch, elements, Operand((constant_key << element_size_shift) +
3813  FixedDoubleArray::kHeaderSize - kHeapObjectTag));
3814  } else {
3815  __ sll(scratch, key, shift_size);
3816  __ Addu(scratch, elements, Operand(scratch));
3817  __ Addu(scratch, scratch,
3818  Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
3819  }
3820 
3821  if (instr->NeedsCanonicalization()) {
3822  Label is_nan;
3823  // Check for NaN. All NaNs must be canonicalized.
3824  __ BranchF(NULL, &is_nan, eq, value, value);
3825  __ Branch(&not_nan);
3826 
3827  // Only load canonical NaN if the comparison above set the overflow.
3828  __ bind(&is_nan);
3829  __ Move(value, FixedDoubleArray::canonical_not_the_hole_nan_as_double());
3830  }
3831 
3832  __ bind(&not_nan);
3833  __ sdc1(value, MemOperand(scratch, instr->additional_index() <<
3834  element_size_shift));
3835 }
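// Sketch of the canonicalization step above: a double is NaN exactly when it
// is not equal to itself, and every NaN payload is rewritten to one canonical
// quiet NaN before the store (the constant below is illustrative, not V8's
// exact bit pattern).
#include <limits>

static double CanonicalizeForDoubleArraySketch(double value) {
  return (value != value) ? std::numeric_limits<double>::quiet_NaN() : value;
}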
3836 
3837 
3838 void LCodeGen::DoStoreKeyedSpecializedArrayElement(
3839  LStoreKeyedSpecializedArrayElement* instr) {
3840 
3841  Register external_pointer = ToRegister(instr->external_pointer());
3842  Register key = no_reg;
3843  ElementsKind elements_kind = instr->elements_kind();
3844  bool key_is_constant = instr->key()->IsConstantOperand();
3845  int constant_key = 0;
3846  if (key_is_constant) {
3847  constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
3848  if (constant_key & 0xF0000000) {
3849  Abort("array index constant value too big.");
3850  }
3851  } else {
3852  key = ToRegister(instr->key());
3853  }
3854  int element_size_shift = ElementsKindToShiftSize(elements_kind);
3855  int shift_size = (instr->hydrogen()->key()->representation().IsTagged())
3856  ? (element_size_shift - kSmiTagSize) : element_size_shift;
3857  int additional_offset = instr->additional_index() << element_size_shift;
3858 
3859  if (elements_kind == EXTERNAL_FLOAT_ELEMENTS ||
3860  elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
3861  FPURegister value(ToDoubleRegister(instr->value()));
3862  if (key_is_constant) {
3863  __ Addu(scratch0(), external_pointer, constant_key <<
3864  element_size_shift);
3865  } else {
3866  __ sll(scratch0(), key, shift_size);
3867  __ Addu(scratch0(), scratch0(), external_pointer);
3868  }
3869 
3870  if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
3871  __ cvt_s_d(double_scratch0(), value);
3872  __ swc1(double_scratch0(), MemOperand(scratch0(), additional_offset));
3873  } else { // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS
3874  __ sdc1(value, MemOperand(scratch0(), additional_offset));
3875  }
3876  } else {
3877  Register value(ToRegister(instr->value()));
3878  MemOperand mem_operand = PrepareKeyedOperand(
3879  key, external_pointer, key_is_constant, constant_key,
3880  element_size_shift, shift_size,
3881  instr->additional_index(), additional_offset);
3882  switch (elements_kind) {
3883  case EXTERNAL_PIXEL_ELEMENTS:
3884  case EXTERNAL_BYTE_ELEMENTS:
3885  case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
3886  __ sb(value, mem_operand);
3887  break;
3888  case EXTERNAL_SHORT_ELEMENTS:
3889  case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
3890  __ sh(value, mem_operand);
3891  break;
3892  case EXTERNAL_INT_ELEMENTS:
3893  case EXTERNAL_UNSIGNED_INT_ELEMENTS:
3894  __ sw(value, mem_operand);
3895  break;
3896  case EXTERNAL_FLOAT_ELEMENTS:
3897  case EXTERNAL_DOUBLE_ELEMENTS:
3898  case FAST_DOUBLE_ELEMENTS:
3899  case FAST_ELEMENTS:
3900  case FAST_SMI_ELEMENTS:
3901  case FAST_HOLEY_DOUBLE_ELEMENTS:
3902  case FAST_HOLEY_ELEMENTS:
3903  case FAST_HOLEY_SMI_ELEMENTS:
3904  case DICTIONARY_ELEMENTS:
3905  case NON_STRICT_ARGUMENTS_ELEMENTS:
3906  UNREACHABLE();
3907  break;
3908  }
3909  }
3910 }
3911 
3912 void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
3913  ASSERT(ToRegister(instr->object()).is(a2));
3914  ASSERT(ToRegister(instr->key()).is(a1));
3915  ASSERT(ToRegister(instr->value()).is(a0));
3916 
3917  Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
3918  ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
3919  : isolate()->builtins()->KeyedStoreIC_Initialize();
3920  CallCode(ic, RelocInfo::CODE_TARGET, instr);
3921 }
3922 
3923 
3924 void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
3925  Register object_reg = ToRegister(instr->object());
3926  Register new_map_reg = ToRegister(instr->new_map_temp());
3927  Register scratch = scratch0();
3928 
3929  Handle<Map> from_map = instr->original_map();
3930  Handle<Map> to_map = instr->transitioned_map();
3931  ElementsKind from_kind = from_map->elements_kind();
3932  ElementsKind to_kind = to_map->elements_kind();
3933 
3934  __ mov(ToRegister(instr->result()), object_reg);
3935 
3936  Label not_applicable;
3937  __ lw(scratch, FieldMemOperand(object_reg, HeapObject::kMapOffset));
3938  __ Branch(&not_applicable, ne, scratch, Operand(from_map));
3939 
3940  __ li(new_map_reg, Operand(to_map));
3941  if (IsFastSmiElementsKind(from_kind) && IsFastObjectElementsKind(to_kind)) {
3942  __ sw(new_map_reg, FieldMemOperand(object_reg, HeapObject::kMapOffset));
3943  // Write barrier.
3944  __ RecordWriteField(object_reg, HeapObject::kMapOffset, new_map_reg,
3945  scratch, kRAHasBeenSaved, kDontSaveFPRegs);
3946  } else if (IsFastSmiElementsKind(from_kind) &&
3947  IsFastDoubleElementsKind(to_kind)) {
3948  Register fixed_object_reg = ToRegister(instr->temp());
3949  ASSERT(fixed_object_reg.is(a2));
3950  ASSERT(new_map_reg.is(a3));
3951  __ mov(fixed_object_reg, object_reg);
3952  CallCode(isolate()->builtins()->TransitionElementsSmiToDouble(),
3953  RelocInfo::CODE_TARGET, instr);
3954  } else if (IsFastDoubleElementsKind(from_kind) &&
3955  IsFastObjectElementsKind(to_kind)) {
3956  Register fixed_object_reg = ToRegister(instr->temp());
3957  ASSERT(fixed_object_reg.is(a2));
3958  ASSERT(new_map_reg.is(a3));
3959  __ mov(fixed_object_reg, object_reg);
3960  CallCode(isolate()->builtins()->TransitionElementsDoubleToObject(),
3961  RelocInfo::CODE_TARGET, instr);
3962  } else {
3963  UNREACHABLE();
3964  }
3965  __ bind(&not_applicable);
3966 }
3967 
3968 
3969 void LCodeGen::DoStringAdd(LStringAdd* instr) {
3970  __ push(ToRegister(instr->left()));
3971  __ push(ToRegister(instr->right()));
3972  StringAddStub stub(NO_STRING_CHECK_IN_STUB);
3973  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
3974 }
3975 
3976 
3977 void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
3978  class DeferredStringCharCodeAt: public LDeferredCode {
3979  public:
3980  DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
3981  : LDeferredCode(codegen), instr_(instr) { }
3982  virtual void Generate() { codegen()->DoDeferredStringCharCodeAt(instr_); }
3983  virtual LInstruction* instr() { return instr_; }
3984  private:
3985  LStringCharCodeAt* instr_;
3986  };
3987 
3988  DeferredStringCharCodeAt* deferred =
3989  new(zone()) DeferredStringCharCodeAt(this, instr);
3990  StringCharLoadGenerator::Generate(masm(),
3991  ToRegister(instr->string()),
3992  ToRegister(instr->index()),
3993  ToRegister(instr->result()),
3994  deferred->entry());
3995  __ bind(deferred->exit());
3996 }
3997 
3998 
3999 void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
4000  Register string = ToRegister(instr->string());
4001  Register result = ToRegister(instr->result());
4002  Register scratch = scratch0();
4003 
4004  // TODO(3095996): Get rid of this. For now, we need to make the
4005  // result register contain a valid pointer because it is already
4006  // contained in the register pointer map.
4007  __ mov(result, zero_reg);
4008 
4009  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
4010  __ push(string);
4011  // Push the index as a smi. This is safe because of the checks in
4012  // DoStringCharCodeAt above.
4013  if (instr->index()->IsConstantOperand()) {
4014  int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
4015  __ Addu(scratch, zero_reg, Operand(Smi::FromInt(const_index)));
4016  __ push(scratch);
4017  } else {
4018  Register index = ToRegister(instr->index());
4019  __ SmiTag(index);
4020  __ push(index);
4021  }
4022  CallRuntimeFromDeferred(Runtime::kStringCharCodeAt, 2, instr);
4023  __ AssertSmi(v0);
4024  __ SmiUntag(v0);
4025  __ StoreToSafepointRegisterSlot(v0, result);
4026 }
4027 
4028 
4029 void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
4030  class DeferredStringCharFromCode: public LDeferredCode {
4031  public:
4032  DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
4033  : LDeferredCode(codegen), instr_(instr) { }
4034  virtual void Generate() { codegen()->DoDeferredStringCharFromCode(instr_); }
4035  virtual LInstruction* instr() { return instr_; }
4036  private:
4037  LStringCharFromCode* instr_;
4038  };
4039 
4040  DeferredStringCharFromCode* deferred =
4041  new(zone()) DeferredStringCharFromCode(this, instr);
4042 
4043  ASSERT(instr->hydrogen()->value()->representation().IsInteger32());
4044  Register char_code = ToRegister(instr->char_code());
4045  Register result = ToRegister(instr->result());
4046  Register scratch = scratch0();
4047  ASSERT(!char_code.is(result));
4048 
4049  __ Branch(deferred->entry(), hi,
4050  char_code, Operand(String::kMaxAsciiCharCode));
4051  __ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex);
4052  __ sll(scratch, char_code, kPointerSizeLog2);
4053  __ Addu(result, result, scratch);
4054  __ lw(result, FieldMemOperand(result, FixedArray::kHeaderSize));
4055  __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
4056  __ Branch(deferred->entry(), eq, result, Operand(scratch));
4057  __ bind(deferred->exit());
4058 }
4059 
4060 
4061 void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
4062  Register char_code = ToRegister(instr->char_code());
4063  Register result = ToRegister(instr->result());
4064 
4065  // TODO(3095996): Get rid of this. For now, we need to make the
4066  // result register contain a valid pointer because it is already
4067  // contained in the register pointer map.
4068  __ mov(result, zero_reg);
4069 
4070  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
4071  __ SmiTag(char_code);
4072  __ push(char_code);
4073  CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr);
4074  __ StoreToSafepointRegisterSlot(v0, result);
4075 }
4076 
4077 
4078 void LCodeGen::DoStringLength(LStringLength* instr) {
4079  Register string = ToRegister(instr->string());
4080  Register result = ToRegister(instr->result());
4081  __ lw(result, FieldMemOperand(string, String::kLengthOffset));
4082 }
4083 
4084 
4085 void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
4086  LOperand* input = instr->value();
4087  ASSERT(input->IsRegister() || input->IsStackSlot());
4088  LOperand* output = instr->result();
4089  ASSERT(output->IsDoubleRegister());
4090  FPURegister single_scratch = double_scratch0().low();
4091  if (input->IsStackSlot()) {
4092  Register scratch = scratch0();
4093  __ lw(scratch, ToMemOperand(input));
4094  __ mtc1(scratch, single_scratch);
4095  } else {
4096  __ mtc1(ToRegister(input), single_scratch);
4097  }
4098  __ cvt_d_w(ToDoubleRegister(output), single_scratch);
4099 }
4100 
4101 
4102 void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
4103  LOperand* input = instr->value();
4104  LOperand* output = instr->result();
4105 
4106  FPURegister dbl_scratch = double_scratch0();
4107  __ mtc1(ToRegister(input), dbl_scratch);
4108  __ Cvt_d_uw(ToDoubleRegister(output), dbl_scratch, f22);
4109 }
4110 
4111 
4112 void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
4113  class DeferredNumberTagI: public LDeferredCode {
4114  public:
4115  DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr)
4116  : LDeferredCode(codegen), instr_(instr) { }
4117  virtual void Generate() {
4118  codegen()->DoDeferredNumberTagI(instr_,
4119  instr_->value(),
4120  SIGNED_INT32);
4121  }
4122  virtual LInstruction* instr() { return instr_; }
4123  private:
4124  LNumberTagI* instr_;
4125  };
4126 
4127  Register src = ToRegister(instr->value());
4128  Register dst = ToRegister(instr->result());
4129  Register overflow = scratch0();
4130 
4131  DeferredNumberTagI* deferred = new(zone()) DeferredNumberTagI(this, instr);
4132  __ SmiTagCheckOverflow(dst, src, overflow);
4133  __ BranchOnOverflow(deferred->entry(), overflow);
4134  __ bind(deferred->exit());
4135 }
4136 
4137 
4138 void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
4139  class DeferredNumberTagU: public LDeferredCode {
4140  public:
4141  DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr)
4142  : LDeferredCode(codegen), instr_(instr) { }
4143  virtual void Generate() {
4144  codegen()->DoDeferredNumberTagI(instr_,
4145  instr_->value(),
4146  UNSIGNED_INT32);
4147  }
4148  virtual LInstruction* instr() { return instr_; }
4149  private:
4150  LNumberTagU* instr_;
4151  };
4152 
4153  LOperand* input = instr->value();
4154  ASSERT(input->IsRegister() && input->Equals(instr->result()));
4155  Register reg = ToRegister(input);
4156 
4157  DeferredNumberTagU* deferred = new(zone()) DeferredNumberTagU(this, instr);
4158  __ Branch(deferred->entry(), hi, reg, Operand(Smi::kMaxValue));
4159  __ SmiTag(reg, reg);
4160  __ bind(deferred->exit());
4161 }
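// Sketch of the decision above, assuming 32-bit smis with a one-bit tag:
// only unsigned values up to Smi::kMaxValue (2^30 - 1) can be smi-tagged in
// place; anything larger takes the deferred heap-number path. The helper
// name is hypothetical.
#include <cstdint>

static bool TryTagUint32AsSmiSketch(uint32_t value, int32_t* tagged) {
  const uint32_t kSmiMaxValue = (1u << 30) - 1;
  if (value > kSmiMaxValue) return false;        // Deferred: heap number.
  *tagged = static_cast<int32_t>(value << 1);    // Smi tag bit is 0.
  return true;
}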
4162 
4163 
4164 void LCodeGen::DoDeferredNumberTagI(LInstruction* instr,
4165  LOperand* value,
4166  IntegerSignedness signedness) {
4167  Label slow;
4168  Register src = ToRegister(value);
4169  Register dst = ToRegister(instr->result());
4170  FPURegister dbl_scratch = double_scratch0();
4171 
4172  // Preserve the value of all registers.
4173  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
4174 
4175  Label done;
4176  if (signedness == SIGNED_INT32) {
4177  // There was overflow, so bits 30 and 31 of the original integer
4178  // disagree. Try to allocate a heap number in new space and store
4179  // the value in there. If that fails, call the runtime system.
4180  if (dst.is(src)) {
4181  __ SmiUntag(src, dst);
4182  __ Xor(src, src, Operand(0x80000000));
4183  }
4184  __ mtc1(src, dbl_scratch);
4185  __ cvt_d_w(dbl_scratch, dbl_scratch);
4186  } else {
4187  __ mtc1(src, dbl_scratch);
4188  __ Cvt_d_uw(dbl_scratch, dbl_scratch, f22);
4189  }
4190 
4191  if (FLAG_inline_new) {
4192  __ LoadRoot(t2, Heap::kHeapNumberMapRootIndex);
4193  __ AllocateHeapNumber(t1, a3, t0, t2, &slow);
4194  __ Move(dst, t1);
4195  __ Branch(&done);
4196  }
4197 
4198  // Slow case: Call the runtime system to do the number allocation.
4199  __ bind(&slow);
4200 
4201  // TODO(3095996): Put a valid pointer value in the stack slot where the result
4202  // register is stored, as this register is in the pointer map, but contains an
4203  // integer value.
4204  __ StoreToSafepointRegisterSlot(zero_reg, dst);
4205  CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
4206  __ Move(dst, v0);
4207 
4208  // Done. Put the value in dbl_scratch into the value of the allocated heap
4209  // number.
4210  __ bind(&done);
4211  __ sdc1(dbl_scratch, FieldMemOperand(dst, HeapNumber::kValueOffset));
4212  __ StoreToSafepointRegisterSlot(dst, dst);
4213 }
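// Sketch of the signed recovery step above: when SmiTag overflowed, the
// register holds (value << 1) truncated to 32 bits, so shifting back and
// flipping bit 31 restores the original integer before the FPU conversion.
// Assumes an arithmetic right shift on negative values; helper name is
// hypothetical.
#include <cstdint>

static int32_t RecoverIntFromOverflowedSmiSketch(int32_t overflowed_smi) {
  int32_t untagged = overflowed_smi >> 1;                 // SmiUntag
  return untagged ^ static_cast<int32_t>(0x80000000u);    // Xor 0x80000000
}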
4214 
4215 
4216 void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
4217  class DeferredNumberTagD: public LDeferredCode {
4218  public:
4219  DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
4220  : LDeferredCode(codegen), instr_(instr) { }
4221  virtual void Generate() { codegen()->DoDeferredNumberTagD(instr_); }
4222  virtual LInstruction* instr() { return instr_; }
4223  private:
4224  LNumberTagD* instr_;
4225  };
4226 
4227  DoubleRegister input_reg = ToDoubleRegister(instr->value());
4228  Register scratch = scratch0();
4229  Register reg = ToRegister(instr->result());
4230  Register temp1 = ToRegister(instr->temp());
4231  Register temp2 = ToRegister(instr->temp2());
4232 
4233  DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr);
4234  if (FLAG_inline_new) {
4235  __ LoadRoot(scratch, Heap::kHeapNumberMapRootIndex);
4236  __ AllocateHeapNumber(reg, temp1, temp2, scratch, deferred->entry());
4237  } else {
4238  __ Branch(deferred->entry());
4239  }
4240  __ bind(deferred->exit());
4241  __ sdc1(input_reg, FieldMemOperand(reg, HeapNumber::kValueOffset));
4242 }
4243 
4244 
4245 void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
4246  // TODO(3095996): Get rid of this. For now, we need to make the
4247  // result register contain a valid pointer because it is already
4248  // contained in the register pointer map.
4249  Register reg = ToRegister(instr->result());
4250  __ mov(reg, zero_reg);
4251 
4252  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
4253  CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
4254  __ StoreToSafepointRegisterSlot(v0, reg);
4255 }
4256 
4257 
4258 void LCodeGen::DoSmiTag(LSmiTag* instr) {
4259  ASSERT(!instr->hydrogen_value()->CheckFlag(HValue::kCanOverflow));
4260  __ SmiTag(ToRegister(instr->result()), ToRegister(instr->value()));
4261 }
4262 
4263 
4264 void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
4265  Register scratch = scratch0();
4266  Register input = ToRegister(instr->value());
4267  Register result = ToRegister(instr->result());
4268  if (instr->needs_check()) {
4270  // If the input is a HeapObject, value of scratch won't be zero.
4271  __ And(scratch, input, Operand(kHeapObjectTag));
4272  __ SmiUntag(result, input);
4273  DeoptimizeIf(ne, instr->environment(), scratch, Operand(zero_reg));
4274  } else {
4275  __ SmiUntag(result, input);
4276  }
4277 }
4278 
4279 
4280 void LCodeGen::EmitNumberUntagD(Register input_reg,
4281  DoubleRegister result_reg,
4282  bool deoptimize_on_undefined,
4283  bool deoptimize_on_minus_zero,
4284  LEnvironment* env) {
4285  Register scratch = scratch0();
4286 
4287  Label load_smi, heap_number, done;
4288 
4289  // Smi check.
4290  __ UntagAndJumpIfSmi(scratch, input_reg, &load_smi);
4291 
4292  // Heap number map check.
4293  __ lw(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
4294  __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
4295  if (deoptimize_on_undefined) {
4296  DeoptimizeIf(ne, env, scratch, Operand(at));
4297  } else {
4298  Label heap_number;
4299  __ Branch(&heap_number, eq, scratch, Operand(at));
4300 
4301  __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
4302  DeoptimizeIf(ne, env, input_reg, Operand(at));
4303 
4304  // Convert undefined to NaN.
4305  __ LoadRoot(at, Heap::kNanValueRootIndex);
4306  __ ldc1(result_reg, FieldMemOperand(at, HeapNumber::kValueOffset));
4307  __ Branch(&done);
4308 
4309  __ bind(&heap_number);
4310  }
4311  // Heap number to double register conversion.
4312  __ ldc1(result_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset));
4313  if (deoptimize_on_minus_zero) {
4314  __ mfc1(at, result_reg.low());
4315  __ Branch(&done, ne, at, Operand(zero_reg));
4316  __ mfc1(scratch, result_reg.high());
4317  DeoptimizeIf(eq, env, scratch, Operand(HeapNumber::kSignMask));
4318  }
4319  __ Branch(&done);
4320 
4321  // Smi to double register conversion
4322  __ bind(&load_smi);
4323  // scratch: untagged value of input_reg
4324  __ mtc1(scratch, result_reg);
4325  __ cvt_d_w(result_reg, result_reg);
4326  __ bind(&done);
4327 }
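// Sketch of the minus-zero test above: after the double is loaded, the value
// is -0 exactly when its low word is zero and its high word equals the sign
// mask, which is what the mfc1/compare pair checks. Hypothetical helper.
#include <cstdint>
#include <cstring>

static bool IsMinusZeroSketch(double value) {
  uint64_t bits;
  std::memcpy(&bits, &value, sizeof(bits));
  return static_cast<uint32_t>(bits) == 0 &&
         static_cast<uint32_t>(bits >> 32) == 0x80000000u;
}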
4328 
4329 
4330 void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
4331  Register input_reg = ToRegister(instr->value());
4332  Register scratch1 = scratch0();
4333  Register scratch2 = ToRegister(instr->temp());
4334  DoubleRegister double_scratch = double_scratch0();
4335  FPURegister single_scratch = double_scratch.low();
4336 
4337  ASSERT(!scratch1.is(input_reg) && !scratch1.is(scratch2));
4338  ASSERT(!scratch2.is(input_reg) && !scratch2.is(scratch1));
4339 
4340  Label done;
4341 
4342  // The input is a tagged HeapObject.
4343  // Heap number map check.
4344  __ lw(scratch1, FieldMemOperand(input_reg, HeapObject::kMapOffset));
4345  __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
4346  // This 'at' value and scratch1 map value are used for tests in both clauses
4347  // of the if.
4348 
4349  if (instr->truncating()) {
4350  Register scratch3 = ToRegister(instr->temp2());
4351  DoubleRegister double_scratch2 = ToDoubleRegister(instr->temp3());
4352  ASSERT(!scratch3.is(input_reg) &&
4353  !scratch3.is(scratch1) &&
4354  !scratch3.is(scratch2));
4355  // Performs a truncating conversion of a floating point number as used by
4356  // the JS bitwise operations.
4357  Label heap_number;
4358  __ Branch(&heap_number, eq, scratch1, Operand(at)); // HeapNumber map?
4359  // Check for undefined. Undefined is converted to zero for truncating
4360  // conversions.
4361  __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
4362  DeoptimizeIf(ne, instr->environment(), input_reg, Operand(at));
4363  ASSERT(ToRegister(instr->result()).is(input_reg));
4364  __ mov(input_reg, zero_reg);
4365  __ Branch(&done);
4366 
4367  __ bind(&heap_number);
4368  __ ldc1(double_scratch2,
4369  FieldMemOperand(input_reg, HeapNumber::kValueOffset));
4370  __ EmitECMATruncate(input_reg,
4371  double_scratch2,
4372  single_scratch,
4373  scratch1,
4374  scratch2,
4375  scratch3);
4376  } else {
4377  // Deoptimize if we don't have a heap number.
4378  DeoptimizeIf(ne, instr->environment(), scratch1, Operand(at));
4379 
4380  // Load the double value.
4381  __ ldc1(double_scratch,
4382  FieldMemOperand(input_reg, HeapNumber::kValueOffset));
4383 
4384  Register except_flag = scratch2;
4385  __ EmitFPUTruncate(kRoundToZero,
4386  single_scratch,
4387  double_scratch,
4388  scratch1,
4389  except_flag,
4390  kCheckForInexactConversion);
4391 
4392  // Deopt if the operation did not succeed.
4393  DeoptimizeIf(ne, instr->environment(), except_flag, Operand(zero_reg));
4394 
4395  // Load the result.
4396  __ mfc1(input_reg, single_scratch);
4397 
4398  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
4399  __ Branch(&done, ne, input_reg, Operand(zero_reg));
4400 
4401  __ mfc1(scratch1, double_scratch.high());
4402  __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
4403  DeoptimizeIf(ne, instr->environment(), scratch1, Operand(zero_reg));
4404  }
4405  }
4406  __ bind(&done);
4407 }
4408 
4409 
4410 void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
4411  class DeferredTaggedToI: public LDeferredCode {
4412  public:
4413  DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
4414  : LDeferredCode(codegen), instr_(instr) { }
4415  virtual void Generate() { codegen()->DoDeferredTaggedToI(instr_); }
4416  virtual LInstruction* instr() { return instr_; }
4417  private:
4418  LTaggedToI* instr_;
4419  };
4420 
4421  LOperand* input = instr->value();
4422  ASSERT(input->IsRegister());
4423  ASSERT(input->Equals(instr->result()));
4424 
4425  Register input_reg = ToRegister(input);
4426 
4427  DeferredTaggedToI* deferred = new(zone()) DeferredTaggedToI(this, instr);
4428 
4429  // Let the deferred code handle the HeapObject case.
4430  __ JumpIfNotSmi(input_reg, deferred->entry());
4431 
4432  // Smi to int32 conversion.
4433  __ SmiUntag(input_reg);
4434  __ bind(deferred->exit());
4435 }
4436 
4437 
4438 void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
4439  LOperand* input = instr->value();
4440  ASSERT(input->IsRegister());
4441  LOperand* result = instr->result();
4442  ASSERT(result->IsDoubleRegister());
4443 
4444  Register input_reg = ToRegister(input);
4445  DoubleRegister result_reg = ToDoubleRegister(result);
4446 
4447  EmitNumberUntagD(input_reg, result_reg,
4448  instr->hydrogen()->deoptimize_on_undefined(),
4449  instr->hydrogen()->deoptimize_on_minus_zero(),
4450  instr->environment());
4451 }
4452 
4453 
4454 void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
4455  Register result_reg = ToRegister(instr->result());
4456  Register scratch1 = scratch0();
4457  Register scratch2 = ToRegister(instr->temp());
4458  DoubleRegister double_input = ToDoubleRegister(instr->value());
4459  FPURegister single_scratch = double_scratch0().low();
4460 
4461  if (instr->truncating()) {
4462  Register scratch3 = ToRegister(instr->temp2());
4463  __ EmitECMATruncate(result_reg,
4464  double_input,
4465  single_scratch,
4466  scratch1,
4467  scratch2,
4468  scratch3);
4469  } else {
4470  Register except_flag = scratch2;
4471 
4472  __ EmitFPUTruncate(kRoundToMinusInf,
4473  single_scratch,
4474  double_input,
4475  scratch1,
4476  except_flag,
4477  kCheckForInexactConversion);
4478 
4479  // Deopt if the operation did not succeed (except_flag != 0).
4480  DeoptimizeIf(ne, instr->environment(), except_flag, Operand(zero_reg));
4481 
4482  // Load the result.
4483  __ mfc1(result_reg, single_scratch);
4484  }
4485 }
4486 
4487 
4488 void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
4489  LOperand* input = instr->value();
4490  __ And(at, ToRegister(input), Operand(kSmiTagMask));
4491  DeoptimizeIf(ne, instr->environment(), at, Operand(zero_reg));
4492 }
4493 
4494 
4495 void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
4496  LOperand* input = instr->value();
4497  __ And(at, ToRegister(input), Operand(kSmiTagMask));
4498  DeoptimizeIf(eq, instr->environment(), at, Operand(zero_reg));
4499 }
4500 
4501 
4502 void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
4503  Register input = ToRegister(instr->value());
4504  Register scratch = scratch0();
4505 
4506  __ GetObjectType(input, scratch, scratch);
4507 
4508  if (instr->hydrogen()->is_interval_check()) {
4509  InstanceType first;
4510  InstanceType last;
4511  instr->hydrogen()->GetCheckInterval(&first, &last);
4512 
4513  // If there is only one type in the interval check for equality.
4514  if (first == last) {
4515  DeoptimizeIf(ne, instr->environment(), scratch, Operand(first));
4516  } else {
4517  DeoptimizeIf(lo, instr->environment(), scratch, Operand(first));
4518  // Omit check for the last type.
4519  if (last != LAST_TYPE) {
4520  DeoptimizeIf(hi, instr->environment(), scratch, Operand(last));
4521  }
4522  }
4523  } else {
4524  uint8_t mask;
4525  uint8_t tag;
4526  instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);
4527 
4528  if (IsPowerOf2(mask)) {
4529  ASSERT(tag == 0 || IsPowerOf2(tag));
4530  __ And(at, scratch, mask);
4531  DeoptimizeIf(tag == 0 ? ne : eq, instr->environment(),
4532  at, Operand(zero_reg));
4533  } else {
4534  __ And(scratch, scratch, Operand(mask));
4535  DeoptimizeIf(ne, instr->environment(), scratch, Operand(tag));
4536  }
4537  }
4538 }
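// Sketch of the two strategies above: when the mask is a single bit and the
// tag is zero or that same bit, one AND plus a zero test decides the deopt;
// otherwise the masked type byte is compared against the tag directly.
// Hypothetical helper; in the emitted code the mask and tag come from Hydrogen.
#include <cstdint>

static bool InstanceTypeCheckFailsSketch(uint8_t type, uint8_t mask,
                                         uint8_t tag) {
  bool single_bit_mask = mask != 0 && (mask & (mask - 1)) == 0;
  if (single_bit_mask && (tag == 0 || tag == mask)) {
    bool bit_set = (type & mask) != 0;
    return tag == 0 ? bit_set : !bit_set;   // Condition that deoptimizes.
  }
  return (type & mask) != tag;
}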
4539 
4540 
4541 void LCodeGen::DoCheckFunction(LCheckFunction* instr) {
4542  Register reg = ToRegister(instr->value());
4543  Handle<JSFunction> target = instr->hydrogen()->target();
4544  if (isolate()->heap()->InNewSpace(*target)) {
4545  Register reg = ToRegister(instr->value());
4546  Handle<JSGlobalPropertyCell> cell =
4547  isolate()->factory()->NewJSGlobalPropertyCell(target);
4548  __ li(at, Operand(Handle<Object>(cell)));
4550  DeoptimizeIf(ne, instr->environment(), reg,
4551  Operand(at));
4552  } else {
4553  DeoptimizeIf(ne, instr->environment(), reg,
4554  Operand(target));
4555  }
4556 }
4557 
4558 
4559 void LCodeGen::DoCheckMapCommon(Register reg,
4560  Register scratch,
4561  Handle<Map> map,
4562  CompareMapMode mode,
4563  LEnvironment* env) {
4564  Label success;
4565  __ CompareMapAndBranch(reg, scratch, map, &success, eq, &success, mode);
4566  DeoptimizeIf(al, env);
4567  __ bind(&success);
4568 }
4569 
4570 
4571 void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
4572  Register scratch = scratch0();
4573  LOperand* input = instr->value();
4574  ASSERT(input->IsRegister());
4575  Register reg = ToRegister(input);
4576  Label success;
4577  SmallMapList* map_set = instr->hydrogen()->map_set();
4578  for (int i = 0; i < map_set->length() - 1; i++) {
4579  Handle<Map> map = map_set->at(i);
4580  __ CompareMapAndBranch(
4581  reg, scratch, map, &success, eq, &success, REQUIRE_EXACT_MAP);
4582  }
4583  Handle<Map> map = map_set->last();
4584  DoCheckMapCommon(reg, scratch, map, REQUIRE_EXACT_MAP, instr->environment());
4585  __ bind(&success);
4586 }
4587 
4588 
4589 void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
4590  DoubleRegister value_reg = ToDoubleRegister(instr->unclamped());
4591  Register result_reg = ToRegister(instr->result());
4592  DoubleRegister temp_reg = ToDoubleRegister(instr->temp());
4593  __ ClampDoubleToUint8(result_reg, value_reg, temp_reg);
4594 }
4595 
4596 
4597 void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
4598  Register unclamped_reg = ToRegister(instr->unclamped());
4599  Register result_reg = ToRegister(instr->result());
4600  __ ClampUint8(result_reg, unclamped_reg);
4601 }
4602 
4603 
4604 void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
4605  Register scratch = scratch0();
4606  Register input_reg = ToRegister(instr->unclamped());
4607  Register result_reg = ToRegister(instr->result());
4608  DoubleRegister temp_reg = ToDoubleRegister(instr->temp());
4609  Label is_smi, done, heap_number;
4610 
4611  // Both smi and heap number cases are handled.
4612  __ UntagAndJumpIfSmi(scratch, input_reg, &is_smi);
4613 
4614  // Check for heap number
4615  __ lw(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
4616  __ Branch(&heap_number, eq, scratch, Operand(factory()->heap_number_map()));
4617 
4618  // Check for undefined. Undefined is converted to zero for clamping
4619  // conversions.
4620  DeoptimizeIf(ne, instr->environment(), input_reg,
4621  Operand(factory()->undefined_value()));
4622  __ mov(result_reg, zero_reg);
4623  __ jmp(&done);
4624 
4625  // Heap number
4626  __ bind(&heap_number);
4627  __ ldc1(double_scratch0(), FieldMemOperand(input_reg,
4628  HeapNumber::kValueOffset));
4629  __ ClampDoubleToUint8(result_reg, double_scratch0(), temp_reg);
4630  __ jmp(&done);
4631 
4632  __ bind(&is_smi);
4633  __ ClampUint8(result_reg, scratch);
4634 
4635  __ bind(&done);
4636 }
4637 
4638 
4639 void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
4640  Register temp1 = ToRegister(instr->temp());
4641  Register temp2 = ToRegister(instr->temp2());
4642 
4643  Handle<JSObject> holder = instr->holder();
4644  Handle<JSObject> current_prototype = instr->prototype();
4645 
4646  // Load prototype object.
4647  __ LoadHeapObject(temp1, current_prototype);
4648 
4649  // Check prototype maps up to the holder.
4650  while (!current_prototype.is_identical_to(holder)) {
4651  DoCheckMapCommon(temp1, temp2,
4652  Handle<Map>(current_prototype->map()),
4653  ALLOW_ELEMENT_TRANSITION_MAPS, instr->environment());
4654  current_prototype =
4655  Handle<JSObject>(JSObject::cast(current_prototype->GetPrototype()));
4656  // Load next prototype object.
4657  __ LoadHeapObject(temp1, current_prototype);
4658  }
4659 
4660  // Check the holder map.
4661  DoCheckMapCommon(temp1, temp2,
4662  Handle<Map>(current_prototype->map()),
4663  ALLOW_ELEMENT_TRANSITION_MAPS, instr->environment());
4664 }
4665 
4666 
4667 void LCodeGen::DoAllocateObject(LAllocateObject* instr) {
4668  class DeferredAllocateObject: public LDeferredCode {
4669  public:
4670  DeferredAllocateObject(LCodeGen* codegen, LAllocateObject* instr)
4671  : LDeferredCode(codegen), instr_(instr) { }
4672  virtual void Generate() { codegen()->DoDeferredAllocateObject(instr_); }
4673  virtual LInstruction* instr() { return instr_; }
4674  private:
4675  LAllocateObject* instr_;
4676  };
4677 
4678  DeferredAllocateObject* deferred =
4679  new(zone()) DeferredAllocateObject(this, instr);
4680 
4681  Register result = ToRegister(instr->result());
4682  Register scratch = ToRegister(instr->temp());
4683  Register scratch2 = ToRegister(instr->temp2());
4684  Handle<JSFunction> constructor = instr->hydrogen()->constructor();
4685  Handle<Map> initial_map(constructor->initial_map());
4686  int instance_size = initial_map->instance_size();
4687  ASSERT(initial_map->pre_allocated_property_fields() +
4688  initial_map->unused_property_fields() -
4689  initial_map->inobject_properties() == 0);
4690 
4691  // Allocate memory for the object. The initial map might change when
4692  // the constructor's prototype changes, but instance size and property
4693  // counts remain unchanged (if slack tracking finished).
4694  ASSERT(!constructor->shared()->IsInobjectSlackTrackingInProgress());
4695  __ AllocateInNewSpace(instance_size,
4696  result,
4697  scratch,
4698  scratch2,
4699  deferred->entry(),
4700  TAG_OBJECT);
4701 
4702  __ bind(deferred->exit());
4703  if (FLAG_debug_code) {
4704  Label is_in_new_space;
4705  __ JumpIfInNewSpace(result, scratch, &is_in_new_space);
4706  __ Abort("Allocated object is not in new-space");
4707  __ bind(&is_in_new_space);
4708  }
4709 
4710  // Load the initial map.
4711  Register map = scratch;
4712  __ LoadHeapObject(map, constructor);
4713  __ lw(map, FieldMemOperand(map, JSFunction::kPrototypeOrInitialMapOffset));
4714 
4715  // Initialize map and fields of the newly allocated object.
4716  ASSERT(initial_map->instance_type() == JS_OBJECT_TYPE);
4717  __ sw(map, FieldMemOperand(result, JSObject::kMapOffset));
4718  __ LoadRoot(scratch, Heap::kEmptyFixedArrayRootIndex);
4719  __ sw(scratch, FieldMemOperand(result, JSObject::kElementsOffset));
4720  __ sw(scratch, FieldMemOperand(result, JSObject::kPropertiesOffset));
4721  if (initial_map->inobject_properties() != 0) {
4722  __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
4723  for (int i = 0; i < initial_map->inobject_properties(); i++) {
4724  int property_offset = JSObject::kHeaderSize + i * kPointerSize;
4725  __ sw(scratch, FieldMemOperand(result, property_offset));
4726  }
4727  }
4728 }
4729 
4730 
4731 void LCodeGen::DoDeferredAllocateObject(LAllocateObject* instr) {
4732  Register result = ToRegister(instr->result());
4733  Handle<JSFunction> constructor = instr->hydrogen()->constructor();
4734  Handle<Map> initial_map(constructor->initial_map());
4735  int instance_size = initial_map->instance_size();
4736 
4737  // TODO(3095996): Get rid of this. For now, we need to make the
4738  // result register contain a valid pointer because it is already
4739  // contained in the register pointer map.
4740  __ mov(result, zero_reg);
4741 
4742  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
4743  __ li(a0, Operand(Smi::FromInt(instance_size)));
4744  __ push(a0);
4745  CallRuntimeFromDeferred(Runtime::kAllocateInNewSpace, 1, instr);
4746  __ StoreToSafepointRegisterSlot(v0, result);
4747 }
4748 
4749 
4750 void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
4751  Handle<FixedArray> literals(instr->environment()->closure()->literals());
4752  ElementsKind boilerplate_elements_kind =
4753  instr->hydrogen()->boilerplate_elements_kind();
4754 
4755  // Deopt if the array literal boilerplate ElementsKind is of a type different
4756  // than the expected one. The check isn't necessary if the boilerplate has
4757  // already been converted to TERMINAL_FAST_ELEMENTS_KIND.
4758  if (CanTransitionToMoreGeneralFastElementsKind(
4759  boilerplate_elements_kind, true)) {
4760  __ LoadHeapObject(a1, instr->hydrogen()->boilerplate_object());
4761  // Load map into a2.
4762  __ lw(a2, FieldMemOperand(a1, HeapObject::kMapOffset));
4763  // Load the map's "bit field 2".
4764  __ lbu(a2, FieldMemOperand(a2, Map::kBitField2Offset));
4765  // Retrieve elements_kind from bit field 2.
4766  __ Ext(a2, a2, Map::kElementsKindShift, Map::kElementsKindBitCount);
4767  DeoptimizeIf(ne,
4768  instr->environment(),
4769  a2,
4770  Operand(boilerplate_elements_kind));
4771  }
4772 
4773  // Set up the parameters to the stub/runtime call.
4774  __ LoadHeapObject(a3, literals);
4775  __ li(a2, Operand(Smi::FromInt(instr->hydrogen()->literal_index())));
4776  // Boilerplate already exists, constant elements are never accessed.
4777  // Pass an empty fixed array.
4778  __ li(a1, Operand(isolate()->factory()->empty_fixed_array()));
4779  __ Push(a3, a2, a1);
4780 
4781  // Pick the right runtime function or stub to call.
4782  int length = instr->hydrogen()->length();
4783  if (instr->hydrogen()->IsCopyOnWrite()) {
4784  ASSERT(instr->hydrogen()->depth() == 1);
4785  FastCloneShallowArrayStub::Mode mode =
4786  FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS;
4787  FastCloneShallowArrayStub stub(mode, length);
4788  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
4789  } else if (instr->hydrogen()->depth() > 1) {
4790  CallRuntime(Runtime::kCreateArrayLiteral, 3, instr);
4791  } else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) {
4792  CallRuntime(Runtime::kCreateArrayLiteralShallow, 3, instr);
4793  } else {
4794  FastCloneShallowArrayStub::Mode mode =
4795  boilerplate_elements_kind == FAST_DOUBLE_ELEMENTS
4796  ? FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS
4797  : FastCloneShallowArrayStub::CLONE_ELEMENTS;
4798  FastCloneShallowArrayStub stub(mode, length);
4799  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
4800  }
4801 }
4802 
4803 
4804 void LCodeGen::EmitDeepCopy(Handle<JSObject> object,
4805  Register result,
4806  Register source,
4807  int* offset) {
4808  ASSERT(!source.is(a2));
4809  ASSERT(!result.is(a2));
4810 
4811  // Only elements backing stores for non-COW arrays need to be copied.
4812  Handle<FixedArrayBase> elements(object->elements());
4813  bool has_elements = elements->length() > 0 &&
4814  elements->map() != isolate()->heap()->fixed_cow_array_map();
4815 
4816  // Increase the offset so that subsequent objects end up right after
4817  // this object and its backing store.
4818  int object_offset = *offset;
4819  int object_size = object->map()->instance_size();
4820  int elements_offset = *offset + object_size;
4821  int elements_size = has_elements ? elements->Size() : 0;
4822  *offset += object_size + elements_size;
4823 
4824  // Copy object header.
4825  ASSERT(object->properties()->length() == 0);
4826  int inobject_properties = object->map()->inobject_properties();
4827  int header_size = object_size - inobject_properties * kPointerSize;
4828  for (int i = 0; i < header_size; i += kPointerSize) {
4829  if (has_elements && i == JSObject::kElementsOffset) {
4830  __ Addu(a2, result, Operand(elements_offset));
4831  } else {
4832  __ lw(a2, FieldMemOperand(source, i));
4833  }
4834  __ sw(a2, FieldMemOperand(result, object_offset + i));
4835  }
4836 
4837  // Copy in-object properties.
4838  for (int i = 0; i < inobject_properties; i++) {
4839  int total_offset = object_offset + object->GetInObjectPropertyOffset(i);
4840  Handle<Object> value = Handle<Object>(object->InObjectPropertyAt(i));
4841  if (value->IsJSObject()) {
4842  Handle<JSObject> value_object = Handle<JSObject>::cast(value);
4843  __ Addu(a2, result, Operand(*offset));
4844  __ sw(a2, FieldMemOperand(result, total_offset));
4845  __ LoadHeapObject(source, value_object);
4846  EmitDeepCopy(value_object, result, source, offset);
4847  } else if (value->IsHeapObject()) {
4848  __ LoadHeapObject(a2, Handle<HeapObject>::cast(value));
4849  __ sw(a2, FieldMemOperand(result, total_offset));
4850  } else {
4851  __ li(a2, Operand(value));
4852  __ sw(a2, FieldMemOperand(result, total_offset));
4853  }
4854  }
4855 
4856 
4857  if (has_elements) {
4858  // Copy elements backing store header.
4859  __ LoadHeapObject(source, elements);
4860  for (int i = 0; i < FixedArray::kHeaderSize; i += kPointerSize) {
4861  __ lw(a2, FieldMemOperand(source, i));
4862  __ sw(a2, FieldMemOperand(result, elements_offset + i));
4863  }
4864 
4865  // Copy elements backing store content.
4866  int elements_length = has_elements ? elements->length() : 0;
4867  if (elements->IsFixedDoubleArray()) {
4868  Handle<FixedDoubleArray> double_array =
4869  Handle<FixedDoubleArray>::cast(elements);
4870  for (int i = 0; i < elements_length; i++) {
4871  int64_t value = double_array->get_representation(i);
4872  // We only support little endian mode...
4873  int32_t value_low = static_cast<int32_t>(value & 0xFFFFFFFF);
4874  int32_t value_high = static_cast<int32_t>(value >> 32);
4875  int total_offset =
4876  elements_offset + FixedDoubleArray::OffsetOfElementAt(i);
4877  __ li(a2, Operand(value_low));
4878  __ sw(a2, FieldMemOperand(result, total_offset));
4879  __ li(a2, Operand(value_high));
4880  __ sw(a2, FieldMemOperand(result, total_offset + 4));
4881  }
4882  } else if (elements->IsFixedArray()) {
4883  Handle<FixedArray> fast_elements = Handle<FixedArray>::cast(elements);
4884  for (int i = 0; i < elements_length; i++) {
4885  int total_offset = elements_offset + FixedArray::OffsetOfElementAt(i);
4886  Handle<Object> value(fast_elements->get(i));
4887  if (value->IsJSObject()) {
4888  Handle<JSObject> value_object = Handle<JSObject>::cast(value);
4889  __ Addu(a2, result, Operand(*offset));
4890  __ sw(a2, FieldMemOperand(result, total_offset));
4891  __ LoadHeapObject(source, value_object);
4892  EmitDeepCopy(value_object, result, source, offset);
4893  } else if (value->IsHeapObject()) {
4894  __ LoadHeapObject(a2, Handle<HeapObject>::cast(value));
4895  __ sw(a2, FieldMemOperand(result, total_offset));
4896  } else {
4897  __ li(a2, Operand(value));
4898  __ sw(a2, FieldMemOperand(result, total_offset));
4899  }
4900  }
4901  } else {
4902  UNREACHABLE();
4903  }
4904  }
4905 }
4906 
4907 
4908 void LCodeGen::DoFastLiteral(LFastLiteral* instr) {
4909  int size = instr->hydrogen()->total_size();
4910  ElementsKind boilerplate_elements_kind =
4911  instr->hydrogen()->boilerplate()->GetElementsKind();
4912 
4913  // Deopt if the array literal boilerplate ElementsKind is of a type different
4914  // than the expected one. The check isn't necessary if the boilerplate has
4915  // already been converted to TERMINAL_FAST_ELEMENTS_KIND.
4916  if (CanTransitionToMoreGeneralFastElementsKind(
4917  boilerplate_elements_kind, true)) {
4918  __ LoadHeapObject(a1, instr->hydrogen()->boilerplate());
4919  // Load map into a2.
4920  __ lw(a2, FieldMemOperand(a1, HeapObject::kMapOffset));
4921  // Load the map's "bit field 2".
4922  __ lbu(a2, FieldMemOperand(a2, Map::kBitField2Offset));
4923  // Retrieve elements_kind from bit field 2.
4924  __ Ext(a2, a2, Map::kElementsKindShift, Map::kElementsKindBitCount);
4925  DeoptimizeIf(ne, instr->environment(), a2,
4926  Operand(boilerplate_elements_kind));
4927  }
4928 
4929  // Allocate all objects that are part of the literal in one big
4930  // allocation. This avoids multiple limit checks.
4931  Label allocated, runtime_allocate;
4932  __ AllocateInNewSpace(size, v0, a2, a3, &runtime_allocate, TAG_OBJECT);
4933  __ jmp(&allocated);
4934 
4935  __ bind(&runtime_allocate);
4936  __ li(a0, Operand(Smi::FromInt(size)));
4937  __ push(a0);
4938  CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
4939 
4940  __ bind(&allocated);
4941  int offset = 0;
4942  __ LoadHeapObject(a1, instr->hydrogen()->boilerplate());
4943  EmitDeepCopy(instr->hydrogen()->boilerplate(), v0, a1, &offset);
4944  ASSERT_EQ(size, offset);
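  // The assertion ties the two passes together: instr->hydrogen()->total_size()
  // feeds the single AllocateInNewSpace call above and is expected to cover the
  // whole boilerplate graph, so EmitDeepCopy must consume exactly that many
  // bytes.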
4945 }
4946 
4947 
4948 void LCodeGen::DoObjectLiteral(LObjectLiteral* instr) {
4949  ASSERT(ToRegister(instr->result()).is(v0));
4950  Handle<FixedArray> literals(instr->environment()->closure()->literals());
4951  Handle<FixedArray> constant_properties =
4952  instr->hydrogen()->constant_properties();
4953 
4954  // Set up the parameters to the stub/runtime call.
4955  __ LoadHeapObject(t0, literals);
4956  __ li(a3, Operand(Smi::FromInt(instr->hydrogen()->literal_index())));
4957  __ li(a2, Operand(constant_properties));
4958  int flags = instr->hydrogen()->fast_elements()
4959  ? ObjectLiteral::kFastElements
4960  : ObjectLiteral::kNoFlags;
4961  __ li(a1, Operand(Smi::FromInt(flags)));
4962  __ Push(t0, a3, a2, a1);
4963 
4964  // Pick the right runtime function or stub to call.
4965  int properties_count = constant_properties->length() / 2;
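  // constant_properties holds the literal's key/value pairs flattened into a
  // single FixedArray, hence the division by two.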
4966  if (instr->hydrogen()->depth() > 1) {
4967  CallRuntime(Runtime::kCreateObjectLiteral, 4, instr);
4968  } else if (flags != ObjectLiteral::kFastElements ||
4969  properties_count > FastCloneShallowObjectStub::kMaximumClonedProperties) {
4970  CallRuntime(Runtime::kCreateObjectLiteralShallow, 4, instr);
4971  } else {
4972  FastCloneShallowObjectStub stub(properties_count);
4973  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
4974  }
4975 }
4976 
4977 
4978 void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
4979  ASSERT(ToRegister(instr->value()).is(a0));
4980  ASSERT(ToRegister(instr->result()).is(v0));
4981  __ push(a0);
4982  CallRuntime(Runtime::kToFastProperties, 1, instr);
4983 }
4984 
4985 
4986 void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
4987  Label materialized;
4988  // Registers will be used as follows:
4989  // t3 = literals array.
4990  // a1 = regexp literal.
4991  // a0 = regexp literal clone.
4992  // a2 and t0-t2 are used as temporaries.
4993  int literal_offset =
4994  FixedArray::OffsetOfElementAt(instr->hydrogen()->literal_index());
4995  __ LoadHeapObject(t3, instr->hydrogen()->literals());
4996  __ lw(a1, FieldMemOperand(t3, literal_offset));
4997  __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
4998  __ Branch(&materialized, ne, a1, Operand(at));
4999 
5000  // Create regexp literal using runtime function
5001  // Result will be in v0.
5002  __ li(t2, Operand(Smi::FromInt(instr->hydrogen()->literal_index())));
5003  __ li(t1, Operand(instr->hydrogen()->pattern()));
5004  __ li(t0, Operand(instr->hydrogen()->flags()));
5005  __ Push(t3, t2, t1, t0);
5006  CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr);
5007  __ mov(a1, v0);
5008 
5009  __ bind(&materialized);
5010  int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
5011  Label allocated, runtime_allocate;
5012 
5013  __ AllocateInNewSpace(size, v0, a2, a3, &runtime_allocate, TAG_OBJECT);
5014  __ jmp(&allocated);
5015 
5016  __ bind(&runtime_allocate);
5017  __ li(a0, Operand(Smi::FromInt(size)));
5018  __ Push(a1, a0);
5019  CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
5020  __ pop(a1);
5021 
5022  __ bind(&allocated);
5023  // Copy the content into the newly allocated memory.
5024  // (Unroll copy loop once for better throughput).
5025  for (int i = 0; i < size - kPointerSize; i += 2 * kPointerSize) {
5026  __ lw(a3, FieldMemOperand(a1, i));
5027  __ lw(a2, FieldMemOperand(a1, i + kPointerSize));
5028  __ sw(a3, FieldMemOperand(v0, i));
5029  __ sw(a2, FieldMemOperand(v0, i + kPointerSize));
5030  }
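  // Worked example (assuming 4-byte pointers and a 20-byte clone size): the
  // loop above copies word pairs at offsets 0/4 and 8/12, and the remainder
  // check below moves the final word at offset 16.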
5031  if ((size % (2 * kPointerSize)) != 0) {
5032  __ lw(a3, FieldMemOperand(a1, size - kPointerSize));
5033  __ sw(a3, FieldMemOperand(v0, size - kPointerSize));
5034  }
5035 }
5036 
5037 
5038 void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
5039  // Use the fast case closure allocation code that allocates in new
5040  // space for nested functions that don't need literals cloning.
5041  Handle<SharedFunctionInfo> shared_info = instr->shared_info();
5042  bool pretenure = instr->hydrogen()->pretenure();
5043  if (!pretenure && shared_info->num_literals() == 0) {
5044  FastNewClosureStub stub(shared_info->language_mode());
5045  __ li(a1, Operand(shared_info));
5046  __ push(a1);
5047  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
5048  } else {
5049  __ li(a2, Operand(shared_info));
5050  __ li(a1, Operand(pretenure
5051  ? factory()->true_value()
5052  : factory()->false_value()));
5053  __ Push(cp, a2, a1);
5054  CallRuntime(Runtime::kNewClosure, 3, instr);
5055  }
5056 }
5057 
5058 
5059 void LCodeGen::DoTypeof(LTypeof* instr) {
5060  ASSERT(ToRegister(instr->result()).is(v0));
5061  Register input = ToRegister(instr->value());
5062  __ push(input);
5063  CallRuntime(Runtime::kTypeof, 1, instr);
5064 }
5065 
5066 
5067 void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
5068  Register input = ToRegister(instr->value());
5069  int true_block = chunk_->LookupDestination(instr->true_block_id());
5070  int false_block = chunk_->LookupDestination(instr->false_block_id());
5071  Label* true_label = chunk_->GetAssemblyLabel(true_block);
5072  Label* false_label = chunk_->GetAssemblyLabel(false_block);
5073 
5074  Register cmp1 = no_reg;
5075  Operand cmp2 = Operand(no_reg);
5076 
5077  Condition final_branch_condition = EmitTypeofIs(true_label,
5078  false_label,
5079  input,
5080  instr->type_literal(),
5081  cmp1,
5082  cmp2);
5083 
5084  ASSERT(cmp1.is_valid());
5085  ASSERT(!cmp2.is_reg() || cmp2.rm().is_valid());
5086 
5087  if (final_branch_condition != kNoCondition) {
5088  EmitBranch(true_block, false_block, final_branch_condition, cmp1, cmp2);
5089  }
5090 }
5091 
5092 
5093 Condition LCodeGen::EmitTypeofIs(Label* true_label,
5094  Label* false_label,
5095  Register input,
5096  Handle<String> type_name,
5097  Register& cmp1,
5098  Operand& cmp2) {
5099  // This function utilizes the delay slot heavily. This is used to load
5100  // values that are always usable without depending on the type of the input
5101  // register.
5102  Condition final_branch_condition = kNoCondition;
5103  Register scratch = scratch0();
5104  if (type_name->Equals(heap()->number_symbol())) {
5105  __ JumpIfSmi(input, true_label);
5106  __ lw(input, FieldMemOperand(input, HeapObject::kMapOffset));
5107  __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
5108  cmp1 = input;
5109  cmp2 = Operand(at);
5110  final_branch_condition = eq;
5111 
5112  } else if (type_name->Equals(heap()->string_symbol())) {
5113  __ JumpIfSmi(input, false_label);
5114  __ GetObjectType(input, input, scratch);
5115  __ Branch(USE_DELAY_SLOT, false_label,
5116  ge, scratch, Operand(FIRST_NONSTRING_TYPE));
5117  // input is an object so we can load the BitFieldOffset even if we take the
5118  // other branch.
5119  __ lbu(at, FieldMemOperand(input, Map::kBitFieldOffset));
5120  __ And(at, at, 1 << Map::kIsUndetectable);
5121  cmp1 = at;
5122  cmp2 = Operand(zero_reg);
5123  final_branch_condition = eq;
5124 
5125  } else if (type_name->Equals(heap()->boolean_symbol())) {
5126  __ LoadRoot(at, Heap::kTrueValueRootIndex);
5127  __ Branch(USE_DELAY_SLOT, true_label, eq, at, Operand(input));
5128  __ LoadRoot(at, Heap::kFalseValueRootIndex);
5129  cmp1 = at;
5130  cmp2 = Operand(input);
5131  final_branch_condition = eq;
5132 
5133  } else if (FLAG_harmony_typeof && type_name->Equals(heap()->null_symbol())) {
5134  __ LoadRoot(at, Heap::kNullValueRootIndex);
5135  cmp1 = at;
5136  cmp2 = Operand(input);
5137  final_branch_condition = eq;
5138 
5139  } else if (type_name->Equals(heap()->undefined_symbol())) {
5140  __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
5141  __ Branch(USE_DELAY_SLOT, true_label, eq, at, Operand(input));
5142  // The first instruction of JumpIfSmi is an And - it is safe in the delay
5143  // slot.
5144  __ JumpIfSmi(input, false_label);
5145  // Check for undetectable objects => true.
5146  __ lw(input, FieldMemOperand(input, HeapObject::kMapOffset));
5147  __ lbu(at, FieldMemOperand(input, Map::kBitFieldOffset));
5148  __ And(at, at, 1 << Map::kIsUndetectable);
5149  cmp1 = at;
5150  cmp2 = Operand(zero_reg);
5151  final_branch_condition = ne;
5152 
5153  } else if (type_name->Equals(heap()->function_symbol())) {
5154  STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
5155  __ JumpIfSmi(input, false_label);
5156  __ GetObjectType(input, scratch, input);
5157  __ Branch(true_label, eq, input, Operand(JS_FUNCTION_TYPE));
5158  cmp1 = input;
5159  cmp2 = Operand(JS_FUNCTION_PROXY_TYPE);
5160  final_branch_condition = eq;
5161 
5162  } else if (type_name->Equals(heap()->object_symbol())) {
5163  __ JumpIfSmi(input, false_label);
5164  if (!FLAG_harmony_typeof) {
5165  __ LoadRoot(at, Heap::kNullValueRootIndex);
5166  __ Branch(USE_DELAY_SLOT, true_label, eq, at, Operand(input));
5167  }
5168  // input is an object, it is safe to use GetObjectType in the delay slot.
5169  __ GetObjectType(input, input, scratch);
5170  __ Branch(USE_DELAY_SLOT, false_label,
5171  lt, scratch, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
5172  // Still an object, so the InstanceType can be loaded.
5173  __ lbu(scratch, FieldMemOperand(input, Map::kInstanceTypeOffset));
5174  __ Branch(USE_DELAY_SLOT, false_label,
5175  gt, scratch, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
5176  // Still an object, so the BitField can be loaded.
5177  // Check for undetectable objects => false.
5178  __ lbu(at, FieldMemOperand(input, Map::kBitFieldOffset));
5179  __ And(at, at, 1 << Map::kIsUndetectable);
5180  cmp1 = at;
5181  cmp2 = Operand(zero_reg);
5182  final_branch_condition = eq;
5183 
5184  } else {
5185  cmp1 = at;
5186  cmp2 = Operand(zero_reg); // Set to valid regs, to avoid caller assertion.
5187  __ Branch(false_label);
5188  }
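  // Contract with the caller: cmp1 and cmp2 are always left holding valid
  // operands on return (DoTypeofIsAndBranch asserts as much), and kNoCondition
  // is only returned for unrecognized type literals, which have already
  // branched to false_label unconditionally.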
5189 
5190  return final_branch_condition;
5191 }
5192 
5193 
5194 void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
5195  Register temp1 = ToRegister(instr->temp());
5196  int true_block = chunk_->LookupDestination(instr->true_block_id());
5197  int false_block = chunk_->LookupDestination(instr->false_block_id());
5198 
5199  EmitIsConstructCall(temp1, scratch0());
5200 
5201  EmitBranch(true_block, false_block, eq, temp1,
5202  Operand(Smi::FromInt(StackFrame::CONSTRUCT)));
5203 }
5204 
5205 
5206 void LCodeGen::EmitIsConstructCall(Register temp1, Register temp2) {
5207  ASSERT(!temp1.is(temp2));
5208  // Get the frame pointer for the calling frame.
5209  __ lw(temp1, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
5210 
5211  // Skip the arguments adaptor frame if it exists.
5212  Label check_frame_marker;
5213  __ lw(temp2, MemOperand(temp1, StandardFrameConstants::kContextOffset));
5214  __ Branch(&check_frame_marker, ne, temp2,
5215  Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
5216  __ lw(temp1, MemOperand(temp1, StandardFrameConstants::kCallerFPOffset));
5217 
5218  // Check the marker in the calling frame.
5219  __ bind(&check_frame_marker);
5220  __ lw(temp1, MemOperand(temp1, StandardFrameConstants::kMarkerOffset));
5221 }
5222 
5223 
5224 void LCodeGen::EnsureSpaceForLazyDeopt() {
5225  // Ensure that we have enough space after the previous lazy-bailout
5226  // instruction for patching the code here.
5227  int current_pc = masm()->pc_offset();
5228  int patch_size = Deoptimizer::patch_size();
5229  if (current_pc < last_lazy_deopt_pc_ + patch_size) {
5230  int padding_size = last_lazy_deopt_pc_ + patch_size - current_pc;
5231  ASSERT_EQ(0, padding_size % Assembler::kInstrSize);
5232  while (padding_size > 0) {
5233  __ nop();
5234  padding_size -= Assembler::kInstrSize;
5235  }
5236  }
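  // The padding guarantees that consecutive lazy-deopt points are at least
  // patch_size() bytes apart, so the call sequences patched in when lazily
  // deoptimizing cannot overlap one another.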
5237  last_lazy_deopt_pc_ = masm()->pc_offset();
5238 }
5239 
5240 
5241 void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
5242  EnsureSpaceForLazyDeopt();
5243  ASSERT(instr->HasEnvironment());
5244  LEnvironment* env = instr->environment();
5245  RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
5246  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
5247 }
5248 
5249 
5250 void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
5251  DeoptimizeIf(al, instr->environment(), zero_reg, Operand(zero_reg));
5252 }
5253 
5254 
5255 void LCodeGen::DoDeleteProperty(LDeleteProperty* instr) {
5256  Register object = ToRegister(instr->object());
5257  Register key = ToRegister(instr->key());
5258  Register strict = scratch0();
5259  __ li(strict, Operand(Smi::FromInt(strict_mode_flag())));
5260  __ Push(object, key, strict);
5261  ASSERT(instr->HasPointerMap());
5262  LPointerMap* pointers = instr->pointer_map();
5263  RecordPosition(pointers->position());
5264  SafepointGenerator safepoint_generator(
5265  this, pointers, Safepoint::kLazyDeopt);
5266  __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION, safepoint_generator);
5267 }
5268 
5269 
5270 void LCodeGen::DoIn(LIn* instr) {
5271  Register obj = ToRegister(instr->object());
5272  Register key = ToRegister(instr->key());
5273  __ Push(key, obj);
5274  ASSERT(instr->HasPointerMap());
5275  LPointerMap* pointers = instr->pointer_map();
5276  RecordPosition(pointers->position());
5277  SafepointGenerator safepoint_generator(this, pointers, Safepoint::kLazyDeopt);
5278  __ InvokeBuiltin(Builtins::IN, CALL_FUNCTION, safepoint_generator);
5279 }
5280 
5281 
5282 void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
5283  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
5284  __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
5285  RecordSafepointWithLazyDeopt(
5286  instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
5287  ASSERT(instr->HasEnvironment());
5288  LEnvironment* env = instr->environment();
5289  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
5290 }
5291 
5292 
5293 void LCodeGen::DoStackCheck(LStackCheck* instr) {
5294  class DeferredStackCheck: public LDeferredCode {
5295  public:
5296  DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
5297  : LDeferredCode(codegen), instr_(instr) { }
5298  virtual void Generate() { codegen()->DoDeferredStackCheck(instr_); }
5299  virtual LInstruction* instr() { return instr_; }
5300  private:
5301  LStackCheck* instr_;
5302  };
5303 
5304  ASSERT(instr->HasEnvironment());
5305  LEnvironment* env = instr->environment();
5306  // There is no LLazyBailout instruction for stack-checks. We have to
5307  // prepare for lazy deoptimization explicitly here.
5308  if (instr->hydrogen()->is_function_entry()) {
5309  // Perform stack overflow check.
5310  Label done;
5311  __ LoadRoot(at, Heap::kStackLimitRootIndex);
5312  __ Branch(&done, hs, sp, Operand(at));
5313  StackCheckStub stub;
5314  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
5315  EnsureSpaceForLazyDeopt();
5316  __ bind(&done);
5317  RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
5318  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
5319  } else {
5320  ASSERT(instr->hydrogen()->is_backwards_branch());
5321  // Perform stack overflow check if this goto needs it before jumping.
5322  DeferredStackCheck* deferred_stack_check =
5323  new(zone()) DeferredStackCheck(this, instr);
5324  __ LoadRoot(at, Heap::kStackLimitRootIndex);
5325  __ Branch(deferred_stack_check->entry(), lo, sp, Operand(at));
5326  EnsureSpaceForLazyDeopt();
5327  __ bind(instr->done_label());
5328  deferred_stack_check->SetExit(instr->done_label());
5329  RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
5330  // Don't record a deoptimization index for the safepoint here.
5331  // This will be done explicitly when emitting the call and the safepoint in
5332  // the deferred code.
5333  }
5334 }
5335 
5336 
5337 void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
5338  // This is a pseudo-instruction that ensures that the environment here is
5339  // properly registered for deoptimization and records the assembler's PC
5340  // offset.
5341  LEnvironment* environment = instr->environment();
5342  environment->SetSpilledRegisters(instr->SpilledRegisterArray(),
5343  instr->SpilledDoubleRegisterArray());
5344 
5345  // If the environment were already registered, we would have no way of
5346  // backpatching it with the spill slot operands.
5347  ASSERT(!environment->HasBeenRegistered());
5348  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
5349  ASSERT(osr_pc_offset_ == -1);
5350  osr_pc_offset_ = masm()->pc_offset();
5351 }
5352 
5353 
5354 void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
5355  Register result = ToRegister(instr->result());
5356  Register object = ToRegister(instr->object());
5357  __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
5358  DeoptimizeIf(eq, instr->environment(), object, Operand(at));
5359 
5360  Register null_value = t1;
5361  __ LoadRoot(null_value, Heap::kNullValueRootIndex);
5362  DeoptimizeIf(eq, instr->environment(), object, Operand(null_value));
5363 
5364  __ And(at, object, kSmiTagMask);
5365  DeoptimizeIf(eq, instr->environment(), at, Operand(zero_reg));
5366 
5367  STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
5368  __ GetObjectType(object, a1, a1);
5369  DeoptimizeIf(le, instr->environment(), a1, Operand(LAST_JS_PROXY_TYPE));
5370 
5371  Label use_cache, call_runtime;
5372  ASSERT(object.is(a0));
5373  __ CheckEnumCache(null_value, &call_runtime);
5374 
5375  __ lw(result, FieldMemOperand(object, HeapObject::kMapOffset));
5376  __ Branch(&use_cache);
5377 
5378  // Get the set of properties to enumerate.
5379  __ bind(&call_runtime);
5380  __ push(object);
5381  CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr);
5382 
5383  __ lw(a1, FieldMemOperand(v0, HeapObject::kMapOffset));
5384  ASSERT(result.is(v0));
5385  __ LoadRoot(at, Heap::kMetaMapRootIndex);
5386  DeoptimizeIf(ne, instr->environment(), a1, Operand(at));
5387  __ bind(&use_cache);
5388 }
5389 
5390 
5391 void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
5392  Register map = ToRegister(instr->map());
5393  Register result = ToRegister(instr->result());
5394  Label load_cache, done;
5395  __ EnumLength(result, map);
5396  __ Branch(&load_cache, ne, result, Operand(Smi::FromInt(0)));
5397  __ li(result, Operand(isolate()->factory()->empty_fixed_array()));
5398  __ jmp(&done);
5399 
5400  __ bind(&load_cache);
5401  __ LoadInstanceDescriptors(map, result);
5402  __ lw(result,
5403  FieldMemOperand(result, DescriptorArray::kEnumCacheOffset));
5404  __ lw(result,
5405  FieldMemOperand(result, FixedArray::SizeFor(instr->idx())));
5406  DeoptimizeIf(eq, instr->environment(), result, Operand(zero_reg));
5407 
5408  __ bind(&done);
5409 }
5410 
5411 
5412 void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
5413  Register object = ToRegister(instr->value());
5414  Register map = ToRegister(instr->map());
5415  __ lw(scratch0(), FieldMemOperand(object, HeapObject::kMapOffset));
5416  DeoptimizeIf(ne, instr->environment(), map, Operand(scratch0()));
5417 }
5418 
5419 
5420 void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
5421  Register object = ToRegister(instr->object());
5422  Register index = ToRegister(instr->index());
5423  Register result = ToRegister(instr->result());
5424  Register scratch = scratch0();
5425 
5426  Label out_of_object, done;
5427  __ Branch(USE_DELAY_SLOT, &out_of_object, lt, index, Operand(zero_reg));
5428  __ sll(scratch, index, kPointerSizeLog2 - kSmiTagSize); // In delay slot.
5429 
5430  STATIC_ASSERT(kPointerSizeLog2 > kSmiTagSize);
5431  __ Addu(scratch, object, scratch);
5432  __ lw(result, FieldMemOperand(scratch, JSObject::kHeaderSize));
5433 
5434  __ Branch(&done);
5435 
5436  __ bind(&out_of_object);
5437  __ lw(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
5438  // Index is equal to negated out of object property index plus 1.
5439  __ Subu(scratch, result, scratch);
5440  __ lw(result, FieldMemOperand(scratch,
5441  FixedArray::kHeaderSize - kPointerSize));
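  // Worked example (32-bit pointers, smi tag size 1): a smi index of -3 left
  // scratch == -12 after the shift in the delay slot above, so result - scratch
  // plus the (kHeaderSize - kPointerSize) bias addresses element 2 of the
  // properties array, matching the "negated index plus one" encoding.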
5442  __ bind(&done);
5443 }
5444 
5445 
5446 #undef __
5447 
5448 } } // namespace v8::internal