v8 3.25.30 (node 0.11.13)
V8 is Google's open source JavaScript engine
lithium-codegen-mips.cc
1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
4 // met:
5 //
6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided
11 // with the distribution.
12 // * Neither the name of Google Inc. nor the names of its
13 // contributors may be used to endorse or promote products derived
14 // from this software without specific prior written permission.
15 //
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 
28 #include "v8.h"
29 
30 #include "lithium-codegen-mips.h"
31 #include "lithium-gap-resolver-mips.h"
32 #include "code-stubs.h"
33 #include "stub-cache.h"
34 #include "hydrogen-osr.h"
35 
36 namespace v8 {
37 namespace internal {
38 
39 
40 class SafepointGenerator V8_FINAL : public CallWrapper {
41  public:
42  SafepointGenerator(LCodeGen* codegen,
43  LPointerMap* pointers,
44  Safepoint::DeoptMode mode)
45  : codegen_(codegen),
46  pointers_(pointers),
47  deopt_mode_(mode) { }
48  virtual ~SafepointGenerator() {}
49 
50  virtual void BeforeCall(int call_size) const V8_OVERRIDE {}
51 
52  virtual void AfterCall() const V8_OVERRIDE {
53  codegen_->RecordSafepoint(pointers_, deopt_mode_);
54  }
55 
56  private:
57  LCodeGen* codegen_;
58  LPointerMap* pointers_;
59  Safepoint::DeoptMode deopt_mode_;
60 };
61 
62 
63 #define __ masm()->
64 
65 bool LCodeGen::GenerateCode() {
66  LPhase phase("Z_Code generation", chunk());
67  ASSERT(is_unused());
68  status_ = GENERATING;
69 
70  // Open a frame scope to indicate that there is a frame on the stack. The
71  // NONE indicates that the scope shouldn't actually generate code to set up
72  // the frame (that is done in GeneratePrologue).
73  FrameScope frame_scope(masm_, StackFrame::NONE);
74 
75  return GeneratePrologue() &&
76  GenerateBody() &&
77  GenerateDeferredCode() &&
78  GenerateDeoptJumpTable() &&
79  GenerateSafepointTable();
80 }
81 
82 
83 void LCodeGen::FinishCode(Handle<Code> code) {
84  ASSERT(is_done());
85  code->set_stack_slots(GetStackSlotCount());
86  code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
87  if (code->is_optimized_code()) RegisterWeakObjectsInOptimizedCode(code);
88  PopulateDeoptimizationData(code);
89  info()->CommitDependencies(code);
90 }
91 
92 
93 void LChunkBuilder::Abort(BailoutReason reason) {
94  info()->set_bailout_reason(reason);
95  status_ = ABORTED;
96 }
97 
98 
99 void LCodeGen::SaveCallerDoubles() {
100  ASSERT(info()->saves_caller_doubles());
101  ASSERT(NeedsEagerFrame());
102  Comment(";;; Save clobbered callee double registers");
103  int count = 0;
104  BitVector* doubles = chunk()->allocated_double_registers();
105  BitVector::Iterator save_iterator(doubles);
106  while (!save_iterator.Done()) {
107  __ sdc1(DoubleRegister::FromAllocationIndex(save_iterator.Current()),
108  MemOperand(sp, count * kDoubleSize));
109  save_iterator.Advance();
110  count++;
111  }
112 }
113 
114 
115 void LCodeGen::RestoreCallerDoubles() {
116  ASSERT(info()->saves_caller_doubles());
117  ASSERT(NeedsEagerFrame());
118  Comment(";;; Restore clobbered callee double registers");
119  BitVector* doubles = chunk()->allocated_double_registers();
120  BitVector::Iterator save_iterator(doubles);
121  int count = 0;
122  while (!save_iterator.Done()) {
123  __ ldc1(DoubleRegister::FromAllocationIndex(save_iterator.Current()),
124  MemOperand(sp, count * kDoubleSize));
125  save_iterator.Advance();
126  count++;
127  }
128 }
129 
130 
131 bool LCodeGen::GeneratePrologue() {
132  ASSERT(is_generating());
133 
134  if (info()->IsOptimizing()) {
135  ProfileEntryHookStub::MaybeCallEntryHook(masm_);
136 
137 #ifdef DEBUG
138  if (strlen(FLAG_stop_at) > 0 &&
139  info_->function()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
140  __ stop("stop_at");
141  }
142 #endif
143 
144  // a1: Callee's JS function.
145  // cp: Callee's context.
146  // fp: Caller's frame pointer.
147  // lr: Caller's pc.
148 
149  // Sloppy mode functions and builtins need to replace the receiver with the
150  // global proxy when called as functions (without an explicit receiver
151  // object).
152  if (info_->this_has_uses() &&
153  info_->strict_mode() == SLOPPY &&
154  !info_->is_native()) {
155  Label ok;
156  int receiver_offset = info_->scope()->num_parameters() * kPointerSize;
157  __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
158  __ lw(a2, MemOperand(sp, receiver_offset));
159  __ Branch(&ok, ne, a2, Operand(at));
160 
161  __ lw(a2, GlobalObjectOperand());
162  __ lw(a2, FieldMemOperand(a2, GlobalObject::kGlobalReceiverOffset));
163 
164  __ sw(a2, MemOperand(sp, receiver_offset));
165 
166  __ bind(&ok);
167  }
168  }
169 
170  info()->set_prologue_offset(masm_->pc_offset());
171  if (NeedsEagerFrame()) {
172  __ Prologue(info()->IsStub() ? BUILD_STUB_FRAME : BUILD_FUNCTION_FRAME);
173  frame_is_built_ = true;
174  info_->AddNoFrameRange(0, masm_->pc_offset());
175  }
176 
177  // Reserve space for the stack slots needed by the code.
178  int slots = GetStackSlotCount();
179  if (slots > 0) {
180  if (FLAG_debug_code) {
181  __ Subu(sp, sp, Operand(slots * kPointerSize));
182  __ Push(a0, a1);
183  __ Addu(a0, sp, Operand(slots * kPointerSize));
184  __ li(a1, Operand(kSlotsZapValue));
185  Label loop;
186  __ bind(&loop);
187  __ Subu(a0, a0, Operand(kPointerSize));
188  __ sw(a1, MemOperand(a0, 2 * kPointerSize));
189  __ Branch(&loop, ne, a0, Operand(sp));
190  __ Pop(a0, a1);
191  } else {
192  __ Subu(sp, sp, Operand(slots * kPointerSize));
193  }
194  }
195 
196  if (info()->saves_caller_doubles()) {
197  SaveCallerDoubles();
198  }
199 
200  // Possibly allocate a local context.
201  int heap_slots = info()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
202  if (heap_slots > 0) {
203  Comment(";;; Allocate local context");
204  // Argument to NewContext is the function, which is in a1.
205  if (heap_slots <= FastNewContextStub::kMaximumSlots) {
206  FastNewContextStub stub(heap_slots);
207  __ CallStub(&stub);
208  } else {
209  __ push(a1);
210  __ CallRuntime(Runtime::kHiddenNewFunctionContext, 1);
211  }
212  RecordSafepoint(Safepoint::kNoLazyDeopt);
213  // Context is returned in v0. It replaces the context passed to us.
214  // It's saved in the stack and kept live in cp.
215  __ mov(cp, v0);
216  __ sw(v0, MemOperand(fp, StandardFrameConstants::kContextOffset));
217  // Copy any necessary parameters into the context.
218  int num_parameters = scope()->num_parameters();
219  for (int i = 0; i < num_parameters; i++) {
220  Variable* var = scope()->parameter(i);
221  if (var->IsContextSlot()) {
222  int parameter_offset = StandardFrameConstants::kCallerSPOffset +
223  (num_parameters - 1 - i) * kPointerSize;
224  // Load parameter from stack.
225  __ lw(a0, MemOperand(fp, parameter_offset));
226  // Store it in the context.
227  MemOperand target = ContextOperand(cp, var->index());
228  __ sw(a0, target);
229  // Update the write barrier. This clobbers a3 and a0.
230  __ RecordWriteContextSlot(
231  cp, target.offset(), a0, a3, GetRAState(), kSaveFPRegs);
232  }
233  }
234  Comment(";;; End allocate local context");
235  }
236 
237  // Trace the call.
238  if (FLAG_trace && info()->IsOptimizing()) {
239  // We have not executed any compiled code yet, so cp still holds the
240  // incoming context.
241  __ CallRuntime(Runtime::kTraceEnter, 0);
242  }
243  return !is_aborted();
244 }
245 
246 
247 void LCodeGen::GenerateOsrPrologue() {
248  // Generate the OSR entry prologue at the first unknown OSR value, or if there
249  // are none, at the OSR entrypoint instruction.
250  if (osr_pc_offset_ >= 0) return;
251 
252  osr_pc_offset_ = masm()->pc_offset();
253 
254  // Adjust the frame size, subsuming the unoptimized frame into the
255  // optimized frame.
256  int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots();
257  ASSERT(slots >= 0);
258  __ Subu(sp, sp, Operand(slots * kPointerSize));
259 }
260 
261 
262 void LCodeGen::GenerateBodyInstructionPre(LInstruction* instr) {
263  if (instr->IsCall()) {
264  EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
265  }
266  if (!instr->IsLazyBailout() && !instr->IsGap()) {
267  safepoints_.BumpLastLazySafepointIndex();
268  }
269 }
270 
271 
272 bool LCodeGen::GenerateDeferredCode() {
273  ASSERT(is_generating());
274  if (deferred_.length() > 0) {
275  for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
276  LDeferredCode* code = deferred_[i];
277 
278  HValue* value =
279  instructions_->at(code->instruction_index())->hydrogen_value();
280  RecordAndWritePosition(
281  chunk()->graph()->SourcePositionToScriptPosition(value->position()));
282 
283  Comment(";;; <@%d,#%d> "
284  "-------------------- Deferred %s --------------------",
285  code->instruction_index(),
286  code->instr()->hydrogen_value()->id(),
287  code->instr()->Mnemonic());
288  __ bind(code->entry());
289  if (NeedsDeferredFrame()) {
290  Comment(";;; Build frame");
291  ASSERT(!frame_is_built_);
292  ASSERT(info()->IsStub());
293  frame_is_built_ = true;
294  __ MultiPush(cp.bit() | fp.bit() | ra.bit());
295  __ li(scratch0(), Operand(Smi::FromInt(StackFrame::STUB)));
296  __ push(scratch0());
297  __ Addu(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
298  Comment(";;; Deferred code");
299  }
300  code->Generate();
301  if (NeedsDeferredFrame()) {
302  Comment(";;; Destroy frame");
303  ASSERT(frame_is_built_);
304  __ pop(at);
305  __ MultiPop(cp.bit() | fp.bit() | ra.bit());
306  frame_is_built_ = false;
307  }
308  __ jmp(code->exit());
309  }
310  }
311  // Deferred code is the last part of the instruction sequence. Mark
312  // the generated code as done unless we bailed out.
313  if (!is_aborted()) status_ = DONE;
314  return !is_aborted();
315 }
316 
317 
318 bool LCodeGen::GenerateDeoptJumpTable() {
319  if (deopt_jump_table_.length() > 0) {
320  Comment(";;; -------------------- Jump table --------------------");
321  }
322  Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
323  Label table_start;
324  __ bind(&table_start);
325  Label needs_frame;
326  for (int i = 0; i < deopt_jump_table_.length(); i++) {
327  __ bind(&deopt_jump_table_[i].label);
328  Address entry = deopt_jump_table_[i].address;
329  Deoptimizer::BailoutType type = deopt_jump_table_[i].bailout_type;
330  int id = Deoptimizer::GetDeoptimizationId(isolate(), entry, type);
331  if (id == Deoptimizer::kNotDeoptimizationEntry) {
332  Comment(";;; jump table entry %d.", i);
333  } else {
334  Comment(";;; jump table entry %d: deoptimization bailout %d.", i, id);
335  }
336  __ li(t9, Operand(ExternalReference::ForDeoptEntry(entry)));
337  if (deopt_jump_table_[i].needs_frame) {
338  ASSERT(!info()->saves_caller_doubles());
339  if (needs_frame.is_bound()) {
340  __ Branch(&needs_frame);
341  } else {
342  __ bind(&needs_frame);
343  __ MultiPush(cp.bit() | fp.bit() | ra.bit());
344  // This variant of deopt can only be used with stubs. Since we don't
345  // have a function pointer to install in the stack frame that we're
346  // building, install a special marker there instead.
347  ASSERT(info()->IsStub());
348  __ li(scratch0(), Operand(Smi::FromInt(StackFrame::STUB)));
349  __ push(scratch0());
350  __ Addu(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
351  __ Call(t9);
352  }
353  } else {
354  if (info()->saves_caller_doubles()) {
355  ASSERT(info()->IsStub());
356  RestoreCallerDoubles();
357  }
358  __ Call(t9);
359  }
360  }
361  __ RecordComment("]");
362 
363  // The deoptimization jump table is the last part of the instruction
364  // sequence. Mark the generated code as done unless we bailed out.
365  if (!is_aborted()) status_ = DONE;
366  return !is_aborted();
367 }
368 
369 
370 bool LCodeGen::GenerateSafepointTable() {
371  ASSERT(is_done());
372  safepoints_.Emit(masm(), GetStackSlotCount());
373  return !is_aborted();
374 }
375 
376 
377 Register LCodeGen::ToRegister(int index) const {
378  return Register::FromAllocationIndex(index);
379 }
380 
381 
382 DoubleRegister LCodeGen::ToDoubleRegister(int index) const {
383  return DoubleRegister::FromAllocationIndex(index);
384 }
385 
386 
387 Register LCodeGen::ToRegister(LOperand* op) const {
388  ASSERT(op->IsRegister());
389  return ToRegister(op->index());
390 }
391 
392 
393 Register LCodeGen::EmitLoadRegister(LOperand* op, Register scratch) {
394  if (op->IsRegister()) {
395  return ToRegister(op->index());
396  } else if (op->IsConstantOperand()) {
397  LConstantOperand* const_op = LConstantOperand::cast(op);
398  HConstant* constant = chunk_->LookupConstant(const_op);
399  Handle<Object> literal = constant->handle(isolate());
400  Representation r = chunk_->LookupLiteralRepresentation(const_op);
401  if (r.IsInteger32()) {
402  ASSERT(literal->IsNumber());
403  __ li(scratch, Operand(static_cast<int32_t>(literal->Number())));
404  } else if (r.IsSmi()) {
405  ASSERT(constant->HasSmiValue());
406  __ li(scratch, Operand(Smi::FromInt(constant->Integer32Value())));
407  } else if (r.IsDouble()) {
408  Abort(kEmitLoadRegisterUnsupportedDoubleImmediate);
409  } else {
410  ASSERT(r.IsSmiOrTagged());
411  __ li(scratch, literal);
412  }
413  return scratch;
414  } else if (op->IsStackSlot()) {
415  __ lw(scratch, ToMemOperand(op));
416  return scratch;
417  }
418  UNREACHABLE();
419  return scratch;
420 }
421 
422 
423 DoubleRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
424  ASSERT(op->IsDoubleRegister());
425  return ToDoubleRegister(op->index());
426 }
427 
428 
429 DoubleRegister LCodeGen::EmitLoadDoubleRegister(LOperand* op,
430  FloatRegister flt_scratch,
431  DoubleRegister dbl_scratch) {
432  if (op->IsDoubleRegister()) {
433  return ToDoubleRegister(op->index());
434  } else if (op->IsConstantOperand()) {
435  LConstantOperand* const_op = LConstantOperand::cast(op);
436  HConstant* constant = chunk_->LookupConstant(const_op);
437  Handle<Object> literal = constant->handle(isolate());
438  Representation r = chunk_->LookupLiteralRepresentation(const_op);
439  if (r.IsInteger32()) {
440  ASSERT(literal->IsNumber());
441  __ li(at, Operand(static_cast<int32_t>(literal->Number())));
442  __ mtc1(at, flt_scratch);
443  __ cvt_d_w(dbl_scratch, flt_scratch);
444  return dbl_scratch;
445  } else if (r.IsDouble()) {
446  Abort(kUnsupportedDoubleImmediate);
447  } else if (r.IsTagged()) {
448  Abort(kUnsupportedTaggedImmediate);
449  }
450  } else if (op->IsStackSlot()) {
451  MemOperand mem_op = ToMemOperand(op);
452  __ ldc1(dbl_scratch, mem_op);
453  return dbl_scratch;
454  }
455  UNREACHABLE();
456  return dbl_scratch;
457 }
458 
459 
460 Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
461  HConstant* constant = chunk_->LookupConstant(op);
462  ASSERT(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged());
463  return constant->handle(isolate());
464 }
465 
466 
467 bool LCodeGen::IsInteger32(LConstantOperand* op) const {
468  return chunk_->LookupLiteralRepresentation(op).IsSmiOrInteger32();
469 }
470 
471 
472 bool LCodeGen::IsSmi(LConstantOperand* op) const {
473  return chunk_->LookupLiteralRepresentation(op).IsSmi();
474 }
475 
476 
477 int32_t LCodeGen::ToInteger32(LConstantOperand* op) const {
478  return ToRepresentation(op, Representation::Integer32());
479 }
480 
481 
482 int32_t LCodeGen::ToRepresentation(LConstantOperand* op,
483  const Representation& r) const {
484  HConstant* constant = chunk_->LookupConstant(op);
485  int32_t value = constant->Integer32Value();
486  if (r.IsInteger32()) return value;
487  ASSERT(r.IsSmiOrTagged());
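// On this 32-bit target a Smi is the integer shifted left by the tag size,
// so the cast below yields the tagged bit pattern (value << 1) as an int32.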
488  return reinterpret_cast<int32_t>(Smi::FromInt(value));
489 }
490 
491 
492 Smi* LCodeGen::ToSmi(LConstantOperand* op) const {
493  HConstant* constant = chunk_->LookupConstant(op);
494  return Smi::FromInt(constant->Integer32Value());
495 }
496 
497 
498 double LCodeGen::ToDouble(LConstantOperand* op) const {
499  HConstant* constant = chunk_->LookupConstant(op);
500  ASSERT(constant->HasDoubleValue());
501  return constant->DoubleValue();
502 }
503 
504 
505 Operand LCodeGen::ToOperand(LOperand* op) {
506  if (op->IsConstantOperand()) {
507  LConstantOperand* const_op = LConstantOperand::cast(op);
508  HConstant* constant = chunk()->LookupConstant(const_op);
509  Representation r = chunk_->LookupLiteralRepresentation(const_op);
510  if (r.IsSmi()) {
511  ASSERT(constant->HasSmiValue());
512  return Operand(Smi::FromInt(constant->Integer32Value()));
513  } else if (r.IsInteger32()) {
514  ASSERT(constant->HasInteger32Value());
515  return Operand(constant->Integer32Value());
516  } else if (r.IsDouble()) {
517  Abort(kToOperandUnsupportedDoubleImmediate);
518  }
519  ASSERT(r.IsTagged());
520  return Operand(constant->handle(isolate()));
521  } else if (op->IsRegister()) {
522  return Operand(ToRegister(op));
523  } else if (op->IsDoubleRegister()) {
524  Abort(kToOperandIsDoubleRegisterUnimplemented);
525  return Operand(0);
526  }
527  // Stack slots not implemented, use ToMemOperand instead.
528  UNREACHABLE();
529  return Operand(0);
530 }
531 
532 
533 static int ArgumentsOffsetWithoutFrame(int index) {
534  ASSERT(index < 0);
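// With no frame, argument indices map straight onto sp-relative offsets:
// index -1 yields offset 0, index -2 yields kPointerSize, and so on.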
535  return -(index + 1) * kPointerSize;
536 }
537 
538 
539 MemOperand LCodeGen::ToMemOperand(LOperand* op) const {
540  ASSERT(!op->IsRegister());
541  ASSERT(!op->IsDoubleRegister());
542  ASSERT(op->IsStackSlot() || op->IsDoubleStackSlot());
543  if (NeedsEagerFrame()) {
544  return MemOperand(fp, StackSlotOffset(op->index()));
545  } else {
546  // There is no eager frame, so retrieve the parameter relative to the
547  // stack pointer.
548  return MemOperand(sp, ArgumentsOffsetWithoutFrame(op->index()));
549  }
550 }
551 
552 
553 MemOperand LCodeGen::ToHighMemOperand(LOperand* op) const {
554  ASSERT(op->IsDoubleStackSlot());
555  if (NeedsEagerFrame()) {
556  return MemOperand(fp, StackSlotOffset(op->index()) + kPointerSize);
557  } else {
558  // There is no eager frame, so retrieve the parameter relative to the
559  // stack pointer.
560  return MemOperand(
561  sp, ArgumentsOffsetWithoutFrame(op->index()) + kPointerSize);
562  }
563 }
564 
565 
566 void LCodeGen::WriteTranslation(LEnvironment* environment,
567  Translation* translation) {
568  if (environment == NULL) return;
569 
570  // The translation includes one command per value in the environment.
571  int translation_size = environment->translation_size();
572  // The output frame height does not include the parameters.
573  int height = translation_size - environment->parameter_count();
574 
575  WriteTranslation(environment->outer(), translation);
576  bool has_closure_id = !info()->closure().is_null() &&
577  !info()->closure().is_identical_to(environment->closure());
578  int closure_id = has_closure_id
579  ? DefineDeoptimizationLiteral(environment->closure())
580  : Translation::kSelfLiteralId;
581 
582  switch (environment->frame_type()) {
583  case JS_FUNCTION:
584  translation->BeginJSFrame(environment->ast_id(), closure_id, height);
585  break;
586  case JS_CONSTRUCT:
587  translation->BeginConstructStubFrame(closure_id, translation_size);
588  break;
589  case JS_GETTER:
590  ASSERT(translation_size == 1);
591  ASSERT(height == 0);
592  translation->BeginGetterStubFrame(closure_id);
593  break;
594  case JS_SETTER:
595  ASSERT(translation_size == 2);
596  ASSERT(height == 0);
597  translation->BeginSetterStubFrame(closure_id);
598  break;
599  case STUB:
600  translation->BeginCompiledStubFrame();
601  break;
602  case ARGUMENTS_ADAPTOR:
603  translation->BeginArgumentsAdaptorFrame(closure_id, translation_size);
604  break;
605  }
606 
607  int object_index = 0;
608  int dematerialized_index = 0;
609  for (int i = 0; i < translation_size; ++i) {
610  LOperand* value = environment->values()->at(i);
611  AddToTranslation(environment,
612  translation,
613  value,
614  environment->HasTaggedValueAt(i),
615  environment->HasUint32ValueAt(i),
616  &object_index,
617  &dematerialized_index);
618  }
619 }
620 
621 
622 void LCodeGen::AddToTranslation(LEnvironment* environment,
623  Translation* translation,
624  LOperand* op,
625  bool is_tagged,
626  bool is_uint32,
627  int* object_index_pointer,
628  int* dematerialized_index_pointer) {
629  if (op == LEnvironment::materialization_marker()) {
630  int object_index = (*object_index_pointer)++;
631  if (environment->ObjectIsDuplicateAt(object_index)) {
632  int dupe_of = environment->ObjectDuplicateOfAt(object_index);
633  translation->DuplicateObject(dupe_of);
634  return;
635  }
636  int object_length = environment->ObjectLengthAt(object_index);
637  if (environment->ObjectIsArgumentsAt(object_index)) {
638  translation->BeginArgumentsObject(object_length);
639  } else {
640  translation->BeginCapturedObject(object_length);
641  }
642  int dematerialized_index = *dematerialized_index_pointer;
643  int env_offset = environment->translation_size() + dematerialized_index;
644  *dematerialized_index_pointer += object_length;
645  for (int i = 0; i < object_length; ++i) {
646  LOperand* value = environment->values()->at(env_offset + i);
647  AddToTranslation(environment,
648  translation,
649  value,
650  environment->HasTaggedValueAt(env_offset + i),
651  environment->HasUint32ValueAt(env_offset + i),
652  object_index_pointer,
653  dematerialized_index_pointer);
654  }
655  return;
656  }
657 
658  if (op->IsStackSlot()) {
659  if (is_tagged) {
660  translation->StoreStackSlot(op->index());
661  } else if (is_uint32) {
662  translation->StoreUint32StackSlot(op->index());
663  } else {
664  translation->StoreInt32StackSlot(op->index());
665  }
666  } else if (op->IsDoubleStackSlot()) {
667  translation->StoreDoubleStackSlot(op->index());
668  } else if (op->IsRegister()) {
669  Register reg = ToRegister(op);
670  if (is_tagged) {
671  translation->StoreRegister(reg);
672  } else if (is_uint32) {
673  translation->StoreUint32Register(reg);
674  } else {
675  translation->StoreInt32Register(reg);
676  }
677  } else if (op->IsDoubleRegister()) {
678  DoubleRegister reg = ToDoubleRegister(op);
679  translation->StoreDoubleRegister(reg);
680  } else if (op->IsConstantOperand()) {
681  HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op));
682  int src_index = DefineDeoptimizationLiteral(constant->handle(isolate()));
683  translation->StoreLiteral(src_index);
684  } else {
685  UNREACHABLE();
686  }
687 }
688 
689 
690 void LCodeGen::CallCode(Handle<Code> code,
691  RelocInfo::Mode mode,
692  LInstruction* instr) {
693  CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT);
694 }
695 
696 
697 void LCodeGen::CallCodeGeneric(Handle<Code> code,
698  RelocInfo::Mode mode,
699  LInstruction* instr,
700  SafepointMode safepoint_mode) {
701  ASSERT(instr != NULL);
702  __ Call(code, mode);
703  RecordSafepointWithLazyDeopt(instr, safepoint_mode);
704 }
705 
706 
707 void LCodeGen::CallRuntime(const Runtime::Function* function,
708  int num_arguments,
709  LInstruction* instr,
710  SaveFPRegsMode save_doubles) {
711  ASSERT(instr != NULL);
712 
713  __ CallRuntime(function, num_arguments, save_doubles);
714 
715  RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
716 }
717 
718 
719 void LCodeGen::LoadContextFromDeferred(LOperand* context) {
720  if (context->IsRegister()) {
721  __ Move(cp, ToRegister(context));
722  } else if (context->IsStackSlot()) {
723  __ lw(cp, ToMemOperand(context));
724  } else if (context->IsConstantOperand()) {
725  HConstant* constant =
726  chunk_->LookupConstant(LConstantOperand::cast(context));
727  __ li(cp, Handle<Object>::cast(constant->handle(isolate())));
728  } else {
729  UNREACHABLE();
730  }
731 }
732 
733 
734 void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
735  int argc,
736  LInstruction* instr,
737  LOperand* context) {
738  LoadContextFromDeferred(context);
739  __ CallRuntimeSaveDoubles(id);
740  RecordSafepointWithRegisters(
741  instr->pointer_map(), argc, Safepoint::kNoLazyDeopt);
742 }
743 
744 
745 void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
746  Safepoint::DeoptMode mode) {
747  if (!environment->HasBeenRegistered()) {
748  // Physical stack frame layout:
749  // -x ............. -4 0 ..................................... y
750  // [incoming arguments] [spill slots] [pushed outgoing arguments]
751 
752  // Layout of the environment:
753  // 0 ..................................................... size-1
754  // [parameters] [locals] [expression stack including arguments]
755 
756  // Layout of the translation:
757  // 0 ........................................................ size - 1 + 4
758  // [expression stack including arguments] [locals] [4 words] [parameters]
759  // |>------------ translation_size ------------<|
760 
761  int frame_count = 0;
762  int jsframe_count = 0;
763  for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
764  ++frame_count;
765  if (e->frame_type() == JS_FUNCTION) {
766  ++jsframe_count;
767  }
768  }
769  Translation translation(&translations_, frame_count, jsframe_count, zone());
770  WriteTranslation(environment, &translation);
771  int deoptimization_index = deoptimizations_.length();
772  int pc_offset = masm()->pc_offset();
773  environment->Register(deoptimization_index,
774  translation.index(),
775  (mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
776  deoptimizations_.Add(environment, zone());
777  }
778 }
779 
780 
781 void LCodeGen::DeoptimizeIf(Condition condition,
782  LEnvironment* environment,
783  Deoptimizer::BailoutType bailout_type,
784  Register src1,
785  const Operand& src2) {
786  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
787  ASSERT(environment->HasBeenRegistered());
788  int id = environment->deoptimization_index();
789  ASSERT(info()->IsOptimizing() || info()->IsStub());
790  Address entry =
791  Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type);
792  if (entry == NULL) {
793  Abort(kBailoutWasNotPrepared);
794  return;
795  }
796 
797  if (FLAG_deopt_every_n_times != 0 && !info()->IsStub()) {
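// Stress mode: decrement the per-isolate counter and, once it reaches zero,
// reset it to FLAG_deopt_every_n_times and force an eager deopt.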
798  Register scratch = scratch0();
799  ExternalReference count = ExternalReference::stress_deopt_count(isolate());
800  Label no_deopt;
801  __ Push(a1, scratch);
802  __ li(scratch, Operand(count));
803  __ lw(a1, MemOperand(scratch));
804  __ Subu(a1, a1, Operand(1));
805  __ Branch(&no_deopt, ne, a1, Operand(zero_reg));
806  __ li(a1, Operand(FLAG_deopt_every_n_times));
807  __ sw(a1, MemOperand(scratch));
808  __ Pop(a1, scratch);
809 
810  __ Call(entry, RelocInfo::RUNTIME_ENTRY);
811  __ bind(&no_deopt);
812  __ sw(a1, MemOperand(scratch));
813  __ Pop(a1, scratch);
814  }
815 
816  if (info()->ShouldTrapOnDeopt()) {
817  Label skip;
818  if (condition != al) {
819  __ Branch(&skip, NegateCondition(condition), src1, src2);
820  }
821  __ stop("trap_on_deopt");
822  __ bind(&skip);
823  }
824 
825  ASSERT(info()->IsStub() || frame_is_built_);
826  // Go through jump table if we need to handle condition, build frame, or
827  // restore caller doubles.
828  if (condition == al && frame_is_built_ &&
829  !info()->saves_caller_doubles()) {
830  __ Call(entry, RelocInfo::RUNTIME_ENTRY, condition, src1, src2);
831  } else {
832  // We often have several deopts to the same entry, reuse the last
833  // jump entry if this is the case.
834  if (deopt_jump_table_.is_empty() ||
835  (deopt_jump_table_.last().address != entry) ||
836  (deopt_jump_table_.last().bailout_type != bailout_type) ||
837  (deopt_jump_table_.last().needs_frame != !frame_is_built_)) {
838  Deoptimizer::JumpTableEntry table_entry(entry,
839  bailout_type,
840  !frame_is_built_);
841  deopt_jump_table_.Add(table_entry, zone());
842  }
843  __ Branch(&deopt_jump_table_.last().label, condition, src1, src2);
844  }
845 }
846 
847 
848 void LCodeGen::DeoptimizeIf(Condition condition,
849  LEnvironment* environment,
850  Register src1,
851  const Operand& src2) {
852  Deoptimizer::BailoutType bailout_type = info()->IsStub()
853  ? Deoptimizer::LAZY
854  : Deoptimizer::EAGER;
855  DeoptimizeIf(condition, environment, bailout_type, src1, src2);
856 }
857 
858 
859 void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
860  int length = deoptimizations_.length();
861  if (length == 0) return;
862  Handle<DeoptimizationInputData> data =
863  factory()->NewDeoptimizationInputData(length, TENURED);
864 
865  Handle<ByteArray> translations =
866  translations_.CreateByteArray(isolate()->factory());
867  data->SetTranslationByteArray(*translations);
868  data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));
869  data->SetOptimizationId(Smi::FromInt(info_->optimization_id()));
870  if (info_->IsOptimizing()) {
871  // Reference to shared function info does not change between phases.
872  AllowDeferredHandleDereference allow_handle_dereference;
873  data->SetSharedFunctionInfo(*info_->shared_info());
874  } else {
875  data->SetSharedFunctionInfo(Smi::FromInt(0));
876  }
877 
878  Handle<FixedArray> literals =
879  factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
880  { AllowDeferredHandleDereference copy_handles;
881  for (int i = 0; i < deoptimization_literals_.length(); i++) {
882  literals->set(i, *deoptimization_literals_[i]);
883  }
884  data->SetLiteralArray(*literals);
885  }
886 
887  data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id().ToInt()));
888  data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_));
889 
890  // Populate the deoptimization entries.
891  for (int i = 0; i < length; i++) {
892  LEnvironment* env = deoptimizations_[i];
893  data->SetAstId(i, env->ast_id());
894  data->SetTranslationIndex(i, Smi::FromInt(env->translation_index()));
895  data->SetArgumentsStackHeight(i,
896  Smi::FromInt(env->arguments_stack_height()));
897  data->SetPc(i, Smi::FromInt(env->pc_offset()));
898  }
899  code->set_deoptimization_data(*data);
900 }
901 
902 
903 int LCodeGen::DefineDeoptimizationLiteral(Handle<Object> literal) {
904  int result = deoptimization_literals_.length();
905  for (int i = 0; i < deoptimization_literals_.length(); ++i) {
906  if (deoptimization_literals_[i].is_identical_to(literal)) return i;
907  }
908  deoptimization_literals_.Add(literal, zone());
909  return result;
910 }
911 
912 
913 void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
914  ASSERT(deoptimization_literals_.length() == 0);
915 
916  const ZoneList<Handle<JSFunction> >* inlined_closures =
917  chunk()->inlined_closures();
918 
919  for (int i = 0, length = inlined_closures->length();
920  i < length;
921  i++) {
922  DefineDeoptimizationLiteral(inlined_closures->at(i));
923  }
924 
925  inlined_function_count_ = deoptimization_literals_.length();
926 }
927 
928 
929 void LCodeGen::RecordSafepointWithLazyDeopt(
930  LInstruction* instr, SafepointMode safepoint_mode) {
931  if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
932  RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt);
933  } else {
934  ASSERT(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
935  RecordSafepointWithRegisters(
936  instr->pointer_map(), 0, Safepoint::kLazyDeopt);
937  }
938 }
939 
940 
941 void LCodeGen::RecordSafepoint(
942  LPointerMap* pointers,
943  Safepoint::Kind kind,
944  int arguments,
945  Safepoint::DeoptMode deopt_mode) {
946  ASSERT(expected_safepoint_kind_ == kind);
947 
948  const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();
949  Safepoint safepoint = safepoints_.DefineSafepoint(masm(),
950  kind, arguments, deopt_mode);
951  for (int i = 0; i < operands->length(); i++) {
952  LOperand* pointer = operands->at(i);
953  if (pointer->IsStackSlot()) {
954  safepoint.DefinePointerSlot(pointer->index(), zone());
955  } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
956  safepoint.DefinePointerRegister(ToRegister(pointer), zone());
957  }
958  }
959 }
960 
961 
962 void LCodeGen::RecordSafepoint(LPointerMap* pointers,
963  Safepoint::DeoptMode deopt_mode) {
964  RecordSafepoint(pointers, Safepoint::kSimple, 0, deopt_mode);
965 }
966 
967 
968 void LCodeGen::RecordSafepoint(Safepoint::DeoptMode deopt_mode) {
969  LPointerMap empty_pointers(zone());
970  RecordSafepoint(&empty_pointers, deopt_mode);
971 }
972 
973 
974 void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
975  int arguments,
976  Safepoint::DeoptMode deopt_mode) {
977  RecordSafepoint(
978  pointers, Safepoint::kWithRegisters, arguments, deopt_mode);
979 }
980 
981 
982 void LCodeGen::RecordSafepointWithRegistersAndDoubles(
983  LPointerMap* pointers,
984  int arguments,
985  Safepoint::DeoptMode deopt_mode) {
986  RecordSafepoint(
987  pointers, Safepoint::kWithRegistersAndDoubles, arguments, deopt_mode);
988 }
989 
990 
991 void LCodeGen::RecordAndWritePosition(int position) {
992  if (position == RelocInfo::kNoPosition) return;
993  masm()->positions_recorder()->RecordPosition(position);
994  masm()->positions_recorder()->WriteRecordedPositions();
995 }
996 
997 
998 static const char* LabelType(LLabel* label) {
999  if (label->is_loop_header()) return " (loop header)";
1000  if (label->is_osr_entry()) return " (OSR entry)";
1001  return "";
1002 }
1003 
1004 
1005 void LCodeGen::DoLabel(LLabel* label) {
1006  Comment(";;; <@%d,#%d> -------------------- B%d%s --------------------",
1007  current_instruction_,
1008  label->hydrogen_value()->id(),
1009  label->block_id(),
1010  LabelType(label));
1011  __ bind(label->label());
1012  current_block_ = label->block_id();
1013  DoGap(label);
1014 }
1015 
1016 
1017 void LCodeGen::DoParallelMove(LParallelMove* move) {
1018  resolver_.Resolve(move);
1019 }
1020 
1021 
1022 void LCodeGen::DoGap(LGap* gap) {
1023  for (int i = LGap::FIRST_INNER_POSITION;
1024  i <= LGap::LAST_INNER_POSITION;
1025  i++) {
1026  LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i);
1027  LParallelMove* move = gap->GetParallelMove(inner_pos);
1028  if (move != NULL) DoParallelMove(move);
1029  }
1030 }
1031 
1032 
1033 void LCodeGen::DoInstructionGap(LInstructionGap* instr) {
1034  DoGap(instr);
1035 }
1036 
1037 
1038 void LCodeGen::DoParameter(LParameter* instr) {
1039  // Nothing to do.
1040 }
1041 
1042 
1043 void LCodeGen::DoCallStub(LCallStub* instr) {
1044  ASSERT(ToRegister(instr->context()).is(cp));
1045  ASSERT(ToRegister(instr->result()).is(v0));
1046  switch (instr->hydrogen()->major_key()) {
1047  case CodeStub::RegExpExec: {
1048  RegExpExecStub stub;
1049  CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
1050  break;
1051  }
1052  case CodeStub::SubString: {
1053  SubStringStub stub;
1054  CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
1055  break;
1056  }
1057  case CodeStub::StringCompare: {
1058  StringCompareStub stub;
1059  CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
1060  break;
1061  }
1062  default:
1063  UNREACHABLE();
1064  }
1065 }
1066 
1067 
1068 void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
1069  GenerateOsrPrologue();
1070 }
1071 
1072 
1073 void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) {
1074  Register dividend = ToRegister(instr->dividend());
1075  int32_t divisor = instr->divisor();
1076  ASSERT(dividend.is(ToRegister(instr->result())));
1077 
1078  // Theoretically, a variation of the branch-free code for integer division by
1079  // a power of 2 (calculating the remainder via an additional multiplication
1080  // (which gets simplified to an 'and') and subtraction) should be faster, and
1081  // this is exactly what GCC and clang emit. Nevertheless, benchmarks seem to
1082  // indicate that positive dividends are heavily favored, so the branching
1083  // version performs better.
1084  HMod* hmod = instr->hydrogen();
1085  int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
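// For example, divisor == 8 and divisor == -8 both yield mask == 7, so the
// And below reduces a non-negative dividend modulo 8.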
1086  Label dividend_is_not_negative, done;
1087 
1088  if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) {
1089  __ Branch(&dividend_is_not_negative, ge, dividend, Operand(zero_reg));
1090  // Note: The code below even works when right contains kMinInt.
1091  __ subu(dividend, zero_reg, dividend);
1092  __ And(dividend, dividend, Operand(mask));
1093  if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
1094  DeoptimizeIf(eq, instr->environment(), dividend, Operand(zero_reg));
1095  }
1096  __ Branch(USE_DELAY_SLOT, &done);
1097  __ subu(dividend, zero_reg, dividend);
1098  }
1099 
1100  __ bind(&dividend_is_not_negative);
1101  __ And(dividend, dividend, Operand(mask));
1102  __ bind(&done);
1103 }
1104 
1105 
1106 void LCodeGen::DoModByConstI(LModByConstI* instr) {
1107  Register dividend = ToRegister(instr->dividend());
1108  int32_t divisor = instr->divisor();
1109  Register result = ToRegister(instr->result());
1110  ASSERT(!dividend.is(result));
1111 
1112  if (divisor == 0) {
1113  DeoptimizeIf(al, instr->environment());
1114  return;
1115  }
1116 
1117  __ TruncatingDiv(result, dividend, Abs(divisor));
1118  __ Mul(result, result, Operand(Abs(divisor)));
1119  __ Subu(result, dividend, Operand(result));
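// This computes dividend - (dividend / |divisor|) * |divisor|, matching C
// semantics; e.g. dividend == -7, divisor == 3 gives -7 - (-2 * 3) == -1.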
1120 
1121  // Check for negative zero.
1122  HMod* hmod = instr->hydrogen();
1123  if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
1124  Label remainder_not_zero;
1125  __ Branch(&remainder_not_zero, ne, result, Operand(zero_reg));
1126  DeoptimizeIf(lt, instr->environment(), dividend, Operand(zero_reg));
1127  __ bind(&remainder_not_zero);
1128  }
1129 }
1130 
1131 
1132 void LCodeGen::DoModI(LModI* instr) {
1133  HMod* hmod = instr->hydrogen();
1134  const Register left_reg = ToRegister(instr->left());
1135  const Register right_reg = ToRegister(instr->right());
1136  const Register result_reg = ToRegister(instr->result());
1137 
1138  // div runs in the background while we check for special cases.
1139  __ div(left_reg, right_reg);
1140 
1141  Label done;
1142  // Check for x % 0, we have to deopt in this case because we can't return a
1143  // NaN.
1144  if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
1145  DeoptimizeIf(eq, instr->environment(), right_reg, Operand(zero_reg));
1146  }
1147 
1148  // Check for kMinInt % -1, div will return kMinInt, which is not what we
1149  // want. We have to deopt if we care about -0, because we can't return that.
1150  if (hmod->CheckFlag(HValue::kCanOverflow)) {
1151  Label no_overflow_possible;
1152  __ Branch(&no_overflow_possible, ne, left_reg, Operand(kMinInt));
1153  if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
1154  DeoptimizeIf(eq, instr->environment(), right_reg, Operand(-1));
1155  } else {
1156  __ Branch(&no_overflow_possible, ne, right_reg, Operand(-1));
1157  __ Branch(USE_DELAY_SLOT, &done);
1158  __ mov(result_reg, zero_reg);
1159  }
1160  __ bind(&no_overflow_possible);
1161  }
1162 
1163  // If we care about -0, test if the dividend is <0 and the result is 0.
1164  __ Branch(USE_DELAY_SLOT, &done, ge, left_reg, Operand(zero_reg));
1165  __ mfhi(result_reg);
1166  if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
1167  DeoptimizeIf(eq, instr->environment(), result_reg, Operand(zero_reg));
1168  }
1169  __ bind(&done);
1170 }
1171 
1172 
1173 void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
1174  Register dividend = ToRegister(instr->dividend());
1175  int32_t divisor = instr->divisor();
1176  Register result = ToRegister(instr->result());
1177  ASSERT(divisor == kMinInt || (divisor != 0 && IsPowerOf2(Abs(divisor))));
1178  ASSERT(!result.is(dividend));
1179 
1180  // Check for (0 / -x) that will produce negative zero.
1181  HDiv* hdiv = instr->hydrogen();
1182  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
1183  DeoptimizeIf(eq, instr->environment(), dividend, Operand(zero_reg));
1184  }
1185  // Check for (kMinInt / -1).
1186  if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
1187  DeoptimizeIf(eq, instr->environment(), dividend, Operand(kMinInt));
1188  }
1189  // Deoptimize if remainder will not be 0.
1190  if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
1191  divisor != 1 && divisor != -1) {
1192  int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
1193  __ And(at, dividend, Operand(mask));
1194  DeoptimizeIf(ne, instr->environment(), at, Operand(zero_reg));
1195  }
1196 
1197  if (divisor == -1) { // Nice shortcut, not needed for correctness.
1198  __ Subu(result, zero_reg, dividend);
1199  return;
1200  }
1201  uint16_t shift = WhichPowerOf2Abs(divisor);
1202  if (shift == 0) {
1203  __ Move(result, dividend);
1204  } else if (shift == 1) {
1205  __ srl(result, dividend, 31);
1206  __ Addu(result, dividend, Operand(result));
1207  } else {
1208  __ sra(result, dividend, 31);
1209  __ srl(result, result, 32 - shift);
1210  __ Addu(result, dividend, Operand(result));
1211  }
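// The bias computed above is 2^shift - 1 for negative dividends and 0
// otherwise, so the arithmetic shift below rounds toward zero; e.g.
// dividend == -5, shift == 2: (-5 + 3) >> 2 == -1 rather than -2.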
1212  if (shift > 0) __ sra(result, result, shift);
1213  if (divisor < 0) __ Subu(result, zero_reg, result);
1214 }
1215 
1216 
1217 void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
1218  Register dividend = ToRegister(instr->dividend());
1219  int32_t divisor = instr->divisor();
1220  Register result = ToRegister(instr->result());
1221  ASSERT(!dividend.is(result));
1222 
1223  if (divisor == 0) {
1224  DeoptimizeIf(al, instr->environment());
1225  return;
1226  }
1227 
1228  // Check for (0 / -x) that will produce negative zero.
1229  HDiv* hdiv = instr->hydrogen();
1230  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
1231  DeoptimizeIf(eq, instr->environment(), dividend, Operand(zero_reg));
1232  }
1233 
1234  __ TruncatingDiv(result, dividend, Abs(divisor));
1235  if (divisor < 0) __ Subu(result, zero_reg, result);
1236 
1237  if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
1238  __ Mul(scratch0(), result, Operand(divisor));
1239  __ Subu(scratch0(), scratch0(), dividend);
1240  DeoptimizeIf(ne, instr->environment(), scratch0(), Operand(zero_reg));
1241  }
1242 }
1243 
1244 
1245 void LCodeGen::DoDivI(LDivI* instr) {
1246  HBinaryOperation* hdiv = instr->hydrogen();
1247  const Register left = ToRegister(instr->left());
1248  const Register right = ToRegister(instr->right());
1249  const Register result = ToRegister(instr->result());
1250 
1251  // On MIPS div is asynchronous - it will run in the background while we
1252  // check for special cases.
1253  __ div(left, right);
1254 
1255  // Check for x / 0.
1256  if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
1257  DeoptimizeIf(eq, instr->environment(), right, Operand(zero_reg));
1258  }
1259 
1260  // Check for (0 / -x) that will produce negative zero.
1261  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
1262  Label left_not_zero;
1263  __ Branch(&left_not_zero, ne, left, Operand(zero_reg));
1264  DeoptimizeIf(lt, instr->environment(), right, Operand(zero_reg));
1265  __ bind(&left_not_zero);
1266  }
1267 
1268  // Check for (kMinInt / -1).
1269  if (hdiv->CheckFlag(HValue::kCanOverflow) &&
1270  !hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
1271  Label left_not_min_int;
1272  __ Branch(&left_not_min_int, ne, left, Operand(kMinInt));
1273  DeoptimizeIf(eq, instr->environment(), right, Operand(-1));
1274  __ bind(&left_not_min_int);
1275  }
1276 
1277  if (hdiv->IsMathFloorOfDiv()) {
1278  // We performed a truncating division. Correct the result if necessary.
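// Flooring differs from truncation only when the remainder is non-zero and
// its sign differs from the divisor's; e.g. -7 / 2 truncates to -3 with
// remainder -1, and the fix-up below turns it into floor(-7 / 2) == -4.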
1279  Label done;
1280  Register remainder = scratch0();
1281  __ mfhi(remainder);
1282  __ mflo(result);
1283  __ Branch(&done, eq, remainder, Operand(zero_reg), USE_DELAY_SLOT);
1284  __ Xor(remainder, remainder, Operand(right));
1285  __ Branch(&done, ge, remainder, Operand(zero_reg));
1286  __ Subu(result, result, Operand(1));
1287  __ bind(&done);
1288  } else if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
1289  __ mfhi(result);
1290  DeoptimizeIf(ne, instr->environment(), result, Operand(zero_reg));
1291  __ mflo(result);
1292  } else {
1293  __ mflo(result);
1294  }
1295 }
1296 
1297 
1298 void LCodeGen::DoMultiplyAddD(LMultiplyAddD* instr) {
1299  DoubleRegister addend = ToDoubleRegister(instr->addend());
1300  DoubleRegister multiplier = ToDoubleRegister(instr->multiplier());
1301  DoubleRegister multiplicand = ToDoubleRegister(instr->multiplicand());
1302 
1303  // This is computed in-place.
1304  ASSERT(addend.is(ToDoubleRegister(instr->result())));
1305 
1306  __ madd_d(addend, addend, multiplier, multiplicand);
1307 }
1308 
1309 
1310 void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
1311  Register dividend = ToRegister(instr->dividend());
1312  Register result = ToRegister(instr->result());
1313  int32_t divisor = instr->divisor();
1314  Register scratch = scratch0();
1315  ASSERT(!scratch.is(dividend));
1316 
1317  // If the divisor is positive, things are easy: There can be no deopts and we
1318  // can simply do an arithmetic right shift.
1319  if (divisor == 1) return;
1320  uint16_t shift = WhichPowerOf2Abs(divisor);
1321  if (divisor > 1) {
1322  __ sra(result, dividend, shift);
1323  return;
1324  }
1325 
1326  // If the divisor is negative, we have to negate and handle edge cases.
1327  if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
1328  __ Move(scratch, dividend);
1329  }
1330  __ Subu(result, zero_reg, dividend);
1331  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
1332  DeoptimizeIf(eq, instr->environment(), result, Operand(zero_reg));
1333  }
1334  if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
1335  // Note that we could emit branch-free code, but that would need one more
1336  // register.
1337  __ Xor(at, scratch, result);
1338  if (divisor == -1) {
1339  DeoptimizeIf(ge, instr->environment(), at, Operand(zero_reg));
1340  __ sra(result, dividend, shift);
1341  } else {
1342  Label no_overflow, done;
1343  __ Branch(&no_overflow, lt, at, Operand(zero_reg));
1344  __ li(result, Operand(kMinInt / divisor));
1345  __ Branch(&done);
1346  __ bind(&no_overflow);
1347  __ sra(result, dividend, shift);
1348  __ bind(&done);
1349  }
1350  } else {
1351  __ sra(result, dividend, shift);
1352  }
1353 }
1354 
1355 
1356 void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
1357  Register dividend = ToRegister(instr->dividend());
1358  int32_t divisor = instr->divisor();
1359  Register result = ToRegister(instr->result());
1360  ASSERT(!dividend.is(result));
1361 
1362  if (divisor == 0) {
1363  DeoptimizeIf(al, instr->environment());
1364  return;
1365  }
1366 
1367  // Check for (0 / -x) that will produce negative zero.
1368  HMathFloorOfDiv* hdiv = instr->hydrogen();
1369  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
1370  DeoptimizeIf(eq, instr->environment(), dividend, Operand(zero_reg));
1371  }
1372 
1373  // Easy case: We need no dynamic check for the dividend and the flooring
1374  // division is the same as the truncating division.
1375  if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) ||
1376  (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) {
1377  __ TruncatingDiv(result, dividend, Abs(divisor));
1378  if (divisor < 0) __ Subu(result, zero_reg, result);
1379  return;
1380  }
1381 
1382  // In the general case we may need to adjust before and after the truncating
1383  // division to get a flooring division.
1384  Register temp = ToRegister(instr->temp());
1385  ASSERT(!temp.is(dividend) && !temp.is(result));
1386  Label needs_adjustment, done;
1387  __ Branch(&needs_adjustment, divisor > 0 ? lt : gt,
1388  dividend, Operand(zero_reg));
1389  __ TruncatingDiv(result, dividend, Abs(divisor));
1390  if (divisor < 0) __ Subu(result, zero_reg, result);
1391  __ jmp(&done);
1392  __ bind(&needs_adjustment);
1393  __ Addu(temp, dividend, Operand(divisor > 0 ? 1 : -1));
1394  __ TruncatingDiv(result, temp, Abs(divisor));
1395  if (divisor < 0) __ Subu(result, zero_reg, result);
1396  __ Subu(result, result, Operand(1));
1397  __ bind(&done);
1398 }
1399 
1400 
1401 void LCodeGen::DoMulI(LMulI* instr) {
1402  Register scratch = scratch0();
1403  Register result = ToRegister(instr->result());
1404  // Note that result may alias left.
1405  Register left = ToRegister(instr->left());
1406  LOperand* right_op = instr->right();
1407 
1408  bool bailout_on_minus_zero =
1409  instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero);
1410  bool overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
1411 
1412  if (right_op->IsConstantOperand()) {
1413  int32_t constant = ToInteger32(LConstantOperand::cast(right_op));
1414 
1415  if (bailout_on_minus_zero && (constant < 0)) {
1416  // The case of a zero constant will be handled separately.
1417  // If constant is negative and left is zero, the result should be -0.
1418  DeoptimizeIf(eq, instr->environment(), left, Operand(zero_reg));
1419  }
1420 
1421  switch (constant) {
1422  case -1:
1423  if (overflow) {
1424  __ SubuAndCheckForOverflow(result, zero_reg, left, scratch);
1425  DeoptimizeIf(lt, instr->environment(), scratch, Operand(zero_reg));
1426  } else {
1427  __ Subu(result, zero_reg, left);
1428  }
1429  break;
1430  case 0:
1431  if (bailout_on_minus_zero) {
1432  // If left is strictly negative and the constant is zero, the
1433  // result is -0. Deoptimize if required, otherwise return 0.
1434  DeoptimizeIf(lt, instr->environment(), left, Operand(zero_reg));
1435  }
1436  __ mov(result, zero_reg);
1437  break;
1438  case 1:
1439  // Nothing to do.
1440  __ Move(result, left);
1441  break;
1442  default:
1443  // Multiplying by powers of two and powers of two plus or minus
1444  // one can be done faster with shifted operands.
1445  // For other constants we emit standard code.
1446  int32_t mask = constant >> 31;
1447  uint32_t constant_abs = (constant + mask) ^ mask;
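// (constant + mask) ^ mask is a branch-free absolute value: for
// constant == -6, mask == -1 and (-6 + -1) ^ -1 == 6.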
1448 
1449  if (IsPowerOf2(constant_abs)) {
1450  int32_t shift = WhichPowerOf2(constant_abs);
1451  __ sll(result, left, shift);
1452  // Correct the sign of the result if the constant is negative.
1453  if (constant < 0) __ Subu(result, zero_reg, result);
1454  } else if (IsPowerOf2(constant_abs - 1)) {
1455  int32_t shift = WhichPowerOf2(constant_abs - 1);
1456  __ sll(scratch, left, shift);
1457  __ Addu(result, scratch, left);
1458  // Correct the sign of the result if the constant is negative.
1459  if (constant < 0) __ Subu(result, zero_reg, result);
1460  } else if (IsPowerOf2(constant_abs + 1)) {
1461  int32_t shift = WhichPowerOf2(constant_abs + 1);
1462  __ sll(scratch, left, shift);
1463  __ Subu(result, scratch, left);
1464  // Correct the sign of the result if the constant is negative.
1465  if (constant < 0) __ Subu(result, zero_reg, result);
1466  } else {
1467  // Generate standard code.
1468  __ li(at, constant);
1469  __ Mul(result, left, at);
1470  }
1471  }
1472 
1473  } else {
1474  ASSERT(right_op->IsRegister());
1475  Register right = ToRegister(right_op);
1476 
1477  if (overflow) {
1478  // hi:lo = left * right.
1479  if (instr->hydrogen()->representation().IsSmi()) {
1480  __ SmiUntag(result, left);
1481  __ mult(result, right);
1482  __ mfhi(scratch);
1483  __ mflo(result);
1484  } else {
1485  __ mult(left, right);
1486  __ mfhi(scratch);
1487  __ mflo(result);
1488  }
1489  __ sra(at, result, 31);
1490  DeoptimizeIf(ne, instr->environment(), scratch, Operand(at));
1491  } else {
1492  if (instr->hydrogen()->representation().IsSmi()) {
1493  __ SmiUntag(result, left);
1494  __ Mul(result, result, right);
1495  } else {
1496  __ Mul(result, left, right);
1497  }
1498  }
1499 
1500  if (bailout_on_minus_zero) {
1501  Label done;
1502  __ Xor(at, left, right);
1503  __ Branch(&done, ge, at, Operand(zero_reg));
1504  // Bail out if the result is minus zero.
1505  DeoptimizeIf(eq,
1506  instr->environment(),
1507  result,
1508  Operand(zero_reg));
1509  __ bind(&done);
1510  }
1511  }
1512 }
1513 
1514 
1515 void LCodeGen::DoBitI(LBitI* instr) {
1516  LOperand* left_op = instr->left();
1517  LOperand* right_op = instr->right();
1518  ASSERT(left_op->IsRegister());
1519  Register left = ToRegister(left_op);
1520  Register result = ToRegister(instr->result());
1521  Operand right(no_reg);
1522 
1523  if (right_op->IsStackSlot()) {
1524  right = Operand(EmitLoadRegister(right_op, at));
1525  } else {
1526  ASSERT(right_op->IsRegister() || right_op->IsConstantOperand());
1527  right = ToOperand(right_op);
1528  }
1529 
1530  switch (instr->op()) {
1531  case Token::BIT_AND:
1532  __ And(result, left, right);
1533  break;
1534  case Token::BIT_OR:
1535  __ Or(result, left, right);
1536  break;
1537  case Token::BIT_XOR:
1538  if (right_op->IsConstantOperand() && right.immediate() == int32_t(~0)) {
1539  __ Nor(result, zero_reg, left);
1540  } else {
1541  __ Xor(result, left, right);
1542  }
1543  break;
1544  default:
1545  UNREACHABLE();
1546  break;
1547  }
1548 }
1549 
1550 
1551 void LCodeGen::DoShiftI(LShiftI* instr) {
1552  // Both 'left' and 'right' are "used at start" (see LCodeGen::DoShift), so
1553  // result may alias either of them.
1554  LOperand* right_op = instr->right();
1555  Register left = ToRegister(instr->left());
1556  Register result = ToRegister(instr->result());
1557  Register scratch = scratch0();
1558 
1559  if (right_op->IsRegister()) {
1560  // No need to mask the right operand on MIPS, it is built into the variable
1561  // shift instructions.
1562  switch (instr->op()) {
1563  case Token::ROR:
1564  __ Ror(result, left, Operand(ToRegister(right_op)));
1565  break;
1566  case Token::SAR:
1567  __ srav(result, left, ToRegister(right_op));
1568  break;
1569  case Token::SHR:
1570  __ srlv(result, left, ToRegister(right_op));
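// A zero shift amount leaves the sign bit in place; a negative result then
// denotes a uint32 value outside the int32 range, hence the deopt below.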
1571  if (instr->can_deopt()) {
1572  DeoptimizeIf(lt, instr->environment(), result, Operand(zero_reg));
1573  }
1574  break;
1575  case Token::SHL:
1576  __ sllv(result, left, ToRegister(right_op));
1577  break;
1578  default:
1579  UNREACHABLE();
1580  break;
1581  }
1582  } else {
1583  // Mask the right_op operand.
1584  int value = ToInteger32(LConstantOperand::cast(right_op));
1585  uint8_t shift_count = static_cast<uint8_t>(value & 0x1F);
1586  switch (instr->op()) {
1587  case Token::ROR:
1588  if (shift_count != 0) {
1589  __ Ror(result, left, Operand(shift_count));
1590  } else {
1591  __ Move(result, left);
1592  }
1593  break;
1594  case Token::SAR:
1595  if (shift_count != 0) {
1596  __ sra(result, left, shift_count);
1597  } else {
1598  __ Move(result, left);
1599  }
1600  break;
1601  case Token::SHR:
1602  if (shift_count != 0) {
1603  __ srl(result, left, shift_count);
1604  } else {
1605  if (instr->can_deopt()) {
1606  __ And(at, left, Operand(0x80000000));
1607  DeoptimizeIf(ne, instr->environment(), at, Operand(zero_reg));
1608  }
1609  __ Move(result, left);
1610  }
1611  break;
1612  case Token::SHL:
1613  if (shift_count != 0) {
1614  if (instr->hydrogen_value()->representation().IsSmi() &&
1615  instr->can_deopt()) {
1616  if (shift_count != 1) {
1617  __ sll(result, left, shift_count - 1);
1618  __ SmiTagCheckOverflow(result, result, scratch);
1619  } else {
1620  __ SmiTagCheckOverflow(result, left, scratch);
1621  }
1622  DeoptimizeIf(lt, instr->environment(), scratch, Operand(zero_reg));
1623  } else {
1624  __ sll(result, left, shift_count);
1625  }
1626  } else {
1627  __ Move(result, left);
1628  }
1629  break;
1630  default:
1631  UNREACHABLE();
1632  break;
1633  }
1634  }
1635 }
1636 
1637 
1638 void LCodeGen::DoSubI(LSubI* instr) {
1639  LOperand* left = instr->left();
1640  LOperand* right = instr->right();
1641  LOperand* result = instr->result();
1642  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
1643 
1644  if (!can_overflow) {
1645  if (right->IsStackSlot()) {
1646  Register right_reg = EmitLoadRegister(right, at);
1647  __ Subu(ToRegister(result), ToRegister(left), Operand(right_reg));
1648  } else {
1649  ASSERT(right->IsRegister() || right->IsConstantOperand());
1650  __ Subu(ToRegister(result), ToRegister(left), ToOperand(right));
1651  }
1652  } else { // can_overflow.
1653  Register overflow = scratch0();
1654  Register scratch = scratch1();
1655  if (right->IsStackSlot() || right->IsConstantOperand()) {
1656  Register right_reg = EmitLoadRegister(right, scratch);
1657  __ SubuAndCheckForOverflow(ToRegister(result),
1658  ToRegister(left),
1659  right_reg,
1660  overflow); // Reg at also used as scratch.
1661  } else {
1662  ASSERT(right->IsRegister());
1663  // The overflow check macros do not support constant operands, so the
1664  // IsConstantOperand case is handled in the previous clause.
1665  __ SubuAndCheckForOverflow(ToRegister(result),
1666  ToRegister(left),
1667  ToRegister(right),
1668  overflow); // Reg at also used as scratch.
1669  }
1670  DeoptimizeIf(lt, instr->environment(), overflow, Operand(zero_reg));
1671  }
1672 }
1673 
1674 
1675 void LCodeGen::DoConstantI(LConstantI* instr) {
1676  __ li(ToRegister(instr->result()), Operand(instr->value()));
1677 }
1678 
1679 
1680 void LCodeGen::DoConstantS(LConstantS* instr) {
1681  __ li(ToRegister(instr->result()), Operand(instr->value()));
1682 }
1683 
1684 
1685 void LCodeGen::DoConstantD(LConstantD* instr) {
1686  ASSERT(instr->result()->IsDoubleRegister());
1687  DoubleRegister result = ToDoubleRegister(instr->result());
1688  double v = instr->value();
1689  __ Move(result, v);
1690 }
1691 
1692 
1693 void LCodeGen::DoConstantE(LConstantE* instr) {
1694  __ li(ToRegister(instr->result()), Operand(instr->value()));
1695 }
1696 
1697 
1698 void LCodeGen::DoConstantT(LConstantT* instr) {
1699  Handle<Object> value = instr->value(isolate());
1700  AllowDeferredHandleDereference smi_check;
1701  __ li(ToRegister(instr->result()), value);
1702 }
1703 
1704 
1705 void LCodeGen::DoMapEnumLength(LMapEnumLength* instr) {
1706  Register result = ToRegister(instr->result());
1707  Register map = ToRegister(instr->value());
1708  __ EnumLength(result, map);
1709 }
1710 
1711 
1712 void LCodeGen::DoDateField(LDateField* instr) {
1713  Register object = ToRegister(instr->date());
1714  Register result = ToRegister(instr->result());
1715  Register scratch = ToRegister(instr->temp());
1716  Smi* index = instr->index();
1717  Label runtime, done;
1718  ASSERT(object.is(a0));
1719  ASSERT(result.is(v0));
1720  ASSERT(!scratch.is(scratch0()));
1721  ASSERT(!scratch.is(object));
1722 
1723  __ SmiTst(object, at);
1724  DeoptimizeIf(eq, instr->environment(), at, Operand(zero_reg));
1725  __ GetObjectType(object, scratch, scratch);
1726  DeoptimizeIf(ne, instr->environment(), scratch, Operand(JS_DATE_TYPE));
1727 
1728  if (index->value() == 0) {
1729  __ lw(result, FieldMemOperand(object, JSDate::kValueOffset));
1730  } else {
1731  if (index->value() < JSDate::kFirstUncachedField) {
1732  ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
1733  __ li(scratch, Operand(stamp));
1734  __ lw(scratch, MemOperand(scratch));
1735  __ lw(scratch0(), FieldMemOperand(object, JSDate::kCacheStampOffset));
1736  __ Branch(&runtime, ne, scratch, Operand(scratch0()));
1737  __ lw(result, FieldMemOperand(object, JSDate::kValueOffset +
1738  kPointerSize * index->value()));
1739  __ jmp(&done);
1740  }
1741  __ bind(&runtime);
1742  __ PrepareCallCFunction(2, scratch);
1743  __ li(a1, Operand(index));
1744  __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
1745  __ bind(&done);
1746  }
1747 }
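// Only JSDate::kValueOffset (index 0) is always valid; the other cached date
// fields are usable only while the cache stamp stored on the object matches
// the isolate-wide date_cache_stamp. The fast path above compares the two
// stamps and falls back to the get_date_field_function C call when they
// differ or when the requested field is at or beyond kFirstUncachedField.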
1748 
1749 
1750 MemOperand LCodeGen::BuildSeqStringOperand(Register string,
1751  LOperand* index,
1752  String::Encoding encoding) {
1753  if (index->IsConstantOperand()) {
1754  int offset = ToInteger32(LConstantOperand::cast(index));
1755  if (encoding == String::TWO_BYTE_ENCODING) {
1756  offset *= kUC16Size;
1757  }
1758  STATIC_ASSERT(kCharSize == 1);
1759  return FieldMemOperand(string, SeqString::kHeaderSize + offset);
1760  }
1761  Register scratch = scratch0();
1762  ASSERT(!scratch.is(string));
1763  ASSERT(!scratch.is(ToRegister(index)));
1764  if (encoding == String::ONE_BYTE_ENCODING) {
1765  __ Addu(scratch, string, ToRegister(index));
1766  } else {
1767  STATIC_ASSERT(kUC16Size == 2);
1768  __ sll(scratch, ToRegister(index), 1);
1769  __ Addu(scratch, string, scratch);
1770  }
1771  return FieldMemOperand(scratch, SeqString::kHeaderSize);
1772 }
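// BuildSeqStringOperand in effect computes string + SeqString::kHeaderSize +
// index * char_size (minus the heap-object tag, via FieldMemOperand). With a
// constant index the whole offset is folded into the operand; with a register
// index the address is formed in scratch0(), using an extra one-bit left
// shift of the index for two-byte strings.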
1773 
1774 
1775 void LCodeGen::DoSeqStringGetChar(LSeqStringGetChar* instr) {
1776  String::Encoding encoding = instr->hydrogen()->encoding();
1777  Register string = ToRegister(instr->string());
1778  Register result = ToRegister(instr->result());
1779 
1780  if (FLAG_debug_code) {
1781  Register scratch = scratch0();
1782  __ lw(scratch, FieldMemOperand(string, HeapObject::kMapOffset));
1783  __ lbu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
1784 
1785  __ And(scratch, scratch,
1786  Operand(kStringRepresentationMask | kStringEncodingMask));
1787  static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
1788  static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
1789  __ Subu(at, scratch, Operand(encoding == String::ONE_BYTE_ENCODING
1790  ? one_byte_seq_type : two_byte_seq_type));
1791  __ Check(eq, kUnexpectedStringType, at, Operand(zero_reg));
1792  }
1793 
1794  MemOperand operand = BuildSeqStringOperand(string, instr->index(), encoding);
1795  if (encoding == String::ONE_BYTE_ENCODING) {
1796  __ lbu(result, operand);
1797  } else {
1798  __ lhu(result, operand);
1799  }
1800 }
1801 
1802 
1803 void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
1804  String::Encoding encoding = instr->hydrogen()->encoding();
1805  Register string = ToRegister(instr->string());
1806  Register value = ToRegister(instr->value());
1807 
1808  if (FLAG_debug_code) {
1809  Register scratch = scratch0();
1810  Register index = ToRegister(instr->index());
1811  static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
1812  static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
1813  int encoding_mask =
1814  instr->hydrogen()->encoding() == String::ONE_BYTE_ENCODING
1815  ? one_byte_seq_type : two_byte_seq_type;
1816  __ EmitSeqStringSetCharCheck(string, index, value, scratch, encoding_mask);
1817  }
1818 
1819  MemOperand operand = BuildSeqStringOperand(string, instr->index(), encoding);
1820  if (encoding == String::ONE_BYTE_ENCODING) {
1821  __ sb(value, operand);
1822  } else {
1823  __ sh(value, operand);
1824  }
1825 }
1826 
1827 
1828 void LCodeGen::DoAddI(LAddI* instr) {
1829  LOperand* left = instr->left();
1830  LOperand* right = instr->right();
1831  LOperand* result = instr->result();
1832  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
1833 
1834  if (!can_overflow) {
1835  if (right->IsStackSlot()) {
1836  Register right_reg = EmitLoadRegister(right, at);
1837  __ Addu(ToRegister(result), ToRegister(left), Operand(right_reg));
1838  } else {
1839  ASSERT(right->IsRegister() || right->IsConstantOperand());
1840  __ Addu(ToRegister(result), ToRegister(left), ToOperand(right));
1841  }
1842  } else { // can_overflow.
1843  Register overflow = scratch0();
1844  Register scratch = scratch1();
1845  if (right->IsStackSlot() ||
1846  right->IsConstantOperand()) {
1847  Register right_reg = EmitLoadRegister(right, scratch);
1848  __ AdduAndCheckForOverflow(ToRegister(result),
1849  ToRegister(left),
1850  right_reg,
1851  overflow); // Reg at also used as scratch.
1852  } else {
1853  ASSERT(right->IsRegister());
1854  // The overflow check macros do not support constant operands, so the
1855  // IsConstantOperand case is handled by the previous if clause.
1856  __ AdduAndCheckForOverflow(ToRegister(result),
1857  ToRegister(left),
1858  ToRegister(right),
1859  overflow); // Reg at also used as scratch.
1860  }
1861  DeoptimizeIf(lt, instr->environment(), overflow, Operand(zero_reg));
1862  }
1863 }
1864 
1865 
1866 void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
1867  LOperand* left = instr->left();
1868  LOperand* right = instr->right();
1869  HMathMinMax::Operation operation = instr->hydrogen()->operation();
1870  Condition condition = (operation == HMathMinMax::kMathMin) ? le : ge;
1871  if (instr->hydrogen()->representation().IsSmiOrInteger32()) {
1872  Register left_reg = ToRegister(left);
1873  Operand right_op = (right->IsRegister() || right->IsConstantOperand())
1874  ? ToOperand(right)
1875  : Operand(EmitLoadRegister(right, at));
1876  Register result_reg = ToRegister(instr->result());
1877  Label return_right, done;
1878  if (!result_reg.is(left_reg)) {
1879  __ Branch(&return_right, NegateCondition(condition), left_reg, right_op);
1880  __ mov(result_reg, left_reg);
1881  __ Branch(&done);
1882  }
1883  __ Branch(&done, condition, left_reg, right_op);
1884  __ bind(&return_right);
1885  __ Addu(result_reg, zero_reg, right_op);
1886  __ bind(&done);
1887  } else {
1888  ASSERT(instr->hydrogen()->representation().IsDouble());
1889  FPURegister left_reg = ToDoubleRegister(left);
1890  FPURegister right_reg = ToDoubleRegister(right);
1891  FPURegister result_reg = ToDoubleRegister(instr->result());
1892  Label check_nan_left, check_zero, return_left, return_right, done;
1893  __ BranchF(&check_zero, &check_nan_left, eq, left_reg, right_reg);
1894  __ BranchF(&return_left, NULL, condition, left_reg, right_reg);
1895  __ Branch(&return_right);
1896 
1897  __ bind(&check_zero);
1898  // left == right != 0.
1899  __ BranchF(&return_left, NULL, ne, left_reg, kDoubleRegZero);
1900  // At this point, both left and right are either 0 or -0.
1901  if (operation == HMathMinMax::kMathMin) {
1902  __ neg_d(left_reg, left_reg);
1903  __ sub_d(result_reg, left_reg, right_reg);
1904  __ neg_d(result_reg, result_reg);
1905  } else {
1906  __ add_d(result_reg, left_reg, right_reg);
1907  }
1908  __ Branch(&done);
1909 
1910  __ bind(&check_nan_left);
1911  // left == NaN.
1912  __ BranchF(NULL, &return_left, eq, left_reg, left_reg);
1913  __ bind(&return_right);
1914  if (!right_reg.is(result_reg)) {
1915  __ mov_d(result_reg, right_reg);
1916  }
1917  __ Branch(&done);
1918 
1919  __ bind(&return_left);
1920  if (!left_reg.is(result_reg)) {
1921  __ mov_d(result_reg, left_reg);
1922  }
1923  __ bind(&done);
1924  }
1925 }
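// The zero case above exploits IEEE-754 signed-zero addition: (+0) + (-0) is
// +0, which is exactly what max must return, while min must return -0, so for
// min both operands are negated, combined, and the result negated back. In
// the NaN case, whichever operand is NaN ends up in the result register, so
// the result is NaN as required.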
1926 
1927 
1928 void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
1929  DoubleRegister left = ToDoubleRegister(instr->left());
1930  DoubleRegister right = ToDoubleRegister(instr->right());
1931  DoubleRegister result = ToDoubleRegister(instr->result());
1932  switch (instr->op()) {
1933  case Token::ADD:
1934  __ add_d(result, left, right);
1935  break;
1936  case Token::SUB:
1937  __ sub_d(result, left, right);
1938  break;
1939  case Token::MUL:
1940  __ mul_d(result, left, right);
1941  break;
1942  case Token::DIV:
1943  __ div_d(result, left, right);
1944  break;
1945  case Token::MOD: {
1946  // Save a0-a3 on the stack.
1947  RegList saved_regs = a0.bit() | a1.bit() | a2.bit() | a3.bit();
1948  __ MultiPush(saved_regs);
1949 
1950  __ PrepareCallCFunction(0, 2, scratch0());
1951  __ MovToFloatParameters(left, right);
1952  __ CallCFunction(
1953  ExternalReference::mod_two_doubles_operation(isolate()),
1954  0, 2);
1955  // Move the result into the double result register.
1956  __ MovFromFloatResult(result);
1957 
1958  // Restore the saved registers.
1959  __ MultiPop(saved_regs);
1960  break;
1961  }
1962  default:
1963  UNREACHABLE();
1964  break;
1965  }
1966 }
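// Token::MOD has no FPU instruction here, so it is lowered to a call to the
// C double-modulo helper: a0-a3 are saved around the call, the operands are
// moved into the FP parameter registers, and the result is read back from the
// FP return register.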
1967 
1968 
1969 void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
1970  ASSERT(ToRegister(instr->context()).is(cp));
1971  ASSERT(ToRegister(instr->left()).is(a1));
1972  ASSERT(ToRegister(instr->right()).is(a0));
1973  ASSERT(ToRegister(instr->result()).is(v0));
1974 
1975  BinaryOpICStub stub(instr->op(), NO_OVERWRITE);
1976  CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
1977  // Other architectures use a nop here to signal that there is no inlined
1978  // patchable code. MIPS does not need the nop, since our marker
1979  // instruction (andi zero_reg) will never appear in normal code.
1980 }
1981 
1982 
1983 template<class InstrType>
1984 void LCodeGen::EmitBranch(InstrType instr,
1985  Condition condition,
1986  Register src1,
1987  const Operand& src2) {
1988  int left_block = instr->TrueDestination(chunk_);
1989  int right_block = instr->FalseDestination(chunk_);
1990 
1991  int next_block = GetNextEmittedBlock();
1992  if (right_block == left_block || condition == al) {
1993  EmitGoto(left_block);
1994  } else if (left_block == next_block) {
1995  __ Branch(chunk_->GetAssemblyLabel(right_block),
1996  NegateCondition(condition), src1, src2);
1997  } else if (right_block == next_block) {
1998  __ Branch(chunk_->GetAssemblyLabel(left_block), condition, src1, src2);
1999  } else {
2000  __ Branch(chunk_->GetAssemblyLabel(left_block), condition, src1, src2);
2001  __ Branch(chunk_->GetAssemblyLabel(right_block));
2002  }
2003 }
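// EmitBranch tries to emit a single branch per instruction: if the true block
// is the next emitted block, only a branch on the negated condition to the
// false block is needed; if the false block is next, only the true branch is
// emitted; otherwise a conditional branch plus an unconditional one are used.
// An "al" (always) condition or identical targets collapse to a plain goto.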
2004 
2005 
2006 template<class InstrType>
2007 void LCodeGen::EmitBranchF(InstrType instr,
2008  Condition condition,
2009  FPURegister src1,
2010  FPURegister src2) {
2011  int right_block = instr->FalseDestination(chunk_);
2012  int left_block = instr->TrueDestination(chunk_);
2013 
2014  int next_block = GetNextEmittedBlock();
2015  if (right_block == left_block) {
2016  EmitGoto(left_block);
2017  } else if (left_block == next_block) {
2018  __ BranchF(chunk_->GetAssemblyLabel(right_block), NULL,
2019  NegateCondition(condition), src1, src2);
2020  } else if (right_block == next_block) {
2021  __ BranchF(chunk_->GetAssemblyLabel(left_block), NULL,
2022  condition, src1, src2);
2023  } else {
2024  __ BranchF(chunk_->GetAssemblyLabel(left_block), NULL,
2025  condition, src1, src2);
2026  __ Branch(chunk_->GetAssemblyLabel(right_block));
2027  }
2028 }
2029 
2030 
2031 template<class InstrType>
2032 void LCodeGen::EmitFalseBranch(InstrType instr,
2033  Condition condition,
2034  Register src1,
2035  const Operand& src2) {
2036  int false_block = instr->FalseDestination(chunk_);
2037  __ Branch(chunk_->GetAssemblyLabel(false_block), condition, src1, src2);
2038 }
2039 
2040 
2041 template<class InstrType>
2042 void LCodeGen::EmitFalseBranchF(InstrType instr,
2043  Condition condition,
2044  FPURegister src1,
2045  FPURegister src2) {
2046  int false_block = instr->FalseDestination(chunk_);
2047  __ BranchF(chunk_->GetAssemblyLabel(false_block), NULL,
2048  condition, src1, src2);
2049 }
2050 
2051 
2052 void LCodeGen::DoDebugBreak(LDebugBreak* instr) {
2053  __ stop("LDebugBreak");
2054 }
2055 
2056 
2057 void LCodeGen::DoBranch(LBranch* instr) {
2058  Representation r = instr->hydrogen()->value()->representation();
2059  if (r.IsInteger32() || r.IsSmi()) {
2060  ASSERT(!info()->IsStub());
2061  Register reg = ToRegister(instr->value());
2062  EmitBranch(instr, ne, reg, Operand(zero_reg));
2063  } else if (r.IsDouble()) {
2064  ASSERT(!info()->IsStub());
2065  DoubleRegister reg = ToDoubleRegister(instr->value());
2066  // Test the double value. Zero and NaN are false.
2067  EmitBranchF(instr, nue, reg, kDoubleRegZero);
2068  } else {
2069  ASSERT(r.IsTagged());
2070  Register reg = ToRegister(instr->value());
2071  HType type = instr->hydrogen()->value()->type();
2072  if (type.IsBoolean()) {
2073  ASSERT(!info()->IsStub());
2074  __ LoadRoot(at, Heap::kTrueValueRootIndex);
2075  EmitBranch(instr, eq, reg, Operand(at));
2076  } else if (type.IsSmi()) {
2077  ASSERT(!info()->IsStub());
2078  EmitBranch(instr, ne, reg, Operand(zero_reg));
2079  } else if (type.IsJSArray()) {
2080  ASSERT(!info()->IsStub());
2081  EmitBranch(instr, al, zero_reg, Operand(zero_reg));
2082  } else if (type.IsHeapNumber()) {
2083  ASSERT(!info()->IsStub());
2084  DoubleRegister dbl_scratch = double_scratch0();
2085  __ ldc1(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset));
2086  // Test the double value. Zero and NaN are false.
2087  EmitBranchF(instr, nue, dbl_scratch, kDoubleRegZero);
2088  } else if (type.IsString()) {
2089  ASSERT(!info()->IsStub());
2090  __ lw(at, FieldMemOperand(reg, String::kLengthOffset));
2091  EmitBranch(instr, ne, at, Operand(zero_reg));
2092  } else {
2093  ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types();
2094  // Avoid deopts in the case where we've never executed this path before.
2095  if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic();
2096 
2097  if (expected.Contains(ToBooleanStub::UNDEFINED)) {
2098  // undefined -> false.
2099  __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
2100  __ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(at));
2101  }
2102  if (expected.Contains(ToBooleanStub::BOOLEAN)) {
2103  // Boolean -> its value.
2104  __ LoadRoot(at, Heap::kTrueValueRootIndex);
2105  __ Branch(instr->TrueLabel(chunk_), eq, reg, Operand(at));
2106  __ LoadRoot(at, Heap::kFalseValueRootIndex);
2107  __ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(at));
2108  }
2109  if (expected.Contains(ToBooleanStub::NULL_TYPE)) {
2110  // 'null' -> false.
2111  __ LoadRoot(at, Heap::kNullValueRootIndex);
2112  __ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(at));
2113  }
2114 
2115  if (expected.Contains(ToBooleanStub::SMI)) {
2116  // Smis: 0 -> false, all other -> true.
2117  __ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(zero_reg));
2118  __ JumpIfSmi(reg, instr->TrueLabel(chunk_));
2119  } else if (expected.NeedsMap()) {
2120  // If we need a map later and have a Smi -> deopt.
2121  __ SmiTst(reg, at);
2122  DeoptimizeIf(eq, instr->environment(), at, Operand(zero_reg));
2123  }
2124 
2125  const Register map = scratch0();
2126  if (expected.NeedsMap()) {
2127  __ lw(map, FieldMemOperand(reg, HeapObject::kMapOffset));
2128  if (expected.CanBeUndetectable()) {
2129  // Undetectable -> false.
2130  __ lbu(at, FieldMemOperand(map, Map::kBitFieldOffset));
2131  __ And(at, at, Operand(1 << Map::kIsUndetectable));
2132  __ Branch(instr->FalseLabel(chunk_), ne, at, Operand(zero_reg));
2133  }
2134  }
2135 
2136  if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) {
2137  // spec object -> true.
2138  __ lbu(at, FieldMemOperand(map, Map::kInstanceTypeOffset));
2139  __ Branch(instr->TrueLabel(chunk_),
2140  ge, at, Operand(FIRST_SPEC_OBJECT_TYPE));
2141  }
2142 
2143  if (expected.Contains(ToBooleanStub::STRING)) {
2144  // String value -> false iff empty.
2145  Label not_string;
2146  __ lbu(at, FieldMemOperand(map, Map::kInstanceTypeOffset));
2147  __ Branch(&not_string, ge, at, Operand(FIRST_NONSTRING_TYPE));
2148  __ lw(at, FieldMemOperand(reg, String::kLengthOffset));
2149  __ Branch(instr->TrueLabel(chunk_), ne, at, Operand(zero_reg));
2150  __ Branch(instr->FalseLabel(chunk_));
2151  __ bind(&not_string);
2152  }
2153 
2154  if (expected.Contains(ToBooleanStub::SYMBOL)) {
2155  // Symbol value -> true.
2156  const Register scratch = scratch1();
2157  __ lbu(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
2158  __ Branch(instr->TrueLabel(chunk_), eq, scratch, Operand(SYMBOL_TYPE));
2159  }
2160 
2161  if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
2162  // heap number -> false iff +0, -0, or NaN.
2163  DoubleRegister dbl_scratch = double_scratch0();
2164  Label not_heap_number;
2165  __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
2166  __ Branch(&not_heap_number, ne, map, Operand(at));
2167  __ ldc1(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset));
2168  __ BranchF(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_),
2169  ne, dbl_scratch, kDoubleRegZero);
2170  // Falls through if dbl_scratch == 0.
2171  __ Branch(instr->FalseLabel(chunk_));
2172  __ bind(&not_heap_number);
2173  }
2174 
2175  if (!expected.IsGeneric()) {
2176  // We've seen something for the first time -> deopt.
2177  // This can only happen if we are not generic already.
2178  DeoptimizeIf(al, instr->environment(), zero_reg, Operand(zero_reg));
2179  }
2180  }
2181  }
2182 }
2183 
2184 
2185 void LCodeGen::EmitGoto(int block) {
2186  if (!IsNextEmittedBlock(block)) {
2187  __ jmp(chunk_->GetAssemblyLabel(LookupDestination(block)));
2188  }
2189 }
2190 
2191 
2192 void LCodeGen::DoGoto(LGoto* instr) {
2193  EmitGoto(instr->block_id());
2194 }
2195 
2196 
2197 Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
2198  Condition cond = kNoCondition;
2199  switch (op) {
2200  case Token::EQ:
2201  case Token::EQ_STRICT:
2202  cond = eq;
2203  break;
2204  case Token::NE:
2205  case Token::NE_STRICT:
2206  cond = ne;
2207  break;
2208  case Token::LT:
2209  cond = is_unsigned ? lo : lt;
2210  break;
2211  case Token::GT:
2212  cond = is_unsigned ? hi : gt;
2213  break;
2214  case Token::LTE:
2215  cond = is_unsigned ? ls : le;
2216  break;
2217  case Token::GTE:
2218  cond = is_unsigned ? hs : ge;
2219  break;
2220  case Token::IN:
2221  case Token::INSTANCEOF:
2222  default:
2223  UNREACHABLE();
2224  }
2225  return cond;
2226 }
2227 
2228 
2229 void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) {
2230  LOperand* left = instr->left();
2231  LOperand* right = instr->right();
2232  Condition cond = TokenToCondition(instr->op(), false);
2233 
2234  if (left->IsConstantOperand() && right->IsConstantOperand()) {
2235  // We can statically evaluate the comparison.
2236  double left_val = ToDouble(LConstantOperand::cast(left));
2237  double right_val = ToDouble(LConstantOperand::cast(right));
2238  int next_block = EvalComparison(instr->op(), left_val, right_val) ?
2239  instr->TrueDestination(chunk_) : instr->FalseDestination(chunk_);
2240  EmitGoto(next_block);
2241  } else {
2242  if (instr->is_double()) {
2243  // Compare left and right as doubles and load the
2244  // resulting flags into the normal status register.
2245  FPURegister left_reg = ToDoubleRegister(left);
2246  FPURegister right_reg = ToDoubleRegister(right);
2247 
2248  // If a NaN is involved, i.e. the result is unordered,
2249  // jump to false block label.
2250  __ BranchF(NULL, instr->FalseLabel(chunk_), eq,
2251  left_reg, right_reg);
2252 
2253  EmitBranchF(instr, cond, left_reg, right_reg);
2254  } else {
2255  Register cmp_left;
2256  Operand cmp_right = Operand(0);
2257 
2258  if (right->IsConstantOperand()) {
2259  int32_t value = ToInteger32(LConstantOperand::cast(right));
2260  if (instr->hydrogen_value()->representation().IsSmi()) {
2261  cmp_left = ToRegister(left);
2262  cmp_right = Operand(Smi::FromInt(value));
2263  } else {
2264  cmp_left = ToRegister(left);
2265  cmp_right = Operand(value);
2266  }
2267  } else if (left->IsConstantOperand()) {
2268  int32_t value = ToInteger32(LConstantOperand::cast(left));
2269  if (instr->hydrogen_value()->representation().IsSmi()) {
2270  cmp_left = ToRegister(right);
2271  cmp_right = Operand(Smi::FromInt(value));
2272  } else {
2273  cmp_left = ToRegister(right);
2274  cmp_right = Operand(value);
2275  }
2276  // We transposed the operands. Reverse the condition.
2277  cond = ReverseCondition(cond);
2278  } else {
2279  cmp_left = ToRegister(left);
2280  cmp_right = Operand(ToRegister(right));
2281  }
2282 
2283  EmitBranch(instr, cond, cmp_left, cmp_right);
2284  }
2285  }
2286 }
2287 
2288 
2289 void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
2290  Register left = ToRegister(instr->left());
2291  Register right = ToRegister(instr->right());
2292 
2293  EmitBranch(instr, eq, left, Operand(right));
2294 }
2295 
2296 
2297 void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) {
2298  if (instr->hydrogen()->representation().IsTagged()) {
2299  Register input_reg = ToRegister(instr->object());
2300  __ li(at, Operand(factory()->the_hole_value()));
2301  EmitBranch(instr, eq, input_reg, Operand(at));
2302  return;
2303  }
2304 
2305  DoubleRegister input_reg = ToDoubleRegister(instr->object());
2306  EmitFalseBranchF(instr, eq, input_reg, input_reg);
2307 
2308  Register scratch = scratch0();
2309  __ FmoveHigh(scratch, input_reg);
2310  EmitBranch(instr, eq, scratch, Operand(kHoleNanUpper32));
2311 }
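// For untagged doubles "the hole" is a particular NaN bit pattern. The code
// above first takes the false branch for every non-NaN input (a value is NaN
// exactly when it compares unequal to itself, so an ordered-equal compare of
// the input with itself rules out the hole) and then compares the upper 32
// bits against kHoleNanUpper32 to distinguish the hole NaN from ordinary NaNs.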
2312 
2313 
2314 void LCodeGen::DoCompareMinusZeroAndBranch(LCompareMinusZeroAndBranch* instr) {
2315  Representation rep = instr->hydrogen()->value()->representation();
2316  ASSERT(!rep.IsInteger32());
2317  Register scratch = ToRegister(instr->temp());
2318 
2319  if (rep.IsDouble()) {
2320  DoubleRegister value = ToDoubleRegister(instr->value());
2321  EmitFalseBranchF(instr, ne, value, kDoubleRegZero);
2322  __ FmoveHigh(scratch, value);
2323  __ li(at, 0x80000000);
2324  } else {
2325  Register value = ToRegister(instr->value());
2326  __ CheckMap(value,
2327  scratch,
2328  Heap::kHeapNumberMapRootIndex,
2329  instr->FalseLabel(chunk()),
2330  DO_SMI_CHECK);
2331  __ lw(scratch, FieldMemOperand(value, HeapNumber::kExponentOffset));
2332  EmitFalseBranch(instr, ne, scratch, Operand(0x80000000));
2333  __ lw(scratch, FieldMemOperand(value, HeapNumber::kMantissaOffset));
2334  __ mov(at, zero_reg);
2335  }
2336  EmitBranch(instr, eq, scratch, Operand(at));
2337 }
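// -0.0 is the double whose high word is 0x80000000 and whose low word is
// zero. The double path above first rules out values that are not ±0 and then
// checks the high word; the tagged path reads the heap number's exponent and
// mantissa words directly and requires 0x80000000 and 0 respectively.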
2338 
2339 
2340 Condition LCodeGen::EmitIsObject(Register input,
2341  Register temp1,
2342  Register temp2,
2343  Label* is_not_object,
2344  Label* is_object) {
2345  __ JumpIfSmi(input, is_not_object);
2346 
2347  __ LoadRoot(temp2, Heap::kNullValueRootIndex);
2348  __ Branch(is_object, eq, input, Operand(temp2));
2349 
2350  // Load map.
2351  __ lw(temp1, FieldMemOperand(input, HeapObject::kMapOffset));
2352  // Undetectable objects behave like undefined.
2353  __ lbu(temp2, FieldMemOperand(temp1, Map::kBitFieldOffset));
2354  __ And(temp2, temp2, Operand(1 << Map::kIsUndetectable));
2355  __ Branch(is_not_object, ne, temp2, Operand(zero_reg));
2356 
2357  // Load instance type and check that it is in object type range.
2358  __ lbu(temp2, FieldMemOperand(temp1, Map::kInstanceTypeOffset));
2359  __ Branch(is_not_object,
2360  lt, temp2, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
2361 
2362  return le;
2363 }
2364 
2365 
2366 void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
2367  Register reg = ToRegister(instr->value());
2368  Register temp1 = ToRegister(instr->temp());
2369  Register temp2 = scratch0();
2370 
2371  Condition true_cond =
2372  EmitIsObject(reg, temp1, temp2,
2373  instr->FalseLabel(chunk_), instr->TrueLabel(chunk_));
2374 
2375  EmitBranch(instr, true_cond, temp2,
2376  Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
2377 }
2378 
2379 
2380 Condition LCodeGen::EmitIsString(Register input,
2381  Register temp1,
2382  Label* is_not_string,
2383  SmiCheck check_needed = INLINE_SMI_CHECK) {
2384  if (check_needed == INLINE_SMI_CHECK) {
2385  __ JumpIfSmi(input, is_not_string);
2386  }
2387  __ GetObjectType(input, temp1, temp1);
2388 
2389  return lt;
2390 }
2391 
2392 
2393 void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
2394  Register reg = ToRegister(instr->value());
2395  Register temp1 = ToRegister(instr->temp());
2396 
2397  SmiCheck check_needed =
2398  instr->hydrogen()->value()->IsHeapObject()
2399  ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
2400  Condition true_cond =
2401  EmitIsString(reg, temp1, instr->FalseLabel(chunk_), check_needed);
2402 
2403  EmitBranch(instr, true_cond, temp1,
2404  Operand(FIRST_NONSTRING_TYPE));
2405 }
2406 
2407 
2408 void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
2409  Register input_reg = EmitLoadRegister(instr->value(), at);
2410  __ And(at, input_reg, kSmiTagMask);
2411  EmitBranch(instr, eq, at, Operand(zero_reg));
2412 }
2413 
2414 
2415 void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
2416  Register input = ToRegister(instr->value());
2417  Register temp = ToRegister(instr->temp());
2418 
2419  if (!instr->hydrogen()->value()->IsHeapObject()) {
2420  __ JumpIfSmi(input, instr->FalseLabel(chunk_));
2421  }
2422  __ lw(temp, FieldMemOperand(input, HeapObject::kMapOffset));
2423  __ lbu(temp, FieldMemOperand(temp, Map::kBitFieldOffset));
2424  __ And(at, temp, Operand(1 << Map::kIsUndetectable));
2425  EmitBranch(instr, ne, at, Operand(zero_reg));
2426 }
2427 
2428 
2429 static Condition ComputeCompareCondition(Token::Value op) {
2430  switch (op) {
2431  case Token::EQ_STRICT:
2432  case Token::EQ:
2433  return eq;
2434  case Token::LT:
2435  return lt;
2436  case Token::GT:
2437  return gt;
2438  case Token::LTE:
2439  return le;
2440  case Token::GTE:
2441  return ge;
2442  default:
2443  UNREACHABLE();
2444  return kNoCondition;
2445  }
2446 }
2447 
2448 
2449 void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
2450  ASSERT(ToRegister(instr->context()).is(cp));
2451  Token::Value op = instr->op();
2452 
2453  Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
2454  CallCode(ic, RelocInfo::CODE_TARGET, instr);
2455 
2456  Condition condition = ComputeCompareCondition(op);
2457 
2458  EmitBranch(instr, condition, v0, Operand(zero_reg));
2459 }
2460 
2461 
2462 static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
2463  InstanceType from = instr->from();
2464  InstanceType to = instr->to();
2465  if (from == FIRST_TYPE) return to;
2466  ASSERT(from == to || to == LAST_TYPE);
2467  return from;
2468 }
2469 
2470 
2471 static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
2472  InstanceType from = instr->from();
2473  InstanceType to = instr->to();
2474  if (from == to) return eq;
2475  if (to == LAST_TYPE) return hs;
2476  if (from == FIRST_TYPE) return ls;
2477  UNREACHABLE();
2478  return eq;
2479 }
2480 
2481 
2482 void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
2483  Register scratch = scratch0();
2484  Register input = ToRegister(instr->value());
2485 
2486  if (!instr->hydrogen()->value()->IsHeapObject()) {
2487  __ JumpIfSmi(input, instr->FalseLabel(chunk_));
2488  }
2489 
2490  __ GetObjectType(input, scratch, scratch);
2491  EmitBranch(instr,
2492  BranchCondition(instr->hydrogen()),
2493  scratch,
2494  Operand(TestType(instr->hydrogen())));
2495 }
2496 
2497 
2498 void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
2499  Register input = ToRegister(instr->value());
2500  Register result = ToRegister(instr->result());
2501 
2502  __ AssertString(input);
2503 
2504  __ lw(result, FieldMemOperand(input, String::kHashFieldOffset));
2505  __ IndexFromHash(result, result);
2506 }
2507 
2508 
2509 void LCodeGen::DoHasCachedArrayIndexAndBranch(
2510  LHasCachedArrayIndexAndBranch* instr) {
2511  Register input = ToRegister(instr->value());
2512  Register scratch = scratch0();
2513 
2514  __ lw(scratch,
2515  FieldMemOperand(input, String::kHashFieldOffset));
2516  __ And(at, scratch, Operand(String::kContainsCachedArrayIndexMask));
2517  EmitBranch(instr, eq, at, Operand(zero_reg));
2518 }
2519 
2520 
2521 // Branches to a label or falls through with the answer in flags. Trashes
2522 // the temp registers, but not the input.
2523 void LCodeGen::EmitClassOfTest(Label* is_true,
2524  Label* is_false,
2525  Handle<String>class_name,
2526  Register input,
2527  Register temp,
2528  Register temp2) {
2529  ASSERT(!input.is(temp));
2530  ASSERT(!input.is(temp2));
2531  ASSERT(!temp.is(temp2));
2532 
2533  __ JumpIfSmi(input, is_false);
2534 
2535  if (class_name->IsOneByteEqualTo(STATIC_ASCII_VECTOR("Function"))) {
2536  // Assuming the following assertions, we can use the same compares to test
2537  // for both being a function type and being in the object type range.
2538  STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
2539  STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
2540  FIRST_SPEC_OBJECT_TYPE + 1);
2541  STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
2542  LAST_SPEC_OBJECT_TYPE - 1);
2543  STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
2544
2545  __ GetObjectType(input, temp, temp2);
2546  __ Branch(is_false, lt, temp2, Operand(FIRST_SPEC_OBJECT_TYPE));
2547  __ Branch(is_true, eq, temp2, Operand(FIRST_SPEC_OBJECT_TYPE));
2548  __ Branch(is_true, eq, temp2, Operand(LAST_SPEC_OBJECT_TYPE));
2549  } else {
2550  // Faster code path to avoid two compares: subtract lower bound from the
2551  // actual type and do a signed compare with the width of the type range.
2552  __ GetObjectType(input, temp, temp2);
2553  __ Subu(temp2, temp2, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
2554  __ Branch(is_false, gt, temp2, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE -
2555  FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
2556  }
2557 
2558  // Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range.
2559  // Check if the constructor in the map is a function.
2560  __ lw(temp, FieldMemOperand(temp, Map::kConstructorOffset));
2561 
2562  // Objects with a non-function constructor have class 'Object'.
2563  __ GetObjectType(temp, temp2, temp2);
2564  if (class_name->IsOneByteEqualTo(STATIC_ASCII_VECTOR("Object"))) {
2565  __ Branch(is_true, ne, temp2, Operand(JS_FUNCTION_TYPE));
2566  } else {
2567  __ Branch(is_false, ne, temp2, Operand(JS_FUNCTION_TYPE));
2568  }
2569 
2570  // temp now contains the constructor function. Grab the
2571  // instance class name from there.
2572  __ lw(temp, FieldMemOperand(temp, JSFunction::kSharedFunctionInfoOffset));
2573  __ lw(temp, FieldMemOperand(temp,
2574  SharedFunctionInfo::kInstanceClassNameOffset));
2575  // The class name we are testing against is internalized since it's a literal.
2576  // The name in the constructor is internalized because of the way the context
2577  // is booted. This routine isn't expected to work for random API-created
2578  // classes and it doesn't have to because you can't access it with natives
2579  // syntax. Since both sides are internalized it is sufficient to use an
2580  // identity comparison.
2581 
2582  // End with the address of this class_name instance in temp register.
2583  // On MIPS, the caller must do the comparison with Handle<String>class_name.
2584 }
2585 
2586 
2587 void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
2588  Register input = ToRegister(instr->value());
2589  Register temp = scratch0();
2590  Register temp2 = ToRegister(instr->temp());
2591  Handle<String> class_name = instr->hydrogen()->class_name();
2592 
2593  EmitClassOfTest(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_),
2594  class_name, input, temp, temp2);
2595 
2596  EmitBranch(instr, eq, temp, Operand(class_name));
2597 }
2598 
2599 
2600 void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
2601  Register reg = ToRegister(instr->value());
2602  Register temp = ToRegister(instr->temp());
2603 
2604  __ lw(temp, FieldMemOperand(reg, HeapObject::kMapOffset));
2605  EmitBranch(instr, eq, temp, Operand(instr->map()));
2606 }
2607 
2608 
2609 void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
2610  ASSERT(ToRegister(instr->context()).is(cp));
2611  Label true_label, done;
2612  ASSERT(ToRegister(instr->left()).is(a0)); // Object is in a0.
2613  ASSERT(ToRegister(instr->right()).is(a1)); // Function is in a1.
2614  Register result = ToRegister(instr->result());
2615  ASSERT(result.is(v0));
2616 
2617  InstanceofStub stub(InstanceofStub::kArgsInRegisters);
2618  CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
2619 
2620  __ Branch(&true_label, eq, result, Operand(zero_reg));
2621  __ li(result, Operand(factory()->false_value()));
2622  __ Branch(&done);
2623  __ bind(&true_label);
2624  __ li(result, Operand(factory()->true_value()));
2625  __ bind(&done);
2626 }
2627 
2628 
2629 void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
2630  class DeferredInstanceOfKnownGlobal V8_FINAL : public LDeferredCode {
2631  public:
2632  DeferredInstanceOfKnownGlobal(LCodeGen* codegen,
2633  LInstanceOfKnownGlobal* instr)
2634  : LDeferredCode(codegen), instr_(instr) { }
2635  virtual void Generate() V8_OVERRIDE {
2636  codegen()->DoDeferredInstanceOfKnownGlobal(instr_, &map_check_);
2637  }
2638  virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
2639  Label* map_check() { return &map_check_; }
2640 
2641  private:
2642  LInstanceOfKnownGlobal* instr_;
2643  Label map_check_;
2644  };
2645 
2646  DeferredInstanceOfKnownGlobal* deferred;
2647  deferred = new(zone()) DeferredInstanceOfKnownGlobal(this, instr);
2648 
2649  Label done, false_result;
2650  Register object = ToRegister(instr->value());
2651  Register temp = ToRegister(instr->temp());
2652  Register result = ToRegister(instr->result());
2653 
2654  ASSERT(object.is(a0));
2655  ASSERT(result.is(v0));
2656 
2657  // A Smi is not instance of anything.
2658  __ JumpIfSmi(object, &false_result);
2659 
2660  // This is the inlined call site instanceof cache. The two occurrences of the
2661  // hole value will be patched to the last map/result pair generated by the
2662  // instanceof stub.
2663  Label cache_miss;
2664  Register map = temp;
2665  __ lw(map, FieldMemOperand(object, HeapObject::kMapOffset));
2666 
2667  Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
2668  __ bind(deferred->map_check()); // Label for calculating code patching.
2669  // We use Factory::the_hole_value() on purpose instead of loading from the
2670  // root array to force relocation to be able to later patch with
2671  // the cached map.
2672  Handle<Cell> cell = factory()->NewCell(factory()->the_hole_value());
2673  __ li(at, Operand(Handle<Object>(cell)));
2674  __ lw(at, FieldMemOperand(at, Cell::kValueOffset));
2675  __ BranchShort(&cache_miss, ne, map, Operand(at));
2676  // We use Factory::the_hole_value() on purpose instead of loading from the
2677  // root array to force relocation to be able to later patch
2678  // with true or false. The distance from map check has to be constant.
2679  __ li(result, Operand(factory()->the_hole_value()), CONSTANT_SIZE);
2680  __ Branch(&done);
2681 
2682  // The inlined call site cache did not match. Check null and string before
2683  // calling the deferred code.
2684  __ bind(&cache_miss);
2685  // Null is not instance of anything.
2686  __ LoadRoot(temp, Heap::kNullValueRootIndex);
2687  __ Branch(&false_result, eq, object, Operand(temp));
2688 
2689  // String values are not instances of anything.
2690  Condition cc = __ IsObjectStringType(object, temp, temp);
2691  __ Branch(&false_result, cc, temp, Operand(zero_reg));
2692 
2693  // Go to the deferred code.
2694  __ Branch(deferred->entry());
2695 
2696  __ bind(&false_result);
2697  __ LoadRoot(result, Heap::kFalseValueRootIndex);
2698 
2699  // Here result has either true or false. Deferred code also produces true or
2700  // false object.
2701  __ bind(deferred->exit());
2702  __ bind(&done);
2703 }
2704 
2705 
2706 void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
2707  Label* map_check) {
2708  Register result = ToRegister(instr->result());
2709  ASSERT(result.is(v0));
2710 
2711  InstanceofStub::Flags flags = InstanceofStub::kNoFlags;
2712  flags = static_cast<InstanceofStub::Flags>(
2713  flags | InstanceofStub::kArgsInRegisters);
2714  flags = static_cast<InstanceofStub::Flags>(
2715  flags | InstanceofStub::kCallSiteInlineCheck);
2716  flags = static_cast<InstanceofStub::Flags>(
2717  flags | InstanceofStub::kReturnTrueFalseObject);
2718  InstanceofStub stub(flags);
2719 
2720  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
2721  LoadContextFromDeferred(instr->context());
2722 
2723  // Get the temp register reserved by the instruction. This needs to be t0 as
2724  // its slot of the pushing of safepoint registers is used to communicate the
2725  // offset to the location of the map check.
2726  Register temp = ToRegister(instr->temp());
2727  ASSERT(temp.is(t0));
2728  __ li(InstanceofStub::right(), instr->function());
2729  static const int kAdditionalDelta = 7;
2730  int delta = masm_->InstructionsGeneratedSince(map_check) + kAdditionalDelta;
2731  Label before_push_delta;
2732  __ bind(&before_push_delta);
2733  {
2734  Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
2735  __ li(temp, Operand(delta * kPointerSize), CONSTANT_SIZE);
2736  __ StoreToSafepointRegisterSlot(temp, temp);
2737  }
2738  CallCodeGeneric(stub.GetCode(isolate()),
2739  RelocInfo::CODE_TARGET,
2740  instr,
2741  RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
2742  LEnvironment* env = instr->GetDeferredLazyDeoptimizationEnvironment();
2743  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
2744  // Put the result value into the result register slot and
2745  // restore all registers.
2746  __ StoreToSafepointRegisterSlot(result, result);
2747 }
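// The value written into the temp register's safepoint slot is the byte
// distance from the map-check label to this point (delta instructions times
// kPointerSize, the MIPS32 instruction size). The InstanceofStub appears to
// use that offset to locate and patch the inlined cell emitted at the map
// check with the cached map/result pair; kAdditionalDelta accounts for the
// instructions emitted between computing delta and performing the call.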
2748 
2749 
2750 void LCodeGen::DoCmpT(LCmpT* instr) {
2751  ASSERT(ToRegister(instr->context()).is(cp));
2752  Token::Value op = instr->op();
2753 
2754  Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
2755  CallCode(ic, RelocInfo::CODE_TARGET, instr);
2756  // On MIPS there is no need for a "no inlined smi code" marker (nop).
2757 
2758  Condition condition = ComputeCompareCondition(op);
2759  // A minor optimization that relies on LoadRoot always emitting one
2760  // instruction.
2761  Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm());
2762  Label done, check;
2763  __ Branch(USE_DELAY_SLOT, &done, condition, v0, Operand(zero_reg));
2764  __ bind(&check);
2765  __ LoadRoot(ToRegister(instr->result()), Heap::kTrueValueRootIndex);
2766  ASSERT_EQ(1, masm()->InstructionsGeneratedSince(&check));
2767  __ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex);
2768  __ bind(&done);
2769 }
2770 
2771 
2772 void LCodeGen::DoReturn(LReturn* instr) {
2773  if (FLAG_trace && info()->IsOptimizing()) {
2774  // Push the return value on the stack as the parameter.
2775  // Runtime::TraceExit returns its parameter in v0. We're leaving the code
2776  // managed by the register allocator and tearing down the frame, it's
2777  // safe to write to the context register.
2778  __ push(v0);
2779  __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
2780  __ CallRuntime(Runtime::kTraceExit, 1);
2781  }
2782  if (info()->saves_caller_doubles()) {
2783  RestoreCallerDoubles();
2784  }
2785  int no_frame_start = -1;
2786  if (NeedsEagerFrame()) {
2787  __ mov(sp, fp);
2788  no_frame_start = masm_->pc_offset();
2789  __ Pop(ra, fp);
2790  }
2791  if (instr->has_constant_parameter_count()) {
2792  int parameter_count = ToInteger32(instr->constant_parameter_count());
2793  int32_t sp_delta = (parameter_count + 1) * kPointerSize;
2794  if (sp_delta != 0) {
2795  __ Addu(sp, sp, Operand(sp_delta));
2796  }
2797  } else {
2798  Register reg = ToRegister(instr->parameter_count());
2799  // The argument count parameter is a smi
2800  __ SmiUntag(reg);
2801  __ sll(at, reg, kPointerSizeLog2);
2802  __ Addu(sp, sp, at);
2803  }
2804 
2805  __ Jump(ra);
2806 
2807  if (no_frame_start != -1) {
2808  info_->AddNoFrameRange(no_frame_start, masm_->pc_offset());
2809  }
2810 }
2811 
2812 
2813 void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
2814  Register result = ToRegister(instr->result());
2815  __ li(at, Operand(Handle<Object>(instr->hydrogen()->cell().handle())));
2816  __ lw(result, FieldMemOperand(at, Cell::kValueOffset));
2817  if (instr->hydrogen()->RequiresHoleCheck()) {
2818  __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
2819  DeoptimizeIf(eq, instr->environment(), result, Operand(at));
2820  }
2821 }
2822 
2823 
2824 void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
2825  ASSERT(ToRegister(instr->context()).is(cp));
2826  ASSERT(ToRegister(instr->global_object()).is(a0));
2827  ASSERT(ToRegister(instr->result()).is(v0));
2828 
2829  __ li(a2, Operand(instr->name()));
2830  ContextualMode mode = instr->for_typeof() ? NOT_CONTEXTUAL : CONTEXTUAL;
2831  Handle<Code> ic = LoadIC::initialize_stub(isolate(), mode);
2832  CallCode(ic, RelocInfo::CODE_TARGET, instr);
2833 }
2834 
2835 
2836 void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
2837  Register value = ToRegister(instr->value());
2838  Register cell = scratch0();
2839 
2840  // Load the cell.
2841  __ li(cell, Operand(instr->hydrogen()->cell().handle()));
2842 
2843  // If the cell we are storing to contains the hole it could have
2844  // been deleted from the property dictionary. In that case, we need
2845  // to update the property details in the property dictionary to mark
2846  // it as no longer deleted.
2847  if (instr->hydrogen()->RequiresHoleCheck()) {
2848  // We use a temp to check the payload.
2849  Register payload = ToRegister(instr->temp());
2850  __ lw(payload, FieldMemOperand(cell, Cell::kValueOffset));
2851  __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
2852  DeoptimizeIf(eq, instr->environment(), payload, Operand(at));
2853  }
2854 
2855  // Store the value.
2856  __ sw(value, FieldMemOperand(cell, Cell::kValueOffset));
2857  // Cells are always rescanned, so no write barrier here.
2858 }
2859 
2860 
2861 
2862 void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
2863  Register context = ToRegister(instr->context());
2864  Register result = ToRegister(instr->result());
2865 
2866  __ lw(result, ContextOperand(context, instr->slot_index()));
2867  if (instr->hydrogen()->RequiresHoleCheck()) {
2868  __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
2869 
2870  if (instr->hydrogen()->DeoptimizesOnHole()) {
2871  DeoptimizeIf(eq, instr->environment(), result, Operand(at));
2872  } else {
2873  Label is_not_hole;
2874  __ Branch(&is_not_hole, ne, result, Operand(at));
2875  __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
2876  __ bind(&is_not_hole);
2877  }
2878  }
2879 }
2880 
2881 
2882 void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
2883  Register context = ToRegister(instr->context());
2884  Register value = ToRegister(instr->value());
2885  Register scratch = scratch0();
2886  MemOperand target = ContextOperand(context, instr->slot_index());
2887 
2888  Label skip_assignment;
2889 
2890  if (instr->hydrogen()->RequiresHoleCheck()) {
2891  __ lw(scratch, target);
2892  __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
2893 
2894  if (instr->hydrogen()->DeoptimizesOnHole()) {
2895  DeoptimizeIf(eq, instr->environment(), scratch, Operand(at));
2896  } else {
2897  __ Branch(&skip_assignment, ne, scratch, Operand(at));
2898  }
2899  }
2900 
2901  __ sw(value, target);
2902  if (instr->hydrogen()->NeedsWriteBarrier()) {
2903  SmiCheck check_needed =
2904  instr->hydrogen()->value()->IsHeapObject()
2905  ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
2906  __ RecordWriteContextSlot(context,
2907  target.offset(),
2908  value,
2909  scratch0(),
2910  GetRAState(),
2911  kSaveFPRegs,
2912  EMIT_REMEMBERED_SET,
2913  check_needed);
2914  }
2915 
2916  __ bind(&skip_assignment);
2917 }
2918 
2919 
2920 void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
2921  HObjectAccess access = instr->hydrogen()->access();
2922  int offset = access.offset();
2923  Register object = ToRegister(instr->object());
2924 
2925  if (access.IsExternalMemory()) {
2926  Register result = ToRegister(instr->result());
2927  MemOperand operand = MemOperand(object, offset);
2928  __ Load(result, operand, access.representation());
2929  return;
2930  }
2931 
2932  if (instr->hydrogen()->representation().IsDouble()) {
2933  DoubleRegister result = ToDoubleRegister(instr->result());
2934  __ ldc1(result, FieldMemOperand(object, offset));
2935  return;
2936  }
2937 
2938  Register result = ToRegister(instr->result());
2939  if (!access.IsInobject()) {
2940  __ lw(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
2941  object = result;
2942  }
2943  MemOperand operand = FieldMemOperand(object, offset);
2944  __ Load(result, operand, access.representation());
2945 }
2946 
2947 
2948 void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
2949  ASSERT(ToRegister(instr->context()).is(cp));
2950  ASSERT(ToRegister(instr->object()).is(a0));
2951  ASSERT(ToRegister(instr->result()).is(v0));
2952 
2953  // Name is always in a2.
2954  __ li(a2, Operand(instr->name()));
2955  Handle<Code> ic = LoadIC::initialize_stub(isolate(), NOT_CONTEXTUAL);
2956  CallCode(ic, RelocInfo::CODE_TARGET, instr);
2957 }
2958 
2959 
2960 void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
2961  Register scratch = scratch0();
2962  Register function = ToRegister(instr->function());
2963  Register result = ToRegister(instr->result());
2964 
2965  // Check that the function really is a function. Load map into the
2966  // result register.
2967  __ GetObjectType(function, result, scratch);
2968  DeoptimizeIf(ne, instr->environment(), scratch, Operand(JS_FUNCTION_TYPE));
2969 
2970  // Make sure that the function has an instance prototype.
2971  Label non_instance;
2972  __ lbu(scratch, FieldMemOperand(result, Map::kBitFieldOffset));
2973  __ And(scratch, scratch, Operand(1 << Map::kHasNonInstancePrototype));
2974  __ Branch(&non_instance, ne, scratch, Operand(zero_reg));
2975 
2976  // Get the prototype or initial map from the function.
2977  __ lw(result,
2978  FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
2979
2980  // Check that the function has a prototype or an initial map.
2981  __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
2982  DeoptimizeIf(eq, instr->environment(), result, Operand(at));
2983 
2984  // If the function does not have an initial map, we're done.
2985  Label done;
2986  __ GetObjectType(result, scratch, scratch);
2987  __ Branch(&done, ne, scratch, Operand(MAP_TYPE));
2988 
2989  // Get the prototype from the initial map.
2990  __ lw(result, FieldMemOperand(result, Map::kPrototypeOffset));
2991  __ Branch(&done);
2992 
2993  // Non-instance prototype: Fetch prototype from constructor field
2994  // in initial map.
2995  __ bind(&non_instance);
2996  __ lw(result, FieldMemOperand(result, Map::kConstructorOffset));
2997 
2998  // All done.
2999  __ bind(&done);
3000 }
3001 
3002 
3003 void LCodeGen::DoLoadRoot(LLoadRoot* instr) {
3004  Register result = ToRegister(instr->result());
3005  __ LoadRoot(result, instr->index());
3006 }
3007 
3008 
3009 void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
3010  Register arguments = ToRegister(instr->arguments());
3011  Register result = ToRegister(instr->result());
3012  // There are two words between the frame pointer and the last argument.
3013  // Subtracting the index from the length accounts for one of them; adding one
3014  // more accounts for the other.
3014  if (instr->length()->IsConstantOperand()) {
3015  int const_length = ToInteger32(LConstantOperand::cast(instr->length()));
3016  if (instr->index()->IsConstantOperand()) {
3017  int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
3018  int index = (const_length - const_index) + 1;
3019  __ lw(result, MemOperand(arguments, index * kPointerSize));
3020  } else {
3021  Register index = ToRegister(instr->index());
3022  __ li(at, Operand(const_length + 1));
3023  __ Subu(result, at, index);
3024  __ sll(at, result, kPointerSizeLog2);
3025  __ Addu(at, arguments, at);
3026  __ lw(result, MemOperand(at));
3027  }
3028  } else if (instr->index()->IsConstantOperand()) {
3029  Register length = ToRegister(instr->length());
3030  int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
3031  int loc = const_index - 1;
3032  if (loc != 0) {
3033  __ Subu(result, length, Operand(loc));
3034  __ sll(at, result, kPointerSizeLog2);
3035  __ Addu(at, arguments, at);
3036  __ lw(result, MemOperand(at));
3037  } else {
3038  __ sll(at, length, kPointerSizeLog2);
3039  __ Addu(at, arguments, at);
3040  __ lw(result, MemOperand(at));
3041  }
3042  } else {
3043  Register length = ToRegister(instr->length());
3044  Register index = ToRegister(instr->index());
3045  __ Subu(result, length, index);
3046  __ Addu(result, result, 1);
3047  __ sll(at, result, kPointerSizeLog2);
3048  __ Addu(at, arguments, at);
3049  __ lw(result, MemOperand(at));
3050  }
3051 }
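// In all three cases above the element loaded is at
// arguments + (length - index + 1) * kPointerSize, which accounts for the two
// extra words mentioned in the comment. For example, with length 3 and
// index 0 (the first argument) the load reads from
// arguments + 4 * kPointerSize.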
3052 
3053 
3054 void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
3055  Register external_pointer = ToRegister(instr->elements());
3056  Register key = no_reg;
3057  ElementsKind elements_kind = instr->elements_kind();
3058  bool key_is_constant = instr->key()->IsConstantOperand();
3059  int constant_key = 0;
3060  if (key_is_constant) {
3061  constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
3062  if (constant_key & 0xF0000000) {
3063  Abort(kArrayIndexConstantValueTooBig);
3064  }
3065  } else {
3066  key = ToRegister(instr->key());
3067  }
3068  int element_size_shift = ElementsKindToShiftSize(elements_kind);
3069  int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
3070  ? (element_size_shift - kSmiTagSize) : element_size_shift;
3071  int additional_offset = IsFixedTypedArrayElementsKind(elements_kind)
3072  ? FixedTypedArrayBase::kDataOffset - kHeapObjectTag
3073  : 0;
3074 
3075  if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
3076  elements_kind == FLOAT32_ELEMENTS ||
3077  elements_kind == EXTERNAL_FLOAT64_ELEMENTS ||
3078  elements_kind == FLOAT64_ELEMENTS) {
3079  int base_offset =
3080  (instr->additional_index() << element_size_shift) + additional_offset;
3081  FPURegister result = ToDoubleRegister(instr->result());
3082  if (key_is_constant) {
3083  __ Addu(scratch0(), external_pointer, constant_key << element_size_shift);
3084  } else {
3085  __ sll(scratch0(), key, shift_size);
3086  __ Addu(scratch0(), scratch0(), external_pointer);
3087  }
3088  if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
3089  elements_kind == FLOAT32_ELEMENTS) {
3090  __ lwc1(result, MemOperand(scratch0(), base_offset));
3091  __ cvt_d_s(result, result);
3092  } else { // loading doubles, not floats.
3093  __ ldc1(result, MemOperand(scratch0(), base_offset));
3094  }
3095  } else {
3096  Register result = ToRegister(instr->result());
3097  MemOperand mem_operand = PrepareKeyedOperand(
3098  key, external_pointer, key_is_constant, constant_key,
3099  element_size_shift, shift_size,
3100  instr->additional_index(), additional_offset);
3101  switch (elements_kind) {
3102  case EXTERNAL_INT8_ELEMENTS:
3103  case INT8_ELEMENTS:
3104  __ lb(result, mem_operand);
3105  break;
3106  case EXTERNAL_UINT8_CLAMPED_ELEMENTS:
3107  case EXTERNAL_UINT8_ELEMENTS:
3108  case UINT8_ELEMENTS:
3109  case UINT8_CLAMPED_ELEMENTS:
3110  __ lbu(result, mem_operand);
3111  break;
3112  case EXTERNAL_INT16_ELEMENTS:
3113  case INT16_ELEMENTS:
3114  __ lh(result, mem_operand);
3115  break;
3116  case EXTERNAL_UINT16_ELEMENTS:
3117  case UINT16_ELEMENTS:
3118  __ lhu(result, mem_operand);
3119  break;
3120  case EXTERNAL_INT32_ELEMENTS:
3121  case INT32_ELEMENTS:
3122  __ lw(result, mem_operand);
3123  break;
3124  case EXTERNAL_UINT32_ELEMENTS:
3125  case UINT32_ELEMENTS:
3126  __ lw(result, mem_operand);
3127  if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
3128  DeoptimizeIf(Ugreater_equal, instr->environment(),
3129  result, Operand(0x80000000));
3130  }
3131  break;
3132  case FLOAT32_ELEMENTS:
3133  case FLOAT64_ELEMENTS:
3134  case EXTERNAL_FLOAT32_ELEMENTS:
3135  case EXTERNAL_FLOAT64_ELEMENTS:
3136  case FAST_DOUBLE_ELEMENTS:
3137  case FAST_ELEMENTS:
3138  case FAST_SMI_ELEMENTS:
3139  case FAST_HOLEY_DOUBLE_ELEMENTS:
3140  case FAST_HOLEY_ELEMENTS:
3141  case FAST_HOLEY_SMI_ELEMENTS:
3142  case DICTIONARY_ELEMENTS:
3143  case SLOPPY_ARGUMENTS_ELEMENTS:
3144  UNREACHABLE();
3145  break;
3146  }
3147  }
3148 }
3149 
3150 
3151 void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
3152  Register elements = ToRegister(instr->elements());
3153  bool key_is_constant = instr->key()->IsConstantOperand();
3154  Register key = no_reg;
3155  DoubleRegister result = ToDoubleRegister(instr->result());
3156  Register scratch = scratch0();
3157 
3158  int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
3159 
3160  int base_offset =
3161  FixedDoubleArray::kHeaderSize - kHeapObjectTag +
3162  (instr->additional_index() << element_size_shift);
3163  if (key_is_constant) {
3164  int constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
3165  if (constant_key & 0xF0000000) {
3166  Abort(kArrayIndexConstantValueTooBig);
3167  }
3168  base_offset += constant_key << element_size_shift;
3169  }
3170  __ Addu(scratch, elements, Operand(base_offset));
3171 
3172  if (!key_is_constant) {
3173  key = ToRegister(instr->key());
3174  int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
3175  ? (element_size_shift - kSmiTagSize) : element_size_shift;
3176  __ sll(at, key, shift_size);
3177  __ Addu(scratch, scratch, at);
3178  }
3179 
3180  __ ldc1(result, MemOperand(scratch));
3181 
3182  if (instr->hydrogen()->RequiresHoleCheck()) {
3183  __ lw(scratch, MemOperand(scratch, sizeof(kHoleNanLower32)));
3184  DeoptimizeIf(eq, instr->environment(), scratch, Operand(kHoleNanUpper32));
3185  }
3186 }
3187 
3188 
3189 void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
3190  Register elements = ToRegister(instr->elements());
3191  Register result = ToRegister(instr->result());
3192  Register scratch = scratch0();
3193  Register store_base = scratch;
3194  int offset = 0;
3195 
3196  if (instr->key()->IsConstantOperand()) {
3197  LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
3198  offset = FixedArray::OffsetOfElementAt(ToInteger32(const_operand) +
3199  instr->additional_index());
3200  store_base = elements;
3201  } else {
3202  Register key = ToRegister(instr->key());
3203  // Even though the HLoadKeyed instruction forces the input
3204  // representation for the key to be an integer, the input gets replaced
3205  // during bound check elimination with the index argument to the bounds
3206  // check, which can be tagged, so that case must be handled here, too.
3207  if (instr->hydrogen()->key()->representation().IsSmi()) {
3208  __ sll(scratch, key, kPointerSizeLog2 - kSmiTagSize);
3209  __ addu(scratch, elements, scratch);
3210  } else {
3211  __ sll(scratch, key, kPointerSizeLog2);
3212  __ addu(scratch, elements, scratch);
3213  }
3214  offset = FixedArray::OffsetOfElementAt(instr->additional_index());
3215  }
3216  __ lw(result, FieldMemOperand(store_base, offset));
3217 
3218  // Check for the hole value.
3219  if (instr->hydrogen()->RequiresHoleCheck()) {
3220  if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
3221  __ SmiTst(result, scratch);
3222  DeoptimizeIf(ne, instr->environment(), scratch, Operand(zero_reg));
3223  } else {
3224  __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
3225  DeoptimizeIf(eq, instr->environment(), result, Operand(scratch));
3226  }
3227  }
3228 }
3229 
3230 
3231 void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) {
3232  if (instr->is_typed_elements()) {
3233  DoLoadKeyedExternalArray(instr);
3234  } else if (instr->hydrogen()->representation().IsDouble()) {
3235  DoLoadKeyedFixedDoubleArray(instr);
3236  } else {
3237  DoLoadKeyedFixedArray(instr);
3238  }
3239 }
3240 
3241 
3242 MemOperand LCodeGen::PrepareKeyedOperand(Register key,
3243  Register base,
3244  bool key_is_constant,
3245  int constant_key,
3246  int element_size,
3247  int shift_size,
3248  int additional_index,
3249  int additional_offset) {
3250  int base_offset = (additional_index << element_size) + additional_offset;
3251  if (key_is_constant) {
3252  return MemOperand(base,
3253  base_offset + (constant_key << element_size));
3254  }
3255 
3256  if (additional_offset != 0) {
3257  if (shift_size >= 0) {
3258  __ sll(scratch0(), key, shift_size);
3259  __ Addu(scratch0(), scratch0(), Operand(base_offset));
3260  } else {
3261  ASSERT_EQ(-1, shift_size);
3262  __ srl(scratch0(), key, 1);
3263  __ Addu(scratch0(), scratch0(), Operand(base_offset));
3264  }
3265  __ Addu(scratch0(), base, scratch0());
3266  return MemOperand(scratch0());
3267  }
3268 
3269  if (additional_index != 0) {
3270  additional_index *= 1 << (element_size - shift_size);
3271  __ Addu(scratch0(), key, Operand(additional_index));
3272  }
3273 
3274  if (additional_index == 0) {
3275  if (shift_size >= 0) {
3276  __ sll(scratch0(), key, shift_size);
3277  __ Addu(scratch0(), base, scratch0());
3278  return MemOperand(scratch0());
3279  } else {
3280  ASSERT_EQ(-1, shift_size);
3281  __ srl(scratch0(), key, 1);
3282  __ Addu(scratch0(), base, scratch0());
3283  return MemOperand(scratch0());
3284  }
3285  }
3286 
3287  if (shift_size >= 0) {
3288  __ sll(scratch0(), scratch0(), shift_size);
3289  __ Addu(scratch0(), base, scratch0());
3290  return MemOperand(scratch0());
3291  } else {
3292  ASSERT_EQ(-1, shift_size);
3293  __ srl(scratch0(), scratch0(), 1);
3294  __ Addu(scratch0(), base, scratch0());
3295  return MemOperand(scratch0());
3296  }
3297 }
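Editor's note: whichever branch is taken above, the returned MemOperand addresses base plus the same byte offset. A standalone sketch of that arithmetic (names are the editor's, not V8's; shift_size is -1 when a smi-tagged key must be halved):

    #include <cstdint>

    int32_t KeyedByteOffset(int32_t key, bool key_is_constant, int32_t constant_key,
                            int element_size, int shift_size,
                            int additional_index, int additional_offset) {
      int32_t base_offset = (additional_index << element_size) + additional_offset;
      if (key_is_constant) return base_offset + (constant_key << element_size);
      int32_t scaled_key = (shift_size >= 0) ? (key << shift_size)
                                             : (key >> 1);  // undo the smi tag
      return base_offset + scaled_key;
    }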
3298 
3299 
3300 void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
3301  ASSERT(ToRegister(instr->context()).is(cp));
3302  ASSERT(ToRegister(instr->object()).is(a1));
3303  ASSERT(ToRegister(instr->key()).is(a0));
3304 
3305  Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
3306  CallCode(ic, RelocInfo::CODE_TARGET, instr);
3307 }
3308 
3309 
3310 void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
3311  Register scratch = scratch0();
3312  Register temp = scratch1();
3313  Register result = ToRegister(instr->result());
3314 
3315  if (instr->hydrogen()->from_inlined()) {
3316  __ Subu(result, sp, 2 * kPointerSize);
3317  } else {
3318  // Check if the calling frame is an arguments adaptor frame.
3319  Label done, adapted;
3320  __ lw(scratch, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
3321  __ lw(result, MemOperand(scratch, StandardFrameConstants::kContextOffset));
3322  __ Xor(temp, result, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
3323 
3324  // Result is the frame pointer for the frame if not adapted and for the real
3325  // frame below the adaptor frame if adapted.
3326  __ Movn(result, fp, temp); // Move only if temp is not equal to zero (ne).
3327  __ Movz(result, scratch, temp); // Move only if temp is equal to zero (eq).
3328  }
3329 }
3330 
3331 
3332 void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
3333  Register elem = ToRegister(instr->elements());
3334  Register result = ToRegister(instr->result());
3335 
3336  Label done;
3337 
3338  // If no arguments adaptor frame the number of arguments is fixed.
3339  __ Addu(result, zero_reg, Operand(scope()->num_parameters()));
3340  __ Branch(&done, eq, fp, Operand(elem));
3341 
3342  // Arguments adaptor frame present. Get argument length from there.
3343  __ lw(result, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
3344  __ lw(result,
3345  MemOperand(result, ArgumentsAdaptorFrameConstants::kLengthOffset));
3346  __ SmiUntag(result);
3347 
3348  // Argument length is in result register.
3349  __ bind(&done);
3350 }
3351 
3352 
3353 void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
3354  Register receiver = ToRegister(instr->receiver());
3355  Register function = ToRegister(instr->function());
3356  Register result = ToRegister(instr->result());
3357  Register scratch = scratch0();
3358 
3359  // If the receiver is null or undefined, we have to pass the global
3360  // object as a receiver to normal functions. Values have to be
3361  // passed unchanged to builtins and strict-mode functions.
3362  Label global_object, result_in_receiver;
3363 
3364  if (!instr->hydrogen()->known_function()) {
3365  // Do not transform the receiver to object for strict mode
3366  // functions.
3367  __ lw(scratch,
3368  FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
3369  __ lw(scratch,
3370  FieldMemOperand(scratch, SharedFunctionInfo::kCompilerHintsOffset));
3371 
3372  // Do not transform the receiver to object for builtins.
3373  int32_t strict_mode_function_mask =
3374  1 << (SharedFunctionInfo::kStrictModeFunction + kSmiTagSize);
3375  int32_t native_mask = 1 << (SharedFunctionInfo::kNative + kSmiTagSize);
3376  __ And(scratch, scratch, Operand(strict_mode_function_mask | native_mask));
3377  __ Branch(&result_in_receiver, ne, scratch, Operand(zero_reg));
3378  }
3379 
3380  // Normal function. Replace undefined or null with global receiver.
3381  __ LoadRoot(scratch, Heap::kNullValueRootIndex);
3382  __ Branch(&global_object, eq, receiver, Operand(scratch));
3383  __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
3384  __ Branch(&global_object, eq, receiver, Operand(scratch));
3385 
3386  // Deoptimize if the receiver is not a JS object.
3387  __ SmiTst(receiver, scratch);
3388  DeoptimizeIf(eq, instr->environment(), scratch, Operand(zero_reg));
3389 
3390  __ GetObjectType(receiver, scratch, scratch);
3391  DeoptimizeIf(lt, instr->environment(),
3392  scratch, Operand(FIRST_SPEC_OBJECT_TYPE));
3393 
3394  __ Branch(&result_in_receiver);
3395  __ bind(&global_object);
3396  __ lw(result, FieldMemOperand(function, JSFunction::kContextOffset));
3397  __ lw(result,
3398  ContextOperand(result, Context::GLOBAL_OBJECT_INDEX));
3399  __ lw(result,
3400  FieldMemOperand(result, GlobalObject::kGlobalReceiverOffset));
3401 
3402  if (result.is(receiver)) {
3403  __ bind(&result_in_receiver);
3404  } else {
3405  Label result_ok;
3406  __ Branch(&result_ok);
3407  __ bind(&result_in_receiver);
3408  __ mov(result, receiver);
3409  __ bind(&result_ok);
3410  }
3411 }
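Editor's note: the receiver handling above follows sloppy-mode call semantics. A compact host-side sketch of the decision (illustrative names, not V8 API):

    enum ReceiverAction { kUseAsIs, kUseGlobalReceiver, kDeoptimize };

    // Strict-mode and native (builtin) functions see the receiver unchanged;
    // sloppy-mode functions get the global receiver for null/undefined, and
    // the optimized code deopts if the receiver is not a JS object.
    ReceiverAction WrapReceiver(bool strict_or_native, bool is_null_or_undefined,
                                bool is_js_object) {
      if (strict_or_native) return kUseAsIs;
      if (is_null_or_undefined) return kUseGlobalReceiver;
      return is_js_object ? kUseAsIs : kDeoptimize;
    }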
3412 
3413 
3414 void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
3415  Register receiver = ToRegister(instr->receiver());
3416  Register function = ToRegister(instr->function());
3417  Register length = ToRegister(instr->length());
3418  Register elements = ToRegister(instr->elements());
3419  Register scratch = scratch0();
3420  ASSERT(receiver.is(a0)); // Used for parameter count.
3421  ASSERT(function.is(a1)); // Required by InvokeFunction.
3422  ASSERT(ToRegister(instr->result()).is(v0));
3423 
3424  // Copy the arguments to this function possibly from the
3425  // adaptor frame below it.
3426  const uint32_t kArgumentsLimit = 1 * KB;
3427  DeoptimizeIf(hi, instr->environment(), length, Operand(kArgumentsLimit));
3428 
3429  // Push the receiver and use the register to keep the original
3430  // number of arguments.
3431  __ push(receiver);
3432  __ Move(receiver, length);
3433  // The arguments are at a one pointer size offset from elements.
3434  __ Addu(elements, elements, Operand(1 * kPointerSize));
3435 
3436  // Loop through the arguments pushing them onto the execution
3437  // stack.
3438  Label invoke, loop;
3439  // length is a small non-negative integer, due to the test above.
3440  __ Branch(USE_DELAY_SLOT, &invoke, eq, length, Operand(zero_reg));
3441  __ sll(scratch, length, 2);
3442  __ bind(&loop);
3443  __ Addu(scratch, elements, scratch);
3444  __ lw(scratch, MemOperand(scratch));
3445  __ push(scratch);
3446  __ Subu(length, length, Operand(1));
3447  __ Branch(USE_DELAY_SLOT, &loop, ne, length, Operand(zero_reg));
3448  __ sll(scratch, length, 2);
3449 
3450  __ bind(&invoke);
3451  ASSERT(instr->HasPointerMap());
3452  LPointerMap* pointers = instr->pointer_map();
3453  SafepointGenerator safepoint_generator(
3454  this, pointers, Safepoint::kLazyDeopt);
3455  // The number of arguments is stored in receiver which is a0, as expected
3456  // by InvokeFunction.
3457  ParameterCount actual(receiver);
3458  __ InvokeFunction(function, actual, CALL_FUNCTION, safepoint_generator);
3459 }
3460 
3461 
3462 void LCodeGen::DoPushArgument(LPushArgument* instr) {
3463  LOperand* argument = instr->value();
3464  if (argument->IsDoubleRegister() || argument->IsDoubleStackSlot()) {
3465  Abort(kDoPushArgumentNotImplementedForDoubleType);
3466  } else {
3467  Register argument_reg = EmitLoadRegister(argument, at);
3468  __ push(argument_reg);
3469  }
3470 }
3471 
3472 
3473 void LCodeGen::DoDrop(LDrop* instr) {
3474  __ Drop(instr->count());
3475 }
3476 
3477 
3478 void LCodeGen::DoThisFunction(LThisFunction* instr) {
3479  Register result = ToRegister(instr->result());
3480  __ lw(result, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
3481 }
3482 
3483 
3484 void LCodeGen::DoContext(LContext* instr) {
3485  // If there is a non-return use, the context must be moved to a register.
3486  Register result = ToRegister(instr->result());
3487  if (info()->IsOptimizing()) {
3488  __ lw(result, MemOperand(fp, StandardFrameConstants::kContextOffset));
3489  } else {
3490  // If there is no frame, the context must be in cp.
3491  ASSERT(result.is(cp));
3492  }
3493 }
3494 
3495 
3496 void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
3497  ASSERT(ToRegister(instr->context()).is(cp));
3498  __ li(scratch0(), instr->hydrogen()->pairs());
3499  __ li(scratch1(), Operand(Smi::FromInt(instr->hydrogen()->flags())));
3500  // The context is the first argument.
3501  __ Push(cp, scratch0(), scratch1());
3502  CallRuntime(Runtime::kHiddenDeclareGlobals, 3, instr);
3503 }
3504 
3505 
3506 void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
3507  int formal_parameter_count,
3508  int arity,
3509  LInstruction* instr,
3510  A1State a1_state) {
3511  bool dont_adapt_arguments =
3512  formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel;
3513  bool can_invoke_directly =
3514  dont_adapt_arguments || formal_parameter_count == arity;
3515 
3516  LPointerMap* pointers = instr->pointer_map();
3517 
3518  if (can_invoke_directly) {
3519  if (a1_state == A1_UNINITIALIZED) {
3520  __ li(a1, function);
3521  }
3522 
3523  // Change context.
3524  __ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
3525 
3526  // Set a0 to the arguments count if adaptation is not needed. Assumes that
3527  // a0 is available to write to at this point.
3528  if (dont_adapt_arguments) {
3529  __ li(a0, Operand(arity));
3530  }
3531 
3532  // Invoke function.
3533  __ lw(at, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
3534  __ Call(at);
3535 
3536  // Set up deoptimization.
3537  RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
3538  } else {
3539  SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
3540  ParameterCount count(arity);
3541  ParameterCount expected(formal_parameter_count);
3542  __ InvokeFunction(function, expected, count, CALL_FUNCTION, generator);
3543  }
3544 }
3545 
3546 
3547 void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
3548  ASSERT(instr->context() != NULL);
3549  ASSERT(ToRegister(instr->context()).is(cp));
3550  Register input = ToRegister(instr->value());
3551  Register result = ToRegister(instr->result());
3552  Register scratch = scratch0();
3553 
3554  // Deoptimize if not a heap number.
3555  __ lw(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
3556  __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
3557  DeoptimizeIf(ne, instr->environment(), scratch, Operand(at));
3558 
3559  Label done;
3560  Register exponent = scratch0();
3561  scratch = no_reg;
3562  __ lw(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));
3563  // Check the sign of the argument. If the argument is positive, just
3564  // return it.
3565  __ Move(result, input);
3566  __ And(at, exponent, Operand(HeapNumber::kSignMask));
3567  __ Branch(&done, eq, at, Operand(zero_reg));
3568 
3569  // Input is negative. Reverse its sign.
3570  // Preserve the value of all registers.
3571  {
3572  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
3573 
3574  // Registers were saved at the safepoint, so we can use
3575  // many scratch registers.
3576  Register tmp1 = input.is(a1) ? a0 : a1;
3577  Register tmp2 = input.is(a2) ? a0 : a2;
3578  Register tmp3 = input.is(a3) ? a0 : a3;
3579  Register tmp4 = input.is(t0) ? a0 : t0;
3580 
3581  // exponent: floating point exponent value.
3582 
3583  Label allocated, slow;
3584  __ LoadRoot(tmp4, Heap::kHeapNumberMapRootIndex);
3585  __ AllocateHeapNumber(tmp1, tmp2, tmp3, tmp4, &slow);
3586  __ Branch(&allocated);
3587 
3588  // Slow case: Call the runtime system to do the number allocation.
3589  __ bind(&slow);
3590 
3591  CallRuntimeFromDeferred(Runtime::kHiddenAllocateHeapNumber, 0, instr,
3592  instr->context());
3593  // Set the pointer to the new heap number in tmp.
3594  if (!tmp1.is(v0))
3595  __ mov(tmp1, v0);
3596  // Restore input_reg after call to runtime.
3597  __ LoadFromSafepointRegisterSlot(input, input);
3598  __ lw(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));
3599 
3600  __ bind(&allocated);
3601  // exponent: floating point exponent value.
3602  // tmp1: allocated heap number.
3603  __ And(exponent, exponent, Operand(~HeapNumber::kSignMask));
3604  __ sw(exponent, FieldMemOperand(tmp1, HeapNumber::kExponentOffset));
3605  __ lw(tmp2, FieldMemOperand(input, HeapNumber::kMantissaOffset));
3606  __ sw(tmp2, FieldMemOperand(tmp1, HeapNumber::kMantissaOffset));
3607 
3608  __ StoreToSafepointRegisterSlot(tmp1, result);
3609  }
3610 
3611  __ bind(&done);
3612 }
3613 
3614 
3615 void LCodeGen::EmitIntegerMathAbs(LMathAbs* instr) {
3616  Register input = ToRegister(instr->value());
3617  Register result = ToRegister(instr->result());
3618  Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
3619  Label done;
3620  __ Branch(USE_DELAY_SLOT, &done, ge, input, Operand(zero_reg));
3621  __ mov(result, input);
3622  __ subu(result, zero_reg, input);
3623  // Overflow if result is still negative, i.e. 0x80000000.
3624  DeoptimizeIf(lt, instr->environment(), result, Operand(zero_reg));
3625  __ bind(&done);
3626 }
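Editor's note: the deopt above exists because one input has no 32-bit absolute value. A host-side sketch of the same rule (illustrative, not V8 code):

    #include <cstdint>
    #include <stdexcept>

    int32_t Int32Abs(int32_t v) {
      if (v >= 0) return v;
      uint32_t negated = 0u - static_cast<uint32_t>(v);  // two's-complement negate
      if (static_cast<int32_t>(negated) < 0) {           // still negative: v was INT32_MIN
        throw std::overflow_error("abs(INT32_MIN) does not fit in int32 (deopt)");
      }
      return static_cast<int32_t>(negated);
    }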
3627 
3628 
3629 void LCodeGen::DoMathAbs(LMathAbs* instr) {
3630  // Class for deferred case.
3631  class DeferredMathAbsTaggedHeapNumber V8_FINAL : public LDeferredCode {
3632  public:
3633  DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen, LMathAbs* instr)
3634  : LDeferredCode(codegen), instr_(instr) { }
3635  virtual void Generate() V8_OVERRIDE {
3636  codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
3637  }
3638  virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
3639  private:
3640  LMathAbs* instr_;
3641  };
3642 
3643  Representation r = instr->hydrogen()->value()->representation();
3644  if (r.IsDouble()) {
3645  FPURegister input = ToDoubleRegister(instr->value());
3646  FPURegister result = ToDoubleRegister(instr->result());
3647  __ abs_d(result, input);
3648  } else if (r.IsSmiOrInteger32()) {
3649  EmitIntegerMathAbs(instr);
3650  } else {
3651  // Representation is tagged.
3652  DeferredMathAbsTaggedHeapNumber* deferred =
3653  new(zone()) DeferredMathAbsTaggedHeapNumber(this, instr);
3654  Register input = ToRegister(instr->value());
3655  // Smi check.
3656  __ JumpIfNotSmi(input, deferred->entry());
3657  // If smi, handle it directly.
3658  EmitIntegerMathAbs(instr);
3659  __ bind(deferred->exit());
3660  }
3661 }
3662 
3663 
3664 void LCodeGen::DoMathFloor(LMathFloor* instr) {
3665  DoubleRegister input = ToDoubleRegister(instr->value());
3666  Register result = ToRegister(instr->result());
3667  Register scratch1 = scratch0();
3668  Register except_flag = ToRegister(instr->temp());
3669 
3670  __ EmitFPUTruncate(kRoundToMinusInf,
3671  result,
3672  input,
3673  scratch1,
3674  double_scratch0(),
3675  except_flag);
3676 
3677  // Deopt if the operation did not succeed.
3678  DeoptimizeIf(ne, instr->environment(), except_flag, Operand(zero_reg));
3679 
3680  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3681  // Test for -0.
3682  Label done;
3683  __ Branch(&done, ne, result, Operand(zero_reg));
3684  __ mfc1(scratch1, input.high());
3685  __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
3686  DeoptimizeIf(ne, instr->environment(), scratch1, Operand(zero_reg));
3687  __ bind(&done);
3688  }
3689 }
3690 
3691 
3692 void LCodeGen::DoMathRound(LMathRound* instr) {
3693  DoubleRegister input = ToDoubleRegister(instr->value());
3694  Register result = ToRegister(instr->result());
3695  DoubleRegister double_scratch1 = ToDoubleRegister(instr->temp());
3696  Register scratch = scratch0();
3697  Label done, check_sign_on_zero;
3698 
3699  // Extract exponent bits.
3700  __ mfc1(result, input.high());
3701  __ Ext(scratch,
3702  result,
3703  HeapNumber::kExponentShift,
3704  HeapNumber::kExponentBits);
3705 
3706  // If the number is in ]-0.5, +0.5[, the result is +/- 0.
3707  Label skip1;
3708  __ Branch(&skip1, gt, scratch, Operand(HeapNumber::kExponentBias - 2));
3709  __ mov(result, zero_reg);
3710  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3711  __ Branch(&check_sign_on_zero);
3712  } else {
3713  __ Branch(&done);
3714  }
3715  __ bind(&skip1);
3716 
3717  // The following conversion will not work with numbers
3718  // outside of ]-2^32, 2^32[.
3719  DeoptimizeIf(ge, instr->environment(), scratch,
3720  Operand(HeapNumber::kExponentBias + 32));
3721 
3722  // Save the original sign for later comparison.
3723  __ And(scratch, result, Operand(HeapNumber::kSignMask));
3724 
3725  __ Move(double_scratch0(), 0.5);
3726  __ add_d(double_scratch0(), input, double_scratch0());
3727 
3728  // Check sign of the result: if the sign changed, the input
3729  // value was in [-0.5, 0[ and the result should be -0.
3730  __ mfc1(result, double_scratch0().high());
3731  __ Xor(result, result, Operand(scratch));
3732  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3733  // ARM uses 'mi' here, which is 'lt'
3734  DeoptimizeIf(lt, instr->environment(), result,
3735  Operand(zero_reg));
3736  } else {
3737  Label skip2;
3738  // ARM uses 'mi' here, which is 'lt'
3739  // Negating it results in 'ge'
3740  __ Branch(&skip2, ge, result, Operand(zero_reg));
3741  __ mov(result, zero_reg);
3742  __ Branch(&done);
3743  __ bind(&skip2);
3744  }
3745 
3746  Register except_flag = scratch;
3747  __ EmitFPUTruncate(kRoundToMinusInf,
3748  result,
3749  double_scratch0(),
3750  at,
3751  double_scratch1,
3752  except_flag);
3753 
3754  DeoptimizeIf(ne, instr->environment(), except_flag, Operand(zero_reg));
3755 
3756  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3757  // Test for -0.
3758  __ Branch(&done, ne, result, Operand(zero_reg));
3759  __ bind(&check_sign_on_zero);
3760  __ mfc1(scratch, input.high());
3761  __ And(scratch, scratch, Operand(HeapNumber::kSignMask));
3762  DeoptimizeIf(ne, instr->environment(), scratch, Operand(zero_reg));
3763  }
3764  __ bind(&done);
3765 }
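Editor's note: the sequence above implements the ES5 definition of Math.round, which is floor(x + 0.5) except that inputs in [-0.5, 0) produce -0. A spec-level sketch of the value being computed (illustrative only; the lithium code deopts instead of materializing -0 when the result must stay an integer):

    #include <cmath>

    double MathRoundSketch(double x) {
      if (x >= -0.5 && x < 0.0) return -0.0;  // the sign-on-zero case checked above
      return std::floor(x + 0.5);
    }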
3766 
3767 
3768 void LCodeGen::DoMathSqrt(LMathSqrt* instr) {
3769  DoubleRegister input = ToDoubleRegister(instr->value());
3770  DoubleRegister result = ToDoubleRegister(instr->result());
3771  __ sqrt_d(result, input);
3772 }
3773 
3774 
3775 void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
3776  DoubleRegister input = ToDoubleRegister(instr->value());
3777  DoubleRegister result = ToDoubleRegister(instr->result());
3778  DoubleRegister temp = ToDoubleRegister(instr->temp());
3779 
3780  ASSERT(!input.is(result));
3781 
3782  // Note that according to ECMA-262 15.8.2.13:
3783  // Math.pow(-Infinity, 0.5) == Infinity
3784  // Math.sqrt(-Infinity) == NaN
3785  Label done;
3786  __ Move(temp, -V8_INFINITY);
3787  __ BranchF(USE_DELAY_SLOT, &done, NULL, eq, temp, input);
3788  // Set up Infinity in the delay slot.
3789  // result is overwritten if the branch is not taken.
3790  __ neg_d(result, temp);
3791 
3792  // Add +0 to convert -0 to +0.
3793  __ add_d(result, input, kDoubleRegZero);
3794  __ sqrt_d(result, result);
3795  __ bind(&done);
3796 }
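Editor's note: a host-side sketch of the two special cases handled above, namely Math.pow(-Infinity, 0.5) == Infinity and -0 being folded to +0 before the square root (illustrative, not V8 code):

    #include <cmath>
    #include <limits>

    double PowHalfSketch(double x) {
      if (x == -std::numeric_limits<double>::infinity()) {
        return std::numeric_limits<double>::infinity();  // sqrt alone would give NaN
      }
      return std::sqrt(x + 0.0);  // -0 + 0 == +0, so the result cannot be -0
    }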
3797 
3798 
3799 void LCodeGen::DoPower(LPower* instr) {
3800  Representation exponent_type = instr->hydrogen()->right()->representation();
3801  // Having marked this as a call, we can use any registers.
3802  // Just make sure that the input/output registers are the expected ones.
3803  ASSERT(!instr->right()->IsDoubleRegister() ||
3804  ToDoubleRegister(instr->right()).is(f4));
3805  ASSERT(!instr->right()->IsRegister() ||
3806  ToRegister(instr->right()).is(a2));
3807  ASSERT(ToDoubleRegister(instr->left()).is(f2));
3808  ASSERT(ToDoubleRegister(instr->result()).is(f0));
3809 
3810  if (exponent_type.IsSmi()) {
3811  MathPowStub stub(MathPowStub::TAGGED);
3812  __ CallStub(&stub);
3813  } else if (exponent_type.IsTagged()) {
3814  Label no_deopt;
3815  __ JumpIfSmi(a2, &no_deopt);
3816  __ lw(t3, FieldMemOperand(a2, HeapObject::kMapOffset));
3817  __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
3818  DeoptimizeIf(ne, instr->environment(), t3, Operand(at));
3819  __ bind(&no_deopt);
3820  MathPowStub stub(MathPowStub::TAGGED);
3821  __ CallStub(&stub);
3822  } else if (exponent_type.IsInteger32()) {
3823  MathPowStub stub(MathPowStub::INTEGER);
3824  __ CallStub(&stub);
3825  } else {
3826  ASSERT(exponent_type.IsDouble());
3827  MathPowStub stub(MathPowStub::DOUBLE);
3828  __ CallStub(&stub);
3829  }
3830 }
3831 
3832 
3833 void LCodeGen::DoMathExp(LMathExp* instr) {
3834  DoubleRegister input = ToDoubleRegister(instr->value());
3835  DoubleRegister result = ToDoubleRegister(instr->result());
3836  DoubleRegister double_scratch1 = ToDoubleRegister(instr->double_temp());
3837  DoubleRegister double_scratch2 = double_scratch0();
3838  Register temp1 = ToRegister(instr->temp1());
3839  Register temp2 = ToRegister(instr->temp2());
3840 
3841  MathExpGenerator::EmitMathExp(
3842  masm(), input, result, double_scratch1, double_scratch2,
3843  temp1, temp2, scratch0());
3844 }
3845 
3846 
3847 void LCodeGen::DoMathLog(LMathLog* instr) {
3848  __ PrepareCallCFunction(0, 1, scratch0());
3849  __ MovToFloatParameter(ToDoubleRegister(instr->value()));
3850  __ CallCFunction(ExternalReference::math_log_double_function(isolate()),
3851  0, 1);
3852  __ MovFromFloatResult(ToDoubleRegister(instr->result()));
3853 }
3854 
3855 
3856 void LCodeGen::DoMathClz32(LMathClz32* instr) {
3857  Register input = ToRegister(instr->value());
3858  Register result = ToRegister(instr->result());
3859  __ Clz(result, input);
3860 }
3861 
3862 
3863 void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
3864  ASSERT(ToRegister(instr->context()).is(cp));
3865  ASSERT(ToRegister(instr->function()).is(a1));
3866  ASSERT(instr->HasPointerMap());
3867 
3868  Handle<JSFunction> known_function = instr->hydrogen()->known_function();
3869  if (known_function.is_null()) {
3870  LPointerMap* pointers = instr->pointer_map();
3871  SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
3872  ParameterCount count(instr->arity());
3873  __ InvokeFunction(a1, count, CALL_FUNCTION, generator);
3874  } else {
3875  CallKnownFunction(known_function,
3876  instr->hydrogen()->formal_parameter_count(),
3877  instr->arity(),
3878  instr,
3879  A1_CONTAINS_TARGET);
3880  }
3881 }
3882 
3883 
3884 void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
3885  ASSERT(ToRegister(instr->result()).is(v0));
3886 
3887  LPointerMap* pointers = instr->pointer_map();
3888  SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
3889 
3890  if (instr->target()->IsConstantOperand()) {
3891  LConstantOperand* target = LConstantOperand::cast(instr->target());
3892  Handle<Code> code = Handle<Code>::cast(ToHandle(target));
3893  generator.BeforeCall(__ CallSize(code, RelocInfo::CODE_TARGET));
3894  __ Call(code, RelocInfo::CODE_TARGET);
3895  } else {
3896  ASSERT(instr->target()->IsRegister());
3897  Register target = ToRegister(instr->target());
3898  generator.BeforeCall(__ CallSize(target));
3899  __ Addu(target, target, Operand(Code::kHeaderSize - kHeapObjectTag));
3900  __ Call(target);
3901  }
3902  generator.AfterCall();
3903 }
3904 
3905 
3906 void LCodeGen::DoCallJSFunction(LCallJSFunction* instr) {
3907  ASSERT(ToRegister(instr->function()).is(a1));
3908  ASSERT(ToRegister(instr->result()).is(v0));
3909 
3910  if (instr->hydrogen()->pass_argument_count()) {
3911  __ li(a0, Operand(instr->arity()));
3912  }
3913 
3914  // Change context.
3915  __ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
3916 
3917  // Load the code entry address
3918  __ lw(at, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
3919  __ Call(at);
3920 
3921  RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
3922 }
3923 
3924 
3925 void LCodeGen::DoCallFunction(LCallFunction* instr) {
3926  ASSERT(ToRegister(instr->context()).is(cp));
3927  ASSERT(ToRegister(instr->function()).is(a1));
3928  ASSERT(ToRegister(instr->result()).is(v0));
3929 
3930  int arity = instr->arity();
3931  CallFunctionStub stub(arity, instr->hydrogen()->function_flags());
3932  CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
3933 }
3934 
3935 
3936 void LCodeGen::DoCallNew(LCallNew* instr) {
3937  ASSERT(ToRegister(instr->context()).is(cp));
3938  ASSERT(ToRegister(instr->constructor()).is(a1));
3939  ASSERT(ToRegister(instr->result()).is(v0));
3940 
3941  __ li(a0, Operand(instr->arity()));
3942  // No cell in a2 for construct type feedback in optimized code
3943  __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
3944  CallConstructStub stub(NO_CALL_FUNCTION_FLAGS);
3945  CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
3946 }
3947 
3948 
3949 void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
3950  ASSERT(ToRegister(instr->context()).is(cp));
3951  ASSERT(ToRegister(instr->constructor()).is(a1));
3952  ASSERT(ToRegister(instr->result()).is(v0));
3953 
3954  __ li(a0, Operand(instr->arity()));
3955  __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
3956  ElementsKind kind = instr->hydrogen()->elements_kind();
3957  AllocationSiteOverrideMode override_mode =
3958  (AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE)
3959  ? DISABLE_ALLOCATION_SITES
3960  : DONT_OVERRIDE;
3961 
3962  if (instr->arity() == 0) {
3963  ArrayNoArgumentConstructorStub stub(kind, override_mode);
3964  CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
3965  } else if (instr->arity() == 1) {
3966  Label done;
3967  if (IsFastPackedElementsKind(kind)) {
3968  Label packed_case;
3969  // We might need a change here,
3970  // look at the first argument.
3971  __ lw(t1, MemOperand(sp, 0));
3972  __ Branch(&packed_case, eq, t1, Operand(zero_reg));
3973 
3974  ElementsKind holey_kind = GetHoleyElementsKind(kind);
3975  ArraySingleArgumentConstructorStub stub(holey_kind, override_mode);
3976  CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
3977  __ jmp(&done);
3978  __ bind(&packed_case);
3979  }
3980 
3981  ArraySingleArgumentConstructorStub stub(kind, override_mode);
3982  CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
3983  __ bind(&done);
3984  } else {
3985  ArrayNArgumentsConstructorStub stub(kind, override_mode);
3986  CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
3987  }
3988 }
3989 
3990 
3991 void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
3992  CallRuntime(instr->function(), instr->arity(), instr);
3993 }
3994 
3995 
3996 void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) {
3997  Register function = ToRegister(instr->function());
3998  Register code_object = ToRegister(instr->code_object());
3999  __ Addu(code_object, code_object,
4000  Operand(Code::kHeaderSize - kHeapObjectTag));
4001  __ sw(code_object,
4002  FieldMemOperand(function, JSFunction::kCodeEntryOffset));
4003 }
4004 
4005 
4006 void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) {
4007  Register result = ToRegister(instr->result());
4008  Register base = ToRegister(instr->base_object());
4009  if (instr->offset()->IsConstantOperand()) {
4010  LConstantOperand* offset = LConstantOperand::cast(instr->offset());
4011  __ Addu(result, base, Operand(ToInteger32(offset)));
4012  } else {
4013  Register offset = ToRegister(instr->offset());
4014  __ Addu(result, base, offset);
4015  }
4016 }
4017 
4018 
4019 void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
4020  Representation representation = instr->representation();
4021 
4022  Register object = ToRegister(instr->object());
4023  Register scratch = scratch0();
4024  HObjectAccess access = instr->hydrogen()->access();
4025  int offset = access.offset();
4026 
4027  if (access.IsExternalMemory()) {
4028  Register value = ToRegister(instr->value());
4029  MemOperand operand = MemOperand(object, offset);
4030  __ Store(value, operand, representation);
4031  return;
4032  }
4033 
4034  Handle<Map> transition = instr->transition();
4035  SmiCheck check_needed =
4036  instr->hydrogen()->value()->IsHeapObject()
4037  ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
4038 
4039  ASSERT(!(representation.IsSmi() &&
4040  instr->value()->IsConstantOperand() &&
4041  !IsSmi(LConstantOperand::cast(instr->value()))));
4042  if (representation.IsHeapObject()) {
4043  Register value = ToRegister(instr->value());
4044  if (!instr->hydrogen()->value()->type().IsHeapObject()) {
4045  __ SmiTst(value, scratch);
4046  DeoptimizeIf(eq, instr->environment(), scratch, Operand(zero_reg));
4047 
4048  // We know that value is a smi now, so we can omit the check below.
4049  check_needed = OMIT_SMI_CHECK;
4050  }
4051  } else if (representation.IsDouble()) {
4052  ASSERT(transition.is_null());
4053  ASSERT(access.IsInobject());
4054  ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
4055  DoubleRegister value = ToDoubleRegister(instr->value());
4056  __ sdc1(value, FieldMemOperand(object, offset));
4057  return;
4058  }
4059 
4060  if (!transition.is_null()) {
4061  __ li(scratch, Operand(transition));
4062  __ sw(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
4063  if (instr->hydrogen()->NeedsWriteBarrierForMap()) {
4064  Register temp = ToRegister(instr->temp());
4065  // Update the write barrier for the map field.
4066  __ RecordWriteField(object,
4067  HeapObject::kMapOffset,
4068  scratch,
4069  temp,
4070  GetRAState(),
4071  kSaveFPRegs,
4072  OMIT_REMEMBERED_SET,
4073  OMIT_SMI_CHECK);
4074  }
4075  }
4076 
4077  // Do the store.
4078  Register value = ToRegister(instr->value());
4079  if (access.IsInobject()) {
4080  MemOperand operand = FieldMemOperand(object, offset);
4081  __ Store(value, operand, representation);
4082  if (instr->hydrogen()->NeedsWriteBarrier()) {
4083  // Update the write barrier for the object for in-object properties.
4084  __ RecordWriteField(object,
4085  offset,
4086  value,
4087  scratch,
4088  GetRAState(),
4089  kSaveFPRegs,
4090  EMIT_REMEMBERED_SET,
4091  check_needed);
4092  }
4093  } else {
4094  __ lw(scratch, FieldMemOperand(object, JSObject::kPropertiesOffset));
4095  MemOperand operand = FieldMemOperand(scratch, offset);
4096  __ Store(value, operand, representation);
4097  if (instr->hydrogen()->NeedsWriteBarrier()) {
4098  // Update the write barrier for the properties array.
4099  // object is used as a scratch register.
4100  __ RecordWriteField(scratch,
4101  offset,
4102  value,
4103  object,
4104  GetRAState(),
4105  kSaveFPRegs,
4106  EMIT_REMEMBERED_SET,
4107  check_needed);
4108  }
4109  }
4110 }
4111 
4112 
4113 void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
4114  ASSERT(ToRegister(instr->context()).is(cp));
4115  ASSERT(ToRegister(instr->object()).is(a1));
4116  ASSERT(ToRegister(instr->value()).is(a0));
4117 
4118  // Name is always in a2.
4119  __ li(a2, Operand(instr->name()));
4120  Handle<Code> ic = StoreIC::initialize_stub(isolate(), instr->strict_mode());
4121  CallCode(ic, RelocInfo::CODE_TARGET, instr);
4122 }
4123 
4124 
4125 void LCodeGen::ApplyCheckIf(Condition condition,
4126  LBoundsCheck* check,
4127  Register src1,
4128  const Operand& src2) {
4129  if (FLAG_debug_code && check->hydrogen()->skip_check()) {
4130  Label done;
4131  __ Branch(&done, NegateCondition(condition), src1, src2);
4132  __ stop("eliminated bounds check failed");
4133  __ bind(&done);
4134  } else {
4135  DeoptimizeIf(condition, check->environment(), src1, src2);
4136  }
4137 }
4138 
4139 
4140 void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
4141  if (instr->hydrogen()->skip_check()) return;
4142 
4143  Condition condition = instr->hydrogen()->allow_equality() ? hi : hs;
4144  if (instr->index()->IsConstantOperand()) {
4145  int constant_index =
4146  ToInteger32(LConstantOperand::cast(instr->index()));
4147  if (instr->hydrogen()->length()->representation().IsSmi()) {
4148  __ li(at, Operand(Smi::FromInt(constant_index)));
4149  } else {
4150  __ li(at, Operand(constant_index));
4151  }
4152  ApplyCheckIf(condition,
4153  instr,
4154  at,
4155  Operand(ToRegister(instr->length())));
4156  } else {
4157  ApplyCheckIf(condition,
4158  instr,
4159  ToRegister(instr->index()),
4160  Operand(ToRegister(instr->length())));
4161  }
4162 }
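Editor's note: the hi/hs conditions above encode the bounds check as one unsigned comparison; negative indices wrap to large unsigned values and therefore fail the same test. A standalone sketch (illustrative names, not V8 API):

    #include <cstdint>

    bool BoundsCheckFails(int32_t index, int32_t length, bool allow_equality) {
      uint32_t u_index = static_cast<uint32_t>(index);    // a negative index becomes huge
      uint32_t u_length = static_cast<uint32_t>(length);
      return allow_equality ? (u_index > u_length) : (u_index >= u_length);
    }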
4163 
4164 
4165 void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
4166  Register external_pointer = ToRegister(instr->elements());
4167  Register key = no_reg;
4168  ElementsKind elements_kind = instr->elements_kind();
4169  bool key_is_constant = instr->key()->IsConstantOperand();
4170  int constant_key = 0;
4171  if (key_is_constant) {
4172  constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
4173  if (constant_key & 0xF0000000) {
4174  Abort(kArrayIndexConstantValueTooBig);
4175  }
4176  } else {
4177  key = ToRegister(instr->key());
4178  }
4179  int element_size_shift = ElementsKindToShiftSize(elements_kind);
4180  int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
4181  ? (element_size_shift - kSmiTagSize) : element_size_shift;
4182  int additional_offset = IsFixedTypedArrayElementsKind(elements_kind)
4183  ? FixedTypedArrayBase::kDataOffset - kHeapObjectTag
4184  : 0;
4185 
4186  if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
4187  elements_kind == FLOAT32_ELEMENTS ||
4188  elements_kind == EXTERNAL_FLOAT64_ELEMENTS ||
4189  elements_kind == FLOAT64_ELEMENTS) {
4190  int base_offset =
4191  (instr->additional_index() << element_size_shift) + additional_offset;
4192  Register address = scratch0();
4193  FPURegister value(ToDoubleRegister(instr->value()));
4194  if (key_is_constant) {
4195  if (constant_key != 0) {
4196  __ Addu(address, external_pointer,
4197  Operand(constant_key << element_size_shift));
4198  } else {
4199  address = external_pointer;
4200  }
4201  } else {
4202  __ sll(address, key, shift_size);
4203  __ Addu(address, external_pointer, address);
4204  }
4205 
4206  if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
4207  elements_kind == FLOAT32_ELEMENTS) {
4208  __ cvt_s_d(double_scratch0(), value);
4209  __ swc1(double_scratch0(), MemOperand(address, base_offset));
4210  } else { // Storing doubles, not floats.
4211  __ sdc1(value, MemOperand(address, base_offset));
4212  }
4213  } else {
4214  Register value(ToRegister(instr->value()));
4215  MemOperand mem_operand = PrepareKeyedOperand(
4216  key, external_pointer, key_is_constant, constant_key,
4217  element_size_shift, shift_size,
4218  instr->additional_index(), additional_offset);
4219  switch (elements_kind) {
4220  case EXTERNAL_UINT8_CLAMPED_ELEMENTS:
4221  case EXTERNAL_INT8_ELEMENTS:
4222  case EXTERNAL_UINT8_ELEMENTS:
4223  case UINT8_ELEMENTS:
4224  case UINT8_CLAMPED_ELEMENTS:
4225  case INT8_ELEMENTS:
4226  __ sb(value, mem_operand);
4227  break;
4228  case EXTERNAL_INT16_ELEMENTS:
4229  case EXTERNAL_UINT16_ELEMENTS:
4230  case INT16_ELEMENTS:
4231  case UINT16_ELEMENTS:
4232  __ sh(value, mem_operand);
4233  break;
4234  case EXTERNAL_INT32_ELEMENTS:
4235  case EXTERNAL_UINT32_ELEMENTS:
4236  case INT32_ELEMENTS:
4237  case UINT32_ELEMENTS:
4238  __ sw(value, mem_operand);
4239  break;
4240  case FLOAT32_ELEMENTS:
4241  case FLOAT64_ELEMENTS:
4242  case EXTERNAL_FLOAT32_ELEMENTS:
4243  case EXTERNAL_FLOAT64_ELEMENTS:
4244  case FAST_DOUBLE_ELEMENTS:
4245  case FAST_ELEMENTS:
4246  case FAST_SMI_ELEMENTS:
4247  case FAST_HOLEY_DOUBLE_ELEMENTS:
4248  case FAST_HOLEY_ELEMENTS:
4249  case FAST_HOLEY_SMI_ELEMENTS:
4250  case DICTIONARY_ELEMENTS:
4251  case SLOPPY_ARGUMENTS_ELEMENTS:
4252  UNREACHABLE();
4253  break;
4254  }
4255  }
4256 }
4257 
4258 
4259 void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
4260  DoubleRegister value = ToDoubleRegister(instr->value());
4261  Register elements = ToRegister(instr->elements());
4262  Register scratch = scratch0();
4263  DoubleRegister double_scratch = double_scratch0();
4264  bool key_is_constant = instr->key()->IsConstantOperand();
4265  Label not_nan, done;
4266 
4267  // Calculate the effective address of the slot in the array to store the
4268  // double value.
4269  int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
4270  if (key_is_constant) {
4271  int constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
4272  if (constant_key & 0xF0000000) {
4273  Abort(kArrayIndexConstantValueTooBig);
4274  }
4275  __ Addu(scratch, elements,
4276  Operand((constant_key << element_size_shift) +
4277  FixedDoubleArray::kHeaderSize - kHeapObjectTag));
4278  } else {
4279  int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
4280  ? (element_size_shift - kSmiTagSize) : element_size_shift;
4281  __ Addu(scratch, elements,
4282  Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
4283  __ sll(at, ToRegister(instr->key()), shift_size);
4284  __ Addu(scratch, scratch, at);
4285  }
4286 
4287  if (instr->NeedsCanonicalization()) {
4288  Label is_nan;
4289  // Check for NaN. All NaNs must be canonicalized.
4290  __ BranchF(NULL, &is_nan, eq, value, value);
4291  __ Branch(&not_nan);
4292 
4293  // Only load canonical NaN if the comparison above set the overflow.
4294  __ bind(&is_nan);
4295  __ LoadRoot(at, Heap::kNanValueRootIndex);
4296  __ ldc1(double_scratch, FieldMemOperand(at, HeapNumber::kValueOffset));
4297  __ sdc1(double_scratch, MemOperand(scratch, instr->additional_index() <<
4298  element_size_shift));
4299  __ Branch(&done);
4300  }
4301 
4302  __ bind(&not_nan);
4303  __ sdc1(value, MemOperand(scratch, instr->additional_index() <<
4304  element_size_shift));
4305  __ bind(&done);
4306 }
4307 
4308 
4309 void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
4310  Register value = ToRegister(instr->value());
4311  Register elements = ToRegister(instr->elements());
4312  Register key = instr->key()->IsRegister() ? ToRegister(instr->key())
4313  : no_reg;
4314  Register scratch = scratch0();
4315  Register store_base = scratch;
4316  int offset = 0;
4317 
4318  // Do the store.
4319  if (instr->key()->IsConstantOperand()) {
4320  ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
4321  LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
4322  offset = FixedArray::OffsetOfElementAt(ToInteger32(const_operand) +
4323  instr->additional_index());
4324  store_base = elements;
4325  } else {
4326  // Even though the HLoadKeyed instruction forces the input
4327  // representation for the key to be an integer, the input gets replaced
4328  // during bounds check elimination with the index argument to the bounds
4329  // check, which can be tagged, so that case must be handled here, too.
4330  if (instr->hydrogen()->key()->representation().IsSmi()) {
4331  __ sll(scratch, key, kPointerSizeLog2 - kSmiTagSize);
4332  __ addu(scratch, elements, scratch);
4333  } else {
4334  __ sll(scratch, key, kPointerSizeLog2);
4335  __ addu(scratch, elements, scratch);
4336  }
4337  offset = FixedArray::OffsetOfElementAt(instr->additional_index());
4338  }
4339  __ sw(value, FieldMemOperand(store_base, offset));
4340 
4341  if (instr->hydrogen()->NeedsWriteBarrier()) {
4342  SmiCheck check_needed =
4343  instr->hydrogen()->value()->IsHeapObject()
4344  ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
4345  // Compute address of modified element and store it into key register.
4346  __ Addu(key, store_base, Operand(offset - kHeapObjectTag));
4347  __ RecordWrite(elements,
4348  key,
4349  value,
4350  GetRAState(),
4351  kSaveFPRegs,
4352  EMIT_REMEMBERED_SET,
4353  check_needed);
4354  }
4355 }
4356 
4357 
4358 void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) {
4359  // By cases: external, fast double
4360  if (instr->is_typed_elements()) {
4361  DoStoreKeyedExternalArray(instr);
4362  } else if (instr->hydrogen()->value()->representation().IsDouble()) {
4363  DoStoreKeyedFixedDoubleArray(instr);
4364  } else {
4365  DoStoreKeyedFixedArray(instr);
4366  }
4367 }
4368 
4369 
4370 void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
4371  ASSERT(ToRegister(instr->context()).is(cp));
4372  ASSERT(ToRegister(instr->object()).is(a2));
4373  ASSERT(ToRegister(instr->key()).is(a1));
4374  ASSERT(ToRegister(instr->value()).is(a0));
4375 
4376  Handle<Code> ic = (instr->strict_mode() == STRICT)
4377  ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
4378  : isolate()->builtins()->KeyedStoreIC_Initialize();
4379  CallCode(ic, RelocInfo::CODE_TARGET, instr);
4380 }
4381 
4382 
4383 void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
4384  Register object_reg = ToRegister(instr->object());
4385  Register scratch = scratch0();
4386 
4387  Handle<Map> from_map = instr->original_map();
4388  Handle<Map> to_map = instr->transitioned_map();
4389  ElementsKind from_kind = instr->from_kind();
4390  ElementsKind to_kind = instr->to_kind();
4391 
4392  Label not_applicable;
4393  __ lw(scratch, FieldMemOperand(object_reg, HeapObject::kMapOffset));
4394  __ Branch(&not_applicable, ne, scratch, Operand(from_map));
4395 
4396  if (IsSimpleMapChangeTransition(from_kind, to_kind)) {
4397  Register new_map_reg = ToRegister(instr->new_map_temp());
4398  __ li(new_map_reg, Operand(to_map));
4399  __ sw(new_map_reg, FieldMemOperand(object_reg, HeapObject::kMapOffset));
4400  // Write barrier.
4401  __ RecordWriteField(object_reg, HeapObject::kMapOffset, new_map_reg,
4402  scratch, GetRAState(), kDontSaveFPRegs);
4403  } else {
4404  ASSERT(ToRegister(instr->context()).is(cp));
4405  PushSafepointRegistersScope scope(
4406  this, Safepoint::kWithRegistersAndDoubles);
4407  __ mov(a0, object_reg);
4408  __ li(a1, Operand(to_map));
4409  bool is_js_array = from_map->instance_type() == JS_ARRAY_TYPE;
4410  TransitionElementsKindStub stub(from_kind, to_kind, is_js_array);
4411  __ CallStub(&stub);
4412  RecordSafepointWithRegistersAndDoubles(
4413  instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
4414  }
4415  __ bind(&not_applicable);
4416 }
4417 
4418 
4419 void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
4420  Register object = ToRegister(instr->object());
4421  Register temp = ToRegister(instr->temp());
4422  Label no_memento_found;
4423  __ TestJSArrayForAllocationMemento(object, temp, &no_memento_found,
4424  ne, &no_memento_found);
4425  DeoptimizeIf(al, instr->environment());
4426  __ bind(&no_memento_found);
4427 }
4428 
4429 
4430 void LCodeGen::DoStringAdd(LStringAdd* instr) {
4431  ASSERT(ToRegister(instr->context()).is(cp));
4432  ASSERT(ToRegister(instr->left()).is(a1));
4433  ASSERT(ToRegister(instr->right()).is(a0));
4434  StringAddStub stub(instr->hydrogen()->flags(),
4435  instr->hydrogen()->pretenure_flag());
4436  CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
4437 }
4438 
4439 
4440 void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
4441  class DeferredStringCharCodeAt V8_FINAL : public LDeferredCode {
4442  public:
4443  DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
4444  : LDeferredCode(codegen), instr_(instr) { }
4445  virtual void Generate() V8_OVERRIDE {
4446  codegen()->DoDeferredStringCharCodeAt(instr_);
4447  }
4448  virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
4449  private:
4450  LStringCharCodeAt* instr_;
4451  };
4452 
4453  DeferredStringCharCodeAt* deferred =
4454  new(zone()) DeferredStringCharCodeAt(this, instr);
4455  StringCharLoadGenerator::Generate(masm(),
4456  ToRegister(instr->string()),
4457  ToRegister(instr->index()),
4458  ToRegister(instr->result()),
4459  deferred->entry());
4460  __ bind(deferred->exit());
4461 }
4462 
4463 
4464 void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
4465  Register string = ToRegister(instr->string());
4466  Register result = ToRegister(instr->result());
4467  Register scratch = scratch0();
4468 
4469  // TODO(3095996): Get rid of this. For now, we need to make the
4470  // result register contain a valid pointer because it is already
4471  // contained in the register pointer map.
4472  __ mov(result, zero_reg);
4473 
4474  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
4475  __ push(string);
4476  // Push the index as a smi. This is safe because of the checks in
4477  // DoStringCharCodeAt above.
4478  if (instr->index()->IsConstantOperand()) {
4479  int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
4480  __ Addu(scratch, zero_reg, Operand(Smi::FromInt(const_index)));
4481  __ push(scratch);
4482  } else {
4483  Register index = ToRegister(instr->index());
4484  __ SmiTag(index);
4485  __ push(index);
4486  }
4487  CallRuntimeFromDeferred(Runtime::kHiddenStringCharCodeAt, 2, instr,
4488  instr->context());
4489  __ AssertSmi(v0);
4490  __ SmiUntag(v0);
4491  __ StoreToSafepointRegisterSlot(v0, result);
4492 }
4493 
4494 
4495 void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
4496  class DeferredStringCharFromCode V8_FINAL : public LDeferredCode {
4497  public:
4498  DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
4499  : LDeferredCode(codegen), instr_(instr) { }
4500  virtual void Generate() V8_OVERRIDE {
4501  codegen()->DoDeferredStringCharFromCode(instr_);
4502  }
4503  virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
4504  private:
4505  LStringCharFromCode* instr_;
4506  };
4507 
4508  DeferredStringCharFromCode* deferred =
4509  new(zone()) DeferredStringCharFromCode(this, instr);
4510 
4511  ASSERT(instr->hydrogen()->value()->representation().IsInteger32());
4512  Register char_code = ToRegister(instr->char_code());
4513  Register result = ToRegister(instr->result());
4514  Register scratch = scratch0();
4515  ASSERT(!char_code.is(result));
4516 
4517  __ Branch(deferred->entry(), hi,
4518  char_code, Operand(String::kMaxOneByteCharCode));
4519  __ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex);
4520  __ sll(scratch, char_code, kPointerSizeLog2);
4521  __ Addu(result, result, scratch);
4522  __ lw(result, FieldMemOperand(result, FixedArray::kHeaderSize));
4523  __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
4524  __ Branch(deferred->entry(), eq, result, Operand(scratch));
4525  __ bind(deferred->exit());
4526 }
4527 
4528 
4529 void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
4530  Register char_code = ToRegister(instr->char_code());
4531  Register result = ToRegister(instr->result());
4532 
4533  // TODO(3095996): Get rid of this. For now, we need to make the
4534  // result register contain a valid pointer because it is already
4535  // contained in the register pointer map.
4536  __ mov(result, zero_reg);
4537 
4538  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
4539  __ SmiTag(char_code);
4540  __ push(char_code);
4541  CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr, instr->context());
4542  __ StoreToSafepointRegisterSlot(v0, result);
4543 }
4544 
4545 
4546 void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
4547  LOperand* input = instr->value();
4548  ASSERT(input->IsRegister() || input->IsStackSlot());
4549  LOperand* output = instr->result();
4550  ASSERT(output->IsDoubleRegister());
4551  FPURegister single_scratch = double_scratch0().low();
4552  if (input->IsStackSlot()) {
4553  Register scratch = scratch0();
4554  __ lw(scratch, ToMemOperand(input));
4555  __ mtc1(scratch, single_scratch);
4556  } else {
4557  __ mtc1(ToRegister(input), single_scratch);
4558  }
4559  __ cvt_d_w(ToDoubleRegister(output), single_scratch);
4560 }
4561 
4562 
4563 void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
4564  LOperand* input = instr->value();
4565  LOperand* output = instr->result();
4566 
4567  FPURegister dbl_scratch = double_scratch0();
4568  __ mtc1(ToRegister(input), dbl_scratch);
4569  __ Cvt_d_uw(ToDoubleRegister(output), dbl_scratch, f22);
4570 }
4571 
4572 
4573 void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
4574  class DeferredNumberTagI V8_FINAL : public LDeferredCode {
4575  public:
4576  DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr)
4577  : LDeferredCode(codegen), instr_(instr) { }
4578  virtual void Generate() V8_OVERRIDE {
4579  codegen()->DoDeferredNumberTagIU(instr_,
4580  instr_->value(),
4581  instr_->temp1(),
4582  instr_->temp2(),
4583  SIGNED_INT32);
4584  }
4585  virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
4586  private:
4587  LNumberTagI* instr_;
4588  };
4589 
4590  Register src = ToRegister(instr->value());
4591  Register dst = ToRegister(instr->result());
4592  Register overflow = scratch0();
4593 
4594  DeferredNumberTagI* deferred = new(zone()) DeferredNumberTagI(this, instr);
4595  __ SmiTagCheckOverflow(dst, src, overflow);
4596  __ BranchOnOverflow(deferred->entry(), overflow);
4597  __ bind(deferred->exit());
4598 }
4599 
4600 
4601 void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
4602  class DeferredNumberTagU V8_FINAL : public LDeferredCode {
4603  public:
4604  DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr)
4605  : LDeferredCode(codegen), instr_(instr) { }
4606  virtual void Generate() V8_OVERRIDE {
4607  codegen()->DoDeferredNumberTagIU(instr_,
4608  instr_->value(),
4609  instr_->temp1(),
4610  instr_->temp2(),
4611  UNSIGNED_INT32);
4612  }
4613  virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
4614  private:
4615  LNumberTagU* instr_;
4616  };
4617 
4618  Register input = ToRegister(instr->value());
4619  Register result = ToRegister(instr->result());
4620 
4621  DeferredNumberTagU* deferred = new(zone()) DeferredNumberTagU(this, instr);
4622  __ Branch(deferred->entry(), hi, input, Operand(Smi::kMaxValue));
4623  __ SmiTag(result, input);
4624  __ bind(deferred->exit());
4625 }
4626 
4627 
4628 void LCodeGen::DoDeferredNumberTagIU(LInstruction* instr,
4629  LOperand* value,
4630  LOperand* temp1,
4631  LOperand* temp2,
4632  IntegerSignedness signedness) {
4633  Label done, slow;
4634  Register src = ToRegister(value);
4635  Register dst = ToRegister(instr->result());
4636  Register tmp1 = scratch0();
4637  Register tmp2 = ToRegister(temp1);
4638  Register tmp3 = ToRegister(temp2);
4639  DoubleRegister dbl_scratch = double_scratch0();
4640 
4641  if (signedness == SIGNED_INT32) {
4642  // There was overflow, so bits 30 and 31 of the original integer
4643  // disagree. Try to allocate a heap number in new space and store
4644  // the value in there. If that fails, call the runtime system.
4645  if (dst.is(src)) {
4646  __ SmiUntag(src, dst);
4647  __ Xor(src, src, Operand(0x80000000));
4648  }
4649  __ mtc1(src, dbl_scratch);
4650  __ cvt_d_w(dbl_scratch, dbl_scratch);
4651  } else {
4652  __ mtc1(src, dbl_scratch);
4653  __ Cvt_d_uw(dbl_scratch, dbl_scratch, f22);
4654  }
4655 
4656  if (FLAG_inline_new) {
4657  __ LoadRoot(tmp3, Heap::kHeapNumberMapRootIndex);
4658  __ AllocateHeapNumber(dst, tmp1, tmp2, tmp3, &slow, DONT_TAG_RESULT);
4659  __ Branch(&done);
4660  }
4661 
4662  // Slow case: Call the runtime system to do the number allocation.
4663  __ bind(&slow);
4664  {
4665  // TODO(3095996): Put a valid pointer value in the stack slot where the
4666  // result register is stored, as this register is in the pointer map, but
4667  // contains an integer value.
4668  __ mov(dst, zero_reg);
4669 
4670  // Preserve the value of all registers.
4671  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
4672 
4673  // NumberTagI and NumberTagD use the context from the frame, rather than
4674  // the environment's HContext or HInlinedContext value.
4675  // They only call Runtime::kHiddenAllocateHeapNumber.
4676  // The corresponding HChange instructions are added in a phase that does
4677  // not have easy access to the local context.
4678  __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
4679  __ CallRuntimeSaveDoubles(Runtime::kHiddenAllocateHeapNumber);
4680  RecordSafepointWithRegisters(
4681  instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
4682  __ Subu(v0, v0, kHeapObjectTag);
4683  __ StoreToSafepointRegisterSlot(v0, dst);
4684  }
4685 
4686 
4687  // Done. Put the value in dbl_scratch into the value of the allocated heap
4688  // number.
4689  __ bind(&done);
4690  __ sdc1(dbl_scratch, MemOperand(dst, HeapNumber::kValueOffset));
4691  __ Addu(dst, dst, kHeapObjectTag);
4692 }
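Editor's note: the SmiUntag/Xor pair at the top of this deferred path (taken when dst aliases src) undoes an overflowed SmiTag. Tagging shifts the value left by one, so on overflow only the sign bit is lost, and flipping bit 31 after untagging restores the original integer. A standalone sketch (illustrative, assuming 31-bit smis with a one-bit tag shift):

    #include <cstdint>

    int32_t RecoverOverflowedSmiInput(int32_t tagged) {
      int32_t untagged = tagged >> 1;                        // arithmetic SmiUntag
      return untagged ^ static_cast<int32_t>(0x80000000u);   // flip the lost sign bit
    }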
4693 
4694 
4695 void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
4696  class DeferredNumberTagD V8_FINAL : public LDeferredCode {
4697  public:
4698  DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
4699  : LDeferredCode(codegen), instr_(instr) { }
4700  virtual void Generate() V8_OVERRIDE {
4701  codegen()->DoDeferredNumberTagD(instr_);
4702  }
4703  virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
4704  private:
4705  LNumberTagD* instr_;
4706  };
4707 
4708  DoubleRegister input_reg = ToDoubleRegister(instr->value());
4709  Register scratch = scratch0();
4710  Register reg = ToRegister(instr->result());
4711  Register temp1 = ToRegister(instr->temp());
4712  Register temp2 = ToRegister(instr->temp2());
4713 
4714  DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr);
4715  if (FLAG_inline_new) {
4716  __ LoadRoot(scratch, Heap::kHeapNumberMapRootIndex);
4717  // We want the untagged address first for performance
4718  __ AllocateHeapNumber(reg, temp1, temp2, scratch, deferred->entry(),
4719  DONT_TAG_RESULT);
4720  } else {
4721  __ Branch(deferred->entry());
4722  }
4723  __ bind(deferred->exit());
4724  __ sdc1(input_reg, MemOperand(reg, HeapNumber::kValueOffset));
4725  // Now that we have finished with the object's real address, tag it.
4726  __ Addu(reg, reg, kHeapObjectTag);
4727 }
4728 
4729 
4730 void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
4731  // TODO(3095996): Get rid of this. For now, we need to make the
4732  // result register contain a valid pointer because it is already
4733  // contained in the register pointer map.
4734  Register reg = ToRegister(instr->result());
4735  __ mov(reg, zero_reg);
4736 
4737  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
4738  // NumberTagI and NumberTagD use the context from the frame, rather than
4739  // the environment's HContext or HInlinedContext value.
4740  // They only call Runtime::kHiddenAllocateHeapNumber.
4741  // The corresponding HChange instructions are added in a phase that does
4742  // not have easy access to the local context.
4743  __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
4744  __ CallRuntimeSaveDoubles(Runtime::kHiddenAllocateHeapNumber);
4745  RecordSafepointWithRegisters(
4746  instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
4747  __ Subu(v0, v0, kHeapObjectTag);
4748  __ StoreToSafepointRegisterSlot(v0, reg);
4749 }
4750 
4751 
4752 void LCodeGen::DoSmiTag(LSmiTag* instr) {
4753  HChange* hchange = instr->hydrogen();
4754  Register input = ToRegister(instr->value());
4755  Register output = ToRegister(instr->result());
4756  if (hchange->CheckFlag(HValue::kCanOverflow) &&
4757  hchange->value()->CheckFlag(HValue::kUint32)) {
4758  __ And(at, input, Operand(0xc0000000));
4759  DeoptimizeIf(ne, instr->environment(), at, Operand(zero_reg));
4760  }
4761  if (hchange->CheckFlag(HValue::kCanOverflow) &&
4762  !hchange->value()->CheckFlag(HValue::kUint32)) {
4763  __ SmiTagCheckOverflow(output, input, at);
4764  DeoptimizeIf(lt, instr->environment(), at, Operand(zero_reg));
4765  } else {
4766  __ SmiTag(output, input);
4767  }
4768 }
4769 
4770 
4771 void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
4772  Register scratch = scratch0();
4773  Register input = ToRegister(instr->value());
4774  Register result = ToRegister(instr->result());
4775  if (instr->needs_check()) {
4776  STATIC_ASSERT(kHeapObjectTag == 1);
4777  // If the input is a HeapObject, value of scratch won't be zero.
4778  __ And(scratch, input, Operand(kHeapObjectTag));
4779  __ SmiUntag(result, input);
4780  DeoptimizeIf(ne, instr->environment(), scratch, Operand(zero_reg));
4781  } else {
4782  __ SmiUntag(result, input);
4783  }
4784 }
4785 
4786 
4787 void LCodeGen::EmitNumberUntagD(Register input_reg,
4788  DoubleRegister result_reg,
4789  bool can_convert_undefined_to_nan,
4790  bool deoptimize_on_minus_zero,
4791  LEnvironment* env,
4792  NumberUntagDMode mode) {
4793  Register scratch = scratch0();
4794  Label convert, load_smi, done;
4795  if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
4796  // Smi check.
4797  __ UntagAndJumpIfSmi(scratch, input_reg, &load_smi);
4798  // Heap number map check.
4799  __ lw(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
4800  __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
4801  if (can_convert_undefined_to_nan) {
4802  __ Branch(&convert, ne, scratch, Operand(at));
4803  } else {
4804  DeoptimizeIf(ne, env, scratch, Operand(at));
4805  }
4806  // Load heap number.
4807  __ ldc1(result_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset));
4808  if (deoptimize_on_minus_zero) {
4809  __ mfc1(at, result_reg.low());
4810  __ Branch(&done, ne, at, Operand(zero_reg));
4811  __ mfc1(scratch, result_reg.high());
4812  DeoptimizeIf(eq, env, scratch, Operand(HeapNumber::kSignMask));
4813  }
4814  __ Branch(&done);
4815  if (can_convert_undefined_to_nan) {
4816  __ bind(&convert);
4817  // Convert undefined (and hole) to NaN.
4818  __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
4819  DeoptimizeIf(ne, env, input_reg, Operand(at));
4820  __ LoadRoot(scratch, Heap::kNanValueRootIndex);
4821  __ ldc1(result_reg, FieldMemOperand(scratch, HeapNumber::kValueOffset));
4822  __ Branch(&done);
4823  }
4824  } else {
4825  __ SmiUntag(scratch, input_reg);
4826  ASSERT(mode == NUMBER_CANDIDATE_IS_SMI);
4827  }
4828  // Smi to double register conversion
4829  __ bind(&load_smi);
4830  // scratch: untagged value of input_reg
4831  __ mtc1(scratch, result_reg);
4832  __ cvt_d_w(result_reg, result_reg);
4833  __ bind(&done);
4834 }
4835 
4836 
4837 void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
4838  Register input_reg = ToRegister(instr->value());
4839  Register scratch1 = scratch0();
4840  Register scratch2 = ToRegister(instr->temp());
4841  DoubleRegister double_scratch = double_scratch0();
4842  DoubleRegister double_scratch2 = ToDoubleRegister(instr->temp2());
4843 
4844  ASSERT(!scratch1.is(input_reg) && !scratch1.is(scratch2));
4845  ASSERT(!scratch2.is(input_reg) && !scratch2.is(scratch1));
4846 
4847  Label done;
4848 
4849  // The input is a tagged HeapObject.
4850  // Heap number map check.
4851  __ lw(scratch1, FieldMemOperand(input_reg, HeapObject::kMapOffset));
4852  __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
4853  // This 'at' value and scratch1 map value are used for tests in both clauses
4854  // of the if.
4855 
4856  if (instr->truncating()) {
4857  // Performs a truncating conversion of a floating point number as used by
4858  // the JS bitwise operations.
4859  Label no_heap_number, check_bools, check_false;
4860  // Check HeapNumber map.
4861  __ Branch(USE_DELAY_SLOT, &no_heap_number, ne, scratch1, Operand(at));
4862  __ mov(scratch2, input_reg); // In delay slot.
4863  __ TruncateHeapNumberToI(input_reg, scratch2);
4864  __ Branch(&done);
4865 
4866  // Check for Oddballs. Undefined/False is converted to zero and True to one
4867  // for truncating conversions.
4868  __ bind(&no_heap_number);
4869  __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
4870  __ Branch(&check_bools, ne, input_reg, Operand(at));
4871  ASSERT(ToRegister(instr->result()).is(input_reg));
4872  __ Branch(USE_DELAY_SLOT, &done);
4873  __ mov(input_reg, zero_reg); // In delay slot.
4874 
4875  __ bind(&check_bools);
4876  __ LoadRoot(at, Heap::kTrueValueRootIndex);
4877  __ Branch(&check_false, ne, scratch2, Operand(at));
4878  __ Branch(USE_DELAY_SLOT, &done);
4879  __ li(input_reg, Operand(1)); // In delay slot.
4880 
4881  __ bind(&check_false);
4882  __ LoadRoot(at, Heap::kFalseValueRootIndex);
4883  DeoptimizeIf(ne, instr->environment(), scratch2, Operand(at));
4884  __ Branch(USE_DELAY_SLOT, &done);
4885  __ mov(input_reg, zero_reg); // In delay slot.
4886  } else {
4887  // Deoptimize if we don't have a heap number.
4888  DeoptimizeIf(ne, instr->environment(), scratch1, Operand(at));
4889 
4890  // Load the double value.
4891  __ ldc1(double_scratch,
4892  FieldMemOperand(input_reg, HeapNumber::kValueOffset));
4893 
4894  Register except_flag = scratch2;
4895  __ EmitFPUTruncate(kRoundToZero,
4896  input_reg,
4897  double_scratch,
4898  scratch1,
4899  double_scratch2,
4900  except_flag,
4901  kCheckForInexactConversion);
4902 
4903  // Deopt if the operation did not succeed.
4904  DeoptimizeIf(ne, instr->environment(), except_flag, Operand(zero_reg));
4905 
4906  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
4907  __ Branch(&done, ne, input_reg, Operand(zero_reg));
4908 
4909  __ mfc1(scratch1, double_scratch.high());
4910  __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
4911  DeoptimizeIf(ne, instr->environment(), scratch1, Operand(zero_reg));
4912  }
4913  }
4914  __ bind(&done);
4915 }
4916 
4917 
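// Fast path of tagged->int32: Smis are untagged in place, everything else is
// handed to the deferred code above.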
4918 void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
4919  class DeferredTaggedToI V8_FINAL : public LDeferredCode {
4920  public:
4921  DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
4922  : LDeferredCode(codegen), instr_(instr) { }
4923  virtual void Generate() V8_OVERRIDE {
4924  codegen()->DoDeferredTaggedToI(instr_);
4925  }
4926  virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
4927  private:
4928  LTaggedToI* instr_;
4929  };
4930 
4931  LOperand* input = instr->value();
4932  ASSERT(input->IsRegister());
4933  ASSERT(input->Equals(instr->result()));
4934 
4935  Register input_reg = ToRegister(input);
4936 
4937  if (instr->hydrogen()->value()->representation().IsSmi()) {
4938  __ SmiUntag(input_reg);
4939  } else {
4940  DeferredTaggedToI* deferred = new(zone()) DeferredTaggedToI(this, instr);
4941 
4942  // Let the deferred code handle the HeapObject case.
4943  __ JumpIfNotSmi(input_reg, deferred->entry());
4944 
4945  // Smi to int32 conversion.
4946  __ SmiUntag(input_reg);
4947  __ bind(deferred->exit());
4948  }
4949 }
4950 
4951 
4952 void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
4953  LOperand* input = instr->value();
4954  ASSERT(input->IsRegister());
4955  LOperand* result = instr->result();
4956  ASSERT(result->IsDoubleRegister());
4957 
4958  Register input_reg = ToRegister(input);
4959  DoubleRegister result_reg = ToDoubleRegister(result);
4960 
4961  HValue* value = instr->hydrogen()->value();
4962  NumberUntagDMode mode = value->representation().IsSmi()
4963  ? NUMBER_CANDIDATE_IS_SMI : NUMBER_CANDIDATE_IS_ANY_TAGGED;
4964 
4965  EmitNumberUntagD(input_reg, result_reg,
4966  instr->hydrogen()->can_convert_undefined_to_nan(),
4967  instr->hydrogen()->deoptimize_on_minus_zero(),
4968  instr->environment(),
4969  mode);
4970 }
4971 
4972 
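// Double->int32 conversion. Truncating conversions use TruncateDoubleToI;
// otherwise the code deoptimizes if the conversion is inexact or if the
// result is -0 and kBailoutOnMinusZero is set.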
4973 void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
4974  Register result_reg = ToRegister(instr->result());
4975  Register scratch1 = scratch0();
4976  DoubleRegister double_input = ToDoubleRegister(instr->value());
4977 
4978  if (instr->truncating()) {
4979  __ TruncateDoubleToI(result_reg, double_input);
4980  } else {
4981  Register except_flag = LCodeGen::scratch1();
4982 
4983  __ EmitFPUTruncate(kRoundToMinusInf,
4984  result_reg,
4985  double_input,
4986  scratch1,
4987  double_scratch0(),
4988  except_flag,
4989  kCheckForInexactConversion);
4990 
4991  // Deopt if the operation did not succeed (except_flag != 0).
4992  DeoptimizeIf(ne, instr->environment(), except_flag, Operand(zero_reg));
4993 
4994  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
4995  Label done;
4996  __ Branch(&done, ne, result_reg, Operand(zero_reg));
4997  __ mfc1(scratch1, double_input.high());
4998  __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
4999  DeoptimizeIf(ne, instr->environment(), scratch1, Operand(zero_reg));
5000  __ bind(&done);
5001  }
5002  }
5003 }
5004 
5005 
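// Same as DoDoubleToI above, but the result is additionally Smi-tagged and
// the code deoptimizes if tagging overflows the Smi range.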
5006 void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
5007  Register result_reg = ToRegister(instr->result());
5008  Register scratch1 = LCodeGen::scratch0();
5009  DoubleRegister double_input = ToDoubleRegister(instr->value());
5010 
5011  if (instr->truncating()) {
5012  __ TruncateDoubleToI(result_reg, double_input);
5013  } else {
5014  Register except_flag = LCodeGen::scratch1();
5015 
5016  __ EmitFPUTruncate(kRoundToMinusInf,
5017  result_reg,
5018  double_input,
5019  scratch1,
5020  double_scratch0(),
5021  except_flag,
5022  kCheckForInexactConversion);
5023 
5024  // Deopt if the operation did not succeed (except_flag != 0).
5025  DeoptimizeIf(ne, instr->environment(), except_flag, Operand(zero_reg));
5026 
5027  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
5028  Label done;
5029  __ Branch(&done, ne, result_reg, Operand(zero_reg));
5030  __ mfc1(scratch1, double_input.high());
5031  __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
5032  DeoptimizeIf(ne, instr->environment(), scratch1, Operand(zero_reg));
5033  __ bind(&done);
5034  }
5035  }
5036  __ SmiTagCheckOverflow(result_reg, result_reg, scratch1);
5037  DeoptimizeIf(lt, instr->environment(), scratch1, Operand(zero_reg));
5038 }
5039 
5040 
5041 void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
5042  LOperand* input = instr->value();
5043  __ SmiTst(ToRegister(input), at);
5044  DeoptimizeIf(ne, instr->environment(), at, Operand(zero_reg));
5045 }
5046 
5047 
5048 void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
5049  if (!instr->hydrogen()->value()->IsHeapObject()) {
5050  LOperand* input = instr->value();
5051  __ SmiTst(ToRegister(input), at);
5052  DeoptimizeIf(eq, instr->environment(), at, Operand(zero_reg));
5053  }
5054 }
5055 
5056 
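// Deoptimizes unless the instance type of the input is either inside the
// requested [first, last] interval or matches the requested mask/tag pair
// (the single-bit mask case is tested with a plain And against 'at').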
5057 void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
5058  Register input = ToRegister(instr->value());
5059  Register scratch = scratch0();
5060 
5061  __ GetObjectType(input, scratch, scratch);
5062 
5063  if (instr->hydrogen()->is_interval_check()) {
5064  InstanceType first;
5065  InstanceType last;
5066  instr->hydrogen()->GetCheckInterval(&first, &last);
5067 
5068  // If there is only one type in the interval check for equality.
5069  if (first == last) {
5070  DeoptimizeIf(ne, instr->environment(), scratch, Operand(first));
5071  } else {
5072  DeoptimizeIf(lo, instr->environment(), scratch, Operand(first));
5073  // Omit check for the last type.
5074  if (last != LAST_TYPE) {
5075  DeoptimizeIf(hi, instr->environment(), scratch, Operand(last));
5076  }
5077  }
5078  } else {
5079  uint8_t mask;
5080  uint8_t tag;
5081  instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);
5082 
5083  if (IsPowerOf2(mask)) {
5084  ASSERT(tag == 0 || IsPowerOf2(tag));
5085  __ And(at, scratch, mask);
5086  DeoptimizeIf(tag == 0 ? ne : eq, instr->environment(),
5087  at, Operand(zero_reg));
5088  } else {
5089  __ And(scratch, scratch, Operand(mask));
5090  DeoptimizeIf(ne, instr->environment(), scratch, Operand(tag));
5091  }
5092  }
5093 }
5094 
5095 
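// Deoptimizes unless the value is the expected object. New-space objects are
// compared through a Cell so the reference stays valid if the GC moves them.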
5096 void LCodeGen::DoCheckValue(LCheckValue* instr) {
5097  Register reg = ToRegister(instr->value());
5098  Handle<HeapObject> object = instr->hydrogen()->object().handle();
5099  AllowDeferredHandleDereference smi_check;
5100  if (isolate()->heap()->InNewSpace(*object)) {
5101  Register reg = ToRegister(instr->value());
5102  Handle<Cell> cell = isolate()->factory()->NewCell(object);
5103  __ li(at, Operand(Handle<Object>(cell)));
5104  __ lw(at, FieldMemOperand(at, Cell::kValueOffset));
5105  DeoptimizeIf(ne, instr->environment(), reg,
5106  Operand(at));
5107  } else {
5108  DeoptimizeIf(ne, instr->environment(), reg,
5109  Operand(object));
5110  }
5111 }
5112 
5113 
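// Deferred map-check path: tries to migrate the instance to a new map via
// Runtime::kTryMigrateInstance and deoptimizes if the runtime returns a Smi
// (i.e. migration failed).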
5114 void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
5115  {
5116  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
5117  __ push(object);
5118  __ mov(cp, zero_reg);
5119  __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance);
5120  RecordSafepointWithRegisters(
5121  instr->pointer_map(), 1, Safepoint::kNoLazyDeopt);
5122  __ StoreToSafepointRegisterSlot(v0, scratch0());
5123  }
5124  __ SmiTst(scratch0(), at);
5125  DeoptimizeIf(eq, instr->environment(), at, Operand(zero_reg));
5126 }
5127 
5128 
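// Compares the object's map against each map in the hydrogen map set and
// deoptimizes on no match. If one of the maps is a migration target, the
// final mismatch instead jumps to the deferred migration code and the maps
// are checked again afterwards.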
5129 void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
5130  class DeferredCheckMaps V8_FINAL : public LDeferredCode {
5131  public:
5132  DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object)
5133  : LDeferredCode(codegen), instr_(instr), object_(object) {
5134  SetExit(check_maps());
5135  }
5136  virtual void Generate() V8_OVERRIDE {
5137  codegen()->DoDeferredInstanceMigration(instr_, object_);
5138  }
5139  Label* check_maps() { return &check_maps_; }
5140  virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
5141  private:
5142  LCheckMaps* instr_;
5143  Label check_maps_;
5144  Register object_;
5145  };
5146 
5147  if (instr->hydrogen()->CanOmitMapChecks()) return;
5148  Register map_reg = scratch0();
5149  LOperand* input = instr->value();
5150  ASSERT(input->IsRegister());
5151  Register reg = ToRegister(input);
5152  __ lw(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset));
5153 
5154  DeferredCheckMaps* deferred = NULL;
5155  if (instr->hydrogen()->has_migration_target()) {
5156  deferred = new(zone()) DeferredCheckMaps(this, instr, reg);
5157  __ bind(deferred->check_maps());
5158  }
5159 
5160  UniqueSet<Map> map_set = instr->hydrogen()->map_set();
5161  Label success;
5162  for (int i = 0; i < map_set.size() - 1; i++) {
5163  Handle<Map> map = map_set.at(i).handle();
5164  __ CompareMapAndBranch(map_reg, map, &success, eq, &success);
5165  }
5166  Handle<Map> map = map_set.at(map_set.size() - 1).handle();
5167  // Do the CompareMap() directly within the Branch() and DeoptimizeIf().
5168  if (instr->hydrogen()->has_migration_target()) {
5169  __ Branch(deferred->entry(), ne, map_reg, Operand(map));
5170  } else {
5171  DeoptimizeIf(ne, instr->environment(), map_reg, Operand(map));
5172  }
5173 
5174  __ bind(&success);
5175 }
5176 
5177 
5178 void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
5179  DoubleRegister value_reg = ToDoubleRegister(instr->unclamped());
5180  Register result_reg = ToRegister(instr->result());
5181  DoubleRegister temp_reg = ToDoubleRegister(instr->temp());
5182  __ ClampDoubleToUint8(result_reg, value_reg, temp_reg);
5183 }
5184 
5185 
5186 void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
5187  Register unclamped_reg = ToRegister(instr->unclamped());
5188  Register result_reg = ToRegister(instr->result());
5189  __ ClampUint8(result_reg, unclamped_reg);
5190 }
5191 
5192 
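// Clamps a tagged value to [0, 255]: Smis and heap numbers are clamped,
// undefined becomes 0, and any other heap object deoptimizes.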
5193 void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
5194  Register scratch = scratch0();
5195  Register input_reg = ToRegister(instr->unclamped());
5196  Register result_reg = ToRegister(instr->result());
5197  DoubleRegister temp_reg = ToDoubleRegister(instr->temp());
5198  Label is_smi, done, heap_number;
5199 
5200  // Both smi and heap number cases are handled.
5201  __ UntagAndJumpIfSmi(scratch, input_reg, &is_smi);
5202 
5203  // Check for heap number
5204  __ lw(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
5205  __ Branch(&heap_number, eq, scratch, Operand(factory()->heap_number_map()));
5206 
5207  // Check for undefined. Undefined is converted to zero for clamping
5208  // conversions.
5209  DeoptimizeIf(ne, instr->environment(), input_reg,
5210  Operand(factory()->undefined_value()));
5211  __ mov(result_reg, zero_reg);
5212  __ jmp(&done);
5213 
5214  // Heap number
5215  __ bind(&heap_number);
5216  __ ldc1(double_scratch0(), FieldMemOperand(input_reg,
5217  HeapNumber::kValueOffset));
5218  __ ClampDoubleToUint8(result_reg, double_scratch0(), temp_reg);
5219  __ jmp(&done);
5220 
5221  __ bind(&is_smi);
5222  __ ClampUint8(result_reg, scratch);
5223 
5224  __ bind(&done);
5225 }
5226 
5227 
5228 void LCodeGen::DoDoubleBits(LDoubleBits* instr) {
5229  DoubleRegister value_reg = ToDoubleRegister(instr->value());
5230  Register result_reg = ToRegister(instr->result());
5231  if (instr->hydrogen()->bits() == HDoubleBits::HIGH) {
5232  __ FmoveHigh(result_reg, value_reg);
5233  } else {
5234  __ FmoveLow(result_reg, value_reg);
5235  }
5236 }
5237 
5238 
5239 void LCodeGen::DoConstructDouble(LConstructDouble* instr) {
5240  Register hi_reg = ToRegister(instr->hi());
5241  Register lo_reg = ToRegister(instr->lo());
5242  DoubleRegister result_reg = ToDoubleRegister(instr->result());
5243  __ Move(result_reg, lo_reg, hi_reg);
5244 }
5245 
5246 
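// Inline allocation. Small constant-size and register-size requests go
// through MacroAssembler::Allocate; oversized requests and failed inline
// attempts fall back to the deferred runtime allocation below. Optionally
// prefills the new object with the one-pointer filler map.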
5247 void LCodeGen::DoAllocate(LAllocate* instr) {
5248  class DeferredAllocate V8_FINAL : public LDeferredCode {
5249  public:
5250  DeferredAllocate(LCodeGen* codegen, LAllocate* instr)
5251  : LDeferredCode(codegen), instr_(instr) { }
5252  virtual void Generate() V8_OVERRIDE {
5253  codegen()->DoDeferredAllocate(instr_);
5254  }
5255  virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
5256  private:
5257  LAllocate* instr_;
5258  };
5259 
5260  DeferredAllocate* deferred =
5261  new(zone()) DeferredAllocate(this, instr);
5262 
5263  Register result = ToRegister(instr->result());
5264  Register scratch = ToRegister(instr->temp1());
5265  Register scratch2 = ToRegister(instr->temp2());
5266 
5267  // Allocate memory for the object.
5268  AllocationFlags flags = TAG_OBJECT;
5269  if (instr->hydrogen()->MustAllocateDoubleAligned()) {
5270  flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
5271  }
5272  if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
5273  ASSERT(!instr->hydrogen()->IsOldDataSpaceAllocation());
5274  ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
5275  flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_POINTER_SPACE);
5276  } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
5277  ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
5278  flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_DATA_SPACE);
5279  }
5280  if (instr->size()->IsConstantOperand()) {
5281  int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
5282  if (size <= Page::kMaxRegularHeapObjectSize) {
5283  __ Allocate(size, result, scratch, scratch2, deferred->entry(), flags);
5284  } else {
5285  __ jmp(deferred->entry());
5286  }
5287  } else {
5288  Register size = ToRegister(instr->size());
5289  __ Allocate(size,
5290  result,
5291  scratch,
5292  scratch2,
5293  deferred->entry(),
5294  flags);
5295  }
5296 
5297  __ bind(deferred->exit());
5298 
5299  if (instr->hydrogen()->MustPrefillWithFiller()) {
5300  if (instr->size()->IsConstantOperand()) {
5301  int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
5302  __ li(scratch, Operand(size));
5303  } else {
5304  scratch = ToRegister(instr->size());
5305  }
5306  __ Subu(scratch, scratch, Operand(kPointerSize));
5307  __ Subu(result, result, Operand(kHeapObjectTag));
5308  Label loop;
5309  __ bind(&loop);
5310  __ li(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
5311  __ Addu(at, result, Operand(scratch));
5312  __ sw(scratch2, MemOperand(at));
5313  __ Subu(scratch, scratch, Operand(kPointerSize));
5314  __ Branch(&loop, ge, scratch, Operand(zero_reg));
5315  __ Addu(result, result, Operand(kHeapObjectTag));
5316  }
5317 }
5318 
5319 
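// Runtime fallback for DoAllocate: passes the (Smi-tagged) size and the
// encoded allocation flags to Runtime::kHiddenAllocateInTargetSpace and
// stores the result back into the safepoint register slot.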
5320 void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
5321  Register result = ToRegister(instr->result());
5322 
5323  // TODO(3095996): Get rid of this. For now, we need to make the
5324  // result register contain a valid pointer because it is already
5325  // contained in the register pointer map.
5326  __ mov(result, zero_reg);
5327 
5328  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
5329  if (instr->size()->IsRegister()) {
5330  Register size = ToRegister(instr->size());
5331  ASSERT(!size.is(result));
5332  __ SmiTag(size);
5333  __ push(size);
5334  } else {
5335  int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
5336  __ Push(Smi::FromInt(size));
5337  }
5338 
5339  int flags = AllocateDoubleAlignFlag::encode(
5340  instr->hydrogen()->MustAllocateDoubleAligned());
5341  if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
5342  ASSERT(!instr->hydrogen()->IsOldDataSpaceAllocation());
5343  ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
5344  flags = AllocateTargetSpace::update(flags, OLD_POINTER_SPACE);
5345  } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
5346  ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
5347  flags = AllocateTargetSpace::update(flags, OLD_DATA_SPACE);
5348  } else {
5349  flags = AllocateTargetSpace::update(flags, NEW_SPACE);
5350  }
5351  __ Push(Smi::FromInt(flags));
5352 
5353  CallRuntimeFromDeferred(
5354  Runtime::kHiddenAllocateInTargetSpace, 2, instr, instr->context());
5355  __ StoreToSafepointRegisterSlot(v0, result);
5356 }
5357 
5358 
5359 void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
5360  ASSERT(ToRegister(instr->value()).is(a0));
5361  ASSERT(ToRegister(instr->result()).is(v0));
5362  __ push(a0);
5363  CallRuntime(Runtime::kToFastProperties, 1, instr);
5364 }
5365 
5366 
5367 void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
5368  ASSERT(ToRegister(instr->context()).is(cp));
5369  Label materialized;
5370  // Registers will be used as follows:
5371  // t3 = literals array.
5372  // a1 = regexp literal.
5373  // a0 = regexp literal clone.
5374  // a2 and t0-t2 are used as temporaries.
5375  int literal_offset =
5376  FixedArray::OffsetOfElementAt(instr->hydrogen()->literal_index());
5377  __ li(t3, instr->hydrogen()->literals());
5378  __ lw(a1, FieldMemOperand(t3, literal_offset));
5379  __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
5380  __ Branch(&materialized, ne, a1, Operand(at));
5381 
5382  // Create regexp literal using runtime function
5383  // Result will be in v0.
5384  __ li(t2, Operand(Smi::FromInt(instr->hydrogen()->literal_index())));
5385  __ li(t1, Operand(instr->hydrogen()->pattern()));
5386  __ li(t0, Operand(instr->hydrogen()->flags()));
5387  __ Push(t3, t2, t1, t0);
5388  CallRuntime(Runtime::kHiddenMaterializeRegExpLiteral, 4, instr);
5389  __ mov(a1, v0);
5390 
5391  __ bind(&materialized);
5392  int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
5393  Label allocated, runtime_allocate;
5394 
5395  __ Allocate(size, v0, a2, a3, &runtime_allocate, TAG_OBJECT);
5396  __ jmp(&allocated);
5397 
5398  __ bind(&runtime_allocate);
5399  __ li(a0, Operand(Smi::FromInt(size)));
5400  __ Push(a1, a0);
5401  CallRuntime(Runtime::kHiddenAllocateInNewSpace, 1, instr);
5402  __ pop(a1);
5403 
5404  __ bind(&allocated);
5405  // Copy the content into the newly allocated memory.
5406  // (Unroll copy loop once for better throughput).
5407  for (int i = 0; i < size - kPointerSize; i += 2 * kPointerSize) {
5408  __ lw(a3, FieldMemOperand(a1, i));
5409  __ lw(a2, FieldMemOperand(a1, i + kPointerSize));
5410  __ sw(a3, FieldMemOperand(v0, i));
5411  __ sw(a2, FieldMemOperand(v0, i + kPointerSize));
5412  }
5413  if ((size % (2 * kPointerSize)) != 0) {
5414  __ lw(a3, FieldMemOperand(a1, size - kPointerSize));
5415  __ sw(a3, FieldMemOperand(v0, size - kPointerSize));
5416  }
5417 }
5418 
5419 
5420 void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
5421  ASSERT(ToRegister(instr->context()).is(cp));
5422  // Use the fast case closure allocation code that allocates in new
5423  // space for nested functions that don't need literals cloning.
5424  bool pretenure = instr->hydrogen()->pretenure();
5425  if (!pretenure && instr->hydrogen()->has_no_literals()) {
5426  FastNewClosureStub stub(instr->hydrogen()->strict_mode(),
5427  instr->hydrogen()->is_generator());
5428  __ li(a2, Operand(instr->hydrogen()->shared_info()));
5429  CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
5430  } else {
5431  __ li(a2, Operand(instr->hydrogen()->shared_info()));
5432  __ li(a1, Operand(pretenure ? factory()->true_value()
5433  : factory()->false_value()));
5434  __ Push(cp, a2, a1);
5435  CallRuntime(Runtime::kHiddenNewClosure, 3, instr);
5436  }
5437 }
5438 
5439 
5440 void LCodeGen::DoTypeof(LTypeof* instr) {
5441  ASSERT(ToRegister(instr->result()).is(v0));
5442  Register input = ToRegister(instr->value());
5443  __ push(input);
5444  CallRuntime(Runtime::kTypeof, 1, instr);
5445 }
5446 
5447 
5448 void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
5449  Register input = ToRegister(instr->value());
5450 
5451  Register cmp1 = no_reg;
5452  Operand cmp2 = Operand(no_reg);
5453 
5454  Condition final_branch_condition = EmitTypeofIs(instr->TrueLabel(chunk_),
5455  instr->FalseLabel(chunk_),
5456  input,
5457  instr->type_literal(),
5458  cmp1,
5459  cmp2);
5460 
5461  ASSERT(cmp1.is_valid());
5462  ASSERT(!cmp2.is_reg() || cmp2.rm().is_valid());
5463 
5464  if (final_branch_condition != kNoCondition) {
5465  EmitBranch(instr, final_branch_condition, cmp1, cmp2);
5466  }
5467 }
5468 
5469 
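// Emits the comparison for "typeof x == 'literal'". Returns the condition to
// branch on and fills cmp1/cmp2 with the operands; returns kNoCondition (and
// branches to false_label) for unknown type literals.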
5470 Condition LCodeGen::EmitTypeofIs(Label* true_label,
5471  Label* false_label,
5472  Register input,
5473  Handle<String> type_name,
5474  Register& cmp1,
5475  Operand& cmp2) {
5476  // This function utilizes the delay slot heavily. This is used to load
5477  // values that are always usable without depending on the type of the input
5478  // register.
5479  Condition final_branch_condition = kNoCondition;
5480  Register scratch = scratch0();
5481  if (type_name->Equals(heap()->number_string())) {
5482  __ JumpIfSmi(input, true_label);
5483  __ lw(input, FieldMemOperand(input, HeapObject::kMapOffset));
5484  __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
5485  cmp1 = input;
5486  cmp2 = Operand(at);
5487  final_branch_condition = eq;
5488 
5489  } else if (type_name->Equals(heap()->string_string())) {
5490  __ JumpIfSmi(input, false_label);
5491  __ GetObjectType(input, input, scratch);
5492  __ Branch(USE_DELAY_SLOT, false_label,
5493  ge, scratch, Operand(FIRST_NONSTRING_TYPE));
5494  // input is an object so we can load the BitFieldOffset even if we take the
5495  // other branch.
5496  __ lbu(at, FieldMemOperand(input, Map::kBitFieldOffset));
5497  __ And(at, at, 1 << Map::kIsUndetectable);
5498  cmp1 = at;
5499  cmp2 = Operand(zero_reg);
5500  final_branch_condition = eq;
5501 
5502  } else if (type_name->Equals(heap()->symbol_string())) {
5503  __ JumpIfSmi(input, false_label);
5504  __ GetObjectType(input, input, scratch);
5505  cmp1 = scratch;
5506  cmp2 = Operand(SYMBOL_TYPE);
5507  final_branch_condition = eq;
5508 
5509  } else if (type_name->Equals(heap()->boolean_string())) {
5510  __ LoadRoot(at, Heap::kTrueValueRootIndex);
5511  __ Branch(USE_DELAY_SLOT, true_label, eq, at, Operand(input));
5512  __ LoadRoot(at, Heap::kFalseValueRootIndex);
5513  cmp1 = at;
5514  cmp2 = Operand(input);
5515  final_branch_condition = eq;
5516 
5517  } else if (FLAG_harmony_typeof && type_name->Equals(heap()->null_string())) {
5518  __ LoadRoot(at, Heap::kNullValueRootIndex);
5519  cmp1 = at;
5520  cmp2 = Operand(input);
5521  final_branch_condition = eq;
5522 
5523  } else if (type_name->Equals(heap()->undefined_string())) {
5524  __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
5525  __ Branch(USE_DELAY_SLOT, true_label, eq, at, Operand(input));
5526  // The first instruction of JumpIfSmi is an And - it is safe in the delay
5527  // slot.
5528  __ JumpIfSmi(input, false_label);
5529  // Check for undetectable objects => true.
5530  __ lw(input, FieldMemOperand(input, HeapObject::kMapOffset));
5531  __ lbu(at, FieldMemOperand(input, Map::kBitFieldOffset));
5532  __ And(at, at, 1 << Map::kIsUndetectable);
5533  cmp1 = at;
5534  cmp2 = Operand(zero_reg);
5535  final_branch_condition = ne;
5536 
5537  } else if (type_name->Equals(heap()->function_string())) {
5538  STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
5539  __ JumpIfSmi(input, false_label);
5540  __ GetObjectType(input, scratch, input);
5541  __ Branch(true_label, eq, input, Operand(JS_FUNCTION_TYPE));
5542  cmp1 = input;
5543  cmp2 = Operand(JS_FUNCTION_PROXY_TYPE);
5544  final_branch_condition = eq;
5545 
5546  } else if (type_name->Equals(heap()->object_string())) {
5547  __ JumpIfSmi(input, false_label);
5548  if (!FLAG_harmony_typeof) {
5549  __ LoadRoot(at, Heap::kNullValueRootIndex);
5550  __ Branch(USE_DELAY_SLOT, true_label, eq, at, Operand(input));
5551  }
5552  Register map = input;
5553  __ GetObjectType(input, map, scratch);
5554  __ Branch(false_label,
5555  lt, scratch, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
5556  __ Branch(USE_DELAY_SLOT, false_label,
5557  gt, scratch, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
5558  // map is still valid, so the BitField can be loaded in delay slot.
5559  // Check for undetectable objects => false.
5560  __ lbu(at, FieldMemOperand(map, Map::kBitFieldOffset));
5561  __ And(at, at, 1 << Map::kIsUndetectable);
5562  cmp1 = at;
5563  cmp2 = Operand(zero_reg);
5564  final_branch_condition = eq;
5565 
5566  } else {
5567  cmp1 = at;
5568  cmp2 = Operand(zero_reg); // Set to valid regs, to avoid caller assertion.
5569  __ Branch(false_label);
5570  }
5571 
5572  return final_branch_condition;
5573 }
5574 
5575 
5576 void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
5577  Register temp1 = ToRegister(instr->temp());
5578 
5579  EmitIsConstructCall(temp1, scratch0());
5580 
5581  EmitBranch(instr, eq, temp1,
5582  Operand(Smi::FromInt(StackFrame::CONSTRUCT)));
5583 }
5584 
5585 
5586 void LCodeGen::EmitIsConstructCall(Register temp1, Register temp2) {
5587  ASSERT(!temp1.is(temp2));
5588  // Get the frame pointer for the calling frame.
5589  __ lw(temp1, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
5590 
5591  // Skip the arguments adaptor frame if it exists.
5592  Label check_frame_marker;
5593  __ lw(temp2, MemOperand(temp1, StandardFrameConstants::kContextOffset));
5594  __ Branch(&check_frame_marker, ne, temp2,
5595  Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
5596  __ lw(temp1, MemOperand(temp1, StandardFrameConstants::kCallerFPOffset));
5597 
5598  // Check the marker in the calling frame.
5599  __ bind(&check_frame_marker);
5600  __ lw(temp1, MemOperand(temp1, StandardFrameConstants::kMarkerOffset));
5601 }
5602 
5603 
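// Pads the code with nops so that there is enough room after the previous
// lazy-bailout point for the deoptimizer to patch in a call.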
5604 void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
5605  if (!info()->IsStub()) {
5606  // Ensure that we have enough space after the previous lazy-bailout
5607  // instruction for patching the code here.
5608  int current_pc = masm()->pc_offset();
5609  if (current_pc < last_lazy_deopt_pc_ + space_needed) {
5610  int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
5611  ASSERT_EQ(0, padding_size % Assembler::kInstrSize);
5612  while (padding_size > 0) {
5613  __ nop();
5614  padding_size -= Assembler::kInstrSize;
5615  }
5616  }
5617  }
5618  last_lazy_deopt_pc_ = masm()->pc_offset();
5619 }
5620 
5621 
5622 void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
5623  last_lazy_deopt_pc_ = masm()->pc_offset();
5624  ASSERT(instr->HasEnvironment());
5625  LEnvironment* env = instr->environment();
5626  RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
5627  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
5628 }
5629 
5630 
5631 void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
5632  Deoptimizer::BailoutType type = instr->hydrogen()->type();
5633  // TODO(danno): Stubs expect all deopts to be lazy for historical reasons (the
5634  // needed return address), even though the implementation of LAZY and EAGER is
5635  // now identical. When LAZY is eventually completely folded into EAGER, remove
5636  // the special case below.
5637  if (info()->IsStub() && type == Deoptimizer::EAGER) {
5638  type = Deoptimizer::LAZY;
5639  }
5640 
5641  Comment(";;; deoptimize: %s", instr->hydrogen()->reason());
5642  DeoptimizeIf(al, instr->environment(), type, zero_reg, Operand(zero_reg));
5643 }
5644 
5645 
5646 void LCodeGen::DoDummy(LDummy* instr) {
5647  // Nothing to see here, move on!
5648 }
5649 
5650 
5651 void LCodeGen::DoDummyUse(LDummyUse* instr) {
5652  // Nothing to see here, move on!
5653 }
5654 
5655 
5656 void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
5657  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
5658  LoadContextFromDeferred(instr->context());
5659  __ CallRuntimeSaveDoubles(Runtime::kHiddenStackGuard);
5660  RecordSafepointWithLazyDeopt(
5661  instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
5662  ASSERT(instr->HasEnvironment());
5663  LEnvironment* env = instr->environment();
5664  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
5665 }
5666 
5667 
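// Stack checks at function entry call the StackCheck builtin directly; at
// backwards branches the check jumps to deferred code that calls
// Runtime::kHiddenStackGuard and registers a lazy deopt environment.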
5668 void LCodeGen::DoStackCheck(LStackCheck* instr) {
5669  class DeferredStackCheck V8_FINAL : public LDeferredCode {
5670  public:
5671  DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
5672  : LDeferredCode(codegen), instr_(instr) { }
5673  virtual void Generate() V8_OVERRIDE {
5674  codegen()->DoDeferredStackCheck(instr_);
5675  }
5676  virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
5677  private:
5678  LStackCheck* instr_;
5679  };
5680 
5681  ASSERT(instr->HasEnvironment());
5682  LEnvironment* env = instr->environment();
5683  // There is no LLazyBailout instruction for stack-checks. We have to
5684  // prepare for lazy deoptimization explicitly here.
5685  if (instr->hydrogen()->is_function_entry()) {
5686  // Perform stack overflow check.
5687  Label done;
5688  __ LoadRoot(at, Heap::kStackLimitRootIndex);
5689  __ Branch(&done, hs, sp, Operand(at));
5690  ASSERT(instr->context()->IsRegister());
5691  ASSERT(ToRegister(instr->context()).is(cp));
5692  CallCode(isolate()->builtins()->StackCheck(),
5693  RelocInfo::CODE_TARGET,
5694  instr);
5695  __ bind(&done);
5696  } else {
5697  ASSERT(instr->hydrogen()->is_backwards_branch());
5698  // Perform stack overflow check if this goto needs it before jumping.
5699  DeferredStackCheck* deferred_stack_check =
5700  new(zone()) DeferredStackCheck(this, instr);
5701  __ LoadRoot(at, Heap::kStackLimitRootIndex);
5702  __ Branch(deferred_stack_check->entry(), lo, sp, Operand(at));
5703  EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
5704  __ bind(instr->done_label());
5705  deferred_stack_check->SetExit(instr->done_label());
5706  RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
5707  // Don't record a deoptimization index for the safepoint here.
5708  // This will be done explicitly when emitting call and the safepoint in
5709  // the deferred code.
5710  }
5711 }
5712 
5713 
5714 void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
5715  // This is a pseudo-instruction that ensures that the environment here is
5716  // properly registered for deoptimization and records the assembler's PC
5717  // offset.
5718  LEnvironment* environment = instr->environment();
5719 
5720  // If the environment were already registered, we would have no way of
5721  // backpatching it with the spill slot operands.
5722  ASSERT(!environment->HasBeenRegistered());
5723  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
5724 
5725  GenerateOsrPrologue();
5726 }
5727 
5728 
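// Prepares a for-in enumeration: deoptimizes on undefined, null, Smis and
// proxies, then either uses the object's enum cache or falls back to
// Runtime::kGetPropertyNamesFast, deoptimizing unless the runtime returns a
// map (checked against the meta map).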
5729 void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
5730  Register result = ToRegister(instr->result());
5731  Register object = ToRegister(instr->object());
5732  __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
5733  DeoptimizeIf(eq, instr->environment(), object, Operand(at));
5734 
5735  Register null_value = t1;
5736  __ LoadRoot(null_value, Heap::kNullValueRootIndex);
5737  DeoptimizeIf(eq, instr->environment(), object, Operand(null_value));
5738 
5739  __ And(at, object, kSmiTagMask);
5740  DeoptimizeIf(eq, instr->environment(), at, Operand(zero_reg));
5741 
5742  STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
5743  __ GetObjectType(object, a1, a1);
5744  DeoptimizeIf(le, instr->environment(), a1, Operand(LAST_JS_PROXY_TYPE));
5745 
5746  Label use_cache, call_runtime;
5747  ASSERT(object.is(a0));
5748  __ CheckEnumCache(null_value, &call_runtime);
5749 
5750  __ lw(result, FieldMemOperand(object, HeapObject::kMapOffset));
5751  __ Branch(&use_cache);
5752 
5753  // Get the set of properties to enumerate.
5754  __ bind(&call_runtime);
5755  __ push(object);
5756  CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr);
5757 
5758  __ lw(a1, FieldMemOperand(v0, HeapObject::kMapOffset));
5759  ASSERT(result.is(v0));
5760  __ LoadRoot(at, Heap::kMetaMapRootIndex);
5761  DeoptimizeIf(ne, instr->environment(), a1, Operand(at));
5762  __ bind(&use_cache);
5763 }
5764 
5765 
5766 void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
5767  Register map = ToRegister(instr->map());
5768  Register result = ToRegister(instr->result());
5769  Label load_cache, done;
5770  __ EnumLength(result, map);
5771  __ Branch(&load_cache, ne, result, Operand(Smi::FromInt(0)));
5772  __ li(result, Operand(isolate()->factory()->empty_fixed_array()));
5773  __ jmp(&done);
5774 
5775  __ bind(&load_cache);
5776  __ LoadInstanceDescriptors(map, result);
5777  __ lw(result,
5778  FieldMemOperand(result, DescriptorArray::kEnumCacheOffset));
5779  __ lw(result,
5780  FieldMemOperand(result, FixedArray::SizeFor(instr->idx())));
5781  DeoptimizeIf(eq, instr->environment(), result, Operand(zero_reg));
5782 
5783  __ bind(&done);
5784 }
5785 
5786 
5787 void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
5788  Register object = ToRegister(instr->value());
5789  Register map = ToRegister(instr->map());
5790  __ lw(scratch0(), FieldMemOperand(object, HeapObject::kMapOffset));
5791  DeoptimizeIf(ne, instr->environment(), map, Operand(scratch0()));
5792 }
5793 
5794 
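// Loads a field by its (Smi) index: non-negative indices read in-object
// fields relative to JSObject::kHeaderSize, negative indices read from the
// out-of-object properties array.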
5795 void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
5796  Register object = ToRegister(instr->object());
5797  Register index = ToRegister(instr->index());
5798  Register result = ToRegister(instr->result());
5799  Register scratch = scratch0();
5800 
5801  Label out_of_object, done;
5802  __ Branch(USE_DELAY_SLOT, &out_of_object, lt, index, Operand(zero_reg));
5803  __ sll(scratch, index, kPointerSizeLog2 - kSmiTagSize); // In delay slot.
5804 
5806  __ Addu(scratch, object, scratch);
5807  __ lw(result, FieldMemOperand(scratch, JSObject::kHeaderSize));
5808 
5809  __ Branch(&done);
5810 
5811  __ bind(&out_of_object);
5812  __ lw(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
5813  // Index is equal to negated out of object property index plus 1.
5814  __ Subu(scratch, result, scratch);
5815  __ lw(result, FieldMemOperand(scratch,
5816  FixedArray::kHeaderSize - kPointerSize));
5817  __ bind(&done);
5818 }
5819 
5820 
5821 #undef __
5822 
5823 } } // namespace v8::internal