v8 3.25.30 (node 0.11.13)
V8 is Google's open source JavaScript engine
lithium-codegen-arm.cc
1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
4 // met:
5 //
6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided
11 // with the distribution.
12 // * Neither the name of Google Inc. nor the names of its
13 // contributors may be used to endorse or promote products derived
14 // from this software without specific prior written permission.
15 //
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 
28 #include "v8.h"
29 
30 #include "arm/lithium-codegen-arm.h"
31 #include "arm/lithium-gap-resolver-arm.h"
32 #include "code-stubs.h"
33 #include "stub-cache.h"
34 #include "hydrogen-osr.h"
35 
36 namespace v8 {
37 namespace internal {
38 
39 
40 class SafepointGenerator V8_FINAL : public CallWrapper {
41  public:
42  SafepointGenerator(LCodeGen* codegen,
43  LPointerMap* pointers,
44  Safepoint::DeoptMode mode)
45  : codegen_(codegen),
46  pointers_(pointers),
47  deopt_mode_(mode) { }
48  virtual ~SafepointGenerator() {}
49 
50  virtual void BeforeCall(int call_size) const V8_OVERRIDE {}
51 
52  virtual void AfterCall() const V8_OVERRIDE {
53  codegen_->RecordSafepoint(pointers_, deopt_mode_);
54  }
55 
56  private:
57  LCodeGen* codegen_;
58  LPointerMap* pointers_;
59  Safepoint::DeoptMode deopt_mode_;
60 };
61 
62 
63 #define __ masm()->
64 
65 bool LCodeGen::GenerateCode() {
66  LPhase phase("Z_Code generation", chunk());
67  ASSERT(is_unused());
68  status_ = GENERATING;
69 
70  // Open a frame scope to indicate that there is a frame on the stack. The
71  // NONE indicates that the scope shouldn't actually generate code to set up
72  // the frame (that is done in GeneratePrologue).
73  FrameScope frame_scope(masm_, StackFrame::NONE);
74 
75  return GeneratePrologue() &&
76  GenerateBody() &&
77  GenerateDeferredCode() &&
78  GenerateDeoptJumpTable() &&
79  GenerateSafepointTable();
80 }
81 
82 
83 void LCodeGen::FinishCode(Handle<Code> code) {
84  ASSERT(is_done());
85  code->set_stack_slots(GetStackSlotCount());
86  code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
87  if (code->is_optimized_code()) RegisterWeakObjectsInOptimizedCode(code);
88  PopulateDeoptimizationData(code);
89  info()->CommitDependencies(code);
90 }
91 
92 
93 void LCodeGen::Abort(BailoutReason reason) {
94  info()->set_bailout_reason(reason);
95  status_ = ABORTED;
96 }
97 
98 
99 void LCodeGen::SaveCallerDoubles() {
100  ASSERT(info()->saves_caller_doubles());
101  ASSERT(NeedsEagerFrame());
102  Comment(";;; Save clobbered callee double registers");
103  int count = 0;
104  BitVector* doubles = chunk()->allocated_double_registers();
105  BitVector::Iterator save_iterator(doubles);
106  while (!save_iterator.Done()) {
107  __ vstr(DwVfpRegister::FromAllocationIndex(save_iterator.Current()),
108  MemOperand(sp, count * kDoubleSize));
109  save_iterator.Advance();
110  count++;
111  }
112 }
113 
114 
115 void LCodeGen::RestoreCallerDoubles() {
116  ASSERT(info()->saves_caller_doubles());
117  ASSERT(NeedsEagerFrame());
118  Comment(";;; Restore clobbered callee double registers");
119  BitVector* doubles = chunk()->allocated_double_registers();
120  BitVector::Iterator save_iterator(doubles);
121  int count = 0;
122  while (!save_iterator.Done()) {
123  __ vldr(DwVfpRegister::FromAllocationIndex(save_iterator.Current()),
124  MemOperand(sp, count * kDoubleSize));
125  save_iterator.Advance();
126  count++;
127  }
128 }
129 
130 
131 bool LCodeGen::GeneratePrologue() {
132  ASSERT(is_generating());
133 
134  if (info()->IsOptimizing()) {
135  ProfileEntryHookStub::MaybeCallEntryHook(masm_);
136 
137 #ifdef DEBUG
138  if (strlen(FLAG_stop_at) > 0 &&
139  info_->function()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
140  __ stop("stop_at");
141  }
142 #endif
143 
144  // r1: Callee's JS function.
145  // cp: Callee's context.
146  // pp: Callee's constant pool pointer (if FLAG_enable_ool_constant_pool)
147  // fp: Caller's frame pointer.
148  // lr: Caller's pc.
149 
150  // Sloppy mode functions and builtins need to replace the receiver with the
151  // global proxy when called as functions (without an explicit receiver
152  // object).
153  if (info_->this_has_uses() &&
154  info_->strict_mode() == SLOPPY &&
155  !info_->is_native()) {
156  Label ok;
157  int receiver_offset = info_->scope()->num_parameters() * kPointerSize;
158  __ ldr(r2, MemOperand(sp, receiver_offset));
159  __ CompareRoot(r2, Heap::kUndefinedValueRootIndex);
160  __ b(ne, &ok);
161 
162  __ ldr(r2, GlobalObjectOperand());
163  __ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalReceiverOffset));
164 
165  __ str(r2, MemOperand(sp, receiver_offset));
166 
167  __ bind(&ok);
168  }
169  }
170 
171  info()->set_prologue_offset(masm_->pc_offset());
172  if (NeedsEagerFrame()) {
173  __ Prologue(info()->IsStub() ? BUILD_STUB_FRAME : BUILD_FUNCTION_FRAME);
174  frame_is_built_ = true;
175  info_->AddNoFrameRange(0, masm_->pc_offset());
176  }
177 
178  // Reserve space for the stack slots needed by the code.
179  int slots = GetStackSlotCount();
180  if (slots > 0) {
181  if (FLAG_debug_code) {
182  __ sub(sp, sp, Operand(slots * kPointerSize));
183  __ push(r0);
184  __ push(r1);
185  __ add(r0, sp, Operand(slots * kPointerSize));
186  __ mov(r1, Operand(kSlotsZapValue));
187  Label loop;
188  __ bind(&loop);
189  __ sub(r0, r0, Operand(kPointerSize));
190  __ str(r1, MemOperand(r0, 2 * kPointerSize));
191  __ cmp(r0, sp);
192  __ b(ne, &loop);
193  __ pop(r1);
194  __ pop(r0);
195  } else {
196  __ sub(sp, sp, Operand(slots * kPointerSize));
197  }
198  }
199 
200  if (info()->saves_caller_doubles()) {
201  SaveCallerDoubles();
202  }
203 
204  // Possibly allocate a local context.
205  int heap_slots = info()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
206  if (heap_slots > 0) {
207  Comment(";;; Allocate local context");
208  // Argument to NewContext is the function, which is in r1.
209  if (heap_slots <= FastNewContextStub::kMaximumSlots) {
210  FastNewContextStub stub(heap_slots);
211  __ CallStub(&stub);
212  } else {
213  __ push(r1);
214  __ CallRuntime(Runtime::kHiddenNewFunctionContext, 1);
215  }
216  RecordSafepoint(Safepoint::kNoLazyDeopt);
217  // Context is returned in both r0 and cp. It replaces the context
218  // passed to us. It's saved in the stack and kept live in cp.
219  __ mov(cp, r0);
220  __ str(r0, MemOperand(fp, StandardFrameConstants::kContextOffset));
221  // Copy any necessary parameters into the context.
222  int num_parameters = scope()->num_parameters();
223  for (int i = 0; i < num_parameters; i++) {
224  Variable* var = scope()->parameter(i);
225  if (var->IsContextSlot()) {
226  int parameter_offset = StandardFrameConstants::kCallerSPOffset +
227  (num_parameters - 1 - i) * kPointerSize;
228  // Load parameter from stack.
229  __ ldr(r0, MemOperand(fp, parameter_offset));
230  // Store it in the context.
231  MemOperand target = ContextOperand(cp, var->index());
232  __ str(r0, target);
233  // Update the write barrier. This clobbers r3 and r0.
234  __ RecordWriteContextSlot(
235  cp,
236  target.offset(),
237  r0,
238  r3,
239  GetLinkRegisterState(),
240  kSaveFPRegs);
241  }
242  }
243  Comment(";;; End allocate local context");
244  }
245 
246  // Trace the call.
247  if (FLAG_trace && info()->IsOptimizing()) {
248  // We have not executed any compiled code yet, so cp still holds the
249  // incoming context.
250  __ CallRuntime(Runtime::kTraceEnter, 0);
251  }
252  return !is_aborted();
253 }
254 
255 
256 void LCodeGen::GenerateOsrPrologue() {
257  // Generate the OSR entry prologue at the first unknown OSR value, or if there
258  // are none, at the OSR entrypoint instruction.
259  if (osr_pc_offset_ >= 0) return;
260 
261  osr_pc_offset_ = masm()->pc_offset();
262 
263  // Adjust the frame size, subsuming the unoptimized frame into the
264  // optimized frame.
265  int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots();
266  ASSERT(slots >= 0);
267  __ sub(sp, sp, Operand(slots * kPointerSize));
268 }
269 
270 
271 void LCodeGen::GenerateBodyInstructionPre(LInstruction* instr) {
272  if (instr->IsCall()) {
273  EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
274  }
275  if (!instr->IsLazyBailout() && !instr->IsGap()) {
276  safepoints_.BumpLastLazySafepointIndex();
277  }
278 }
279 
280 
281 bool LCodeGen::GenerateDeferredCode() {
282  ASSERT(is_generating());
283  if (deferred_.length() > 0) {
284  for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
285  LDeferredCode* code = deferred_[i];
286 
287  HValue* value =
288  instructions_->at(code->instruction_index())->hydrogen_value();
289  RecordAndWritePosition(
290  chunk()->graph()->SourcePositionToScriptPosition(value->position()));
291 
292  Comment(";;; <@%d,#%d> "
293  "-------------------- Deferred %s --------------------",
294  code->instruction_index(),
295  code->instr()->hydrogen_value()->id(),
296  code->instr()->Mnemonic());
297  __ bind(code->entry());
298  if (NeedsDeferredFrame()) {
299  Comment(";;; Build frame");
300  ASSERT(!frame_is_built_);
301  ASSERT(info()->IsStub());
302  frame_is_built_ = true;
303  __ PushFixedFrame();
304  __ mov(scratch0(), Operand(Smi::FromInt(StackFrame::STUB)));
305  __ push(scratch0());
306  __ add(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
307  Comment(";;; Deferred code");
308  }
309  code->Generate();
310  if (NeedsDeferredFrame()) {
311  Comment(";;; Destroy frame");
312  ASSERT(frame_is_built_);
313  __ pop(ip);
314  __ PopFixedFrame();
315  frame_is_built_ = false;
316  }
317  __ jmp(code->exit());
318  }
319  }
320 
321  // Force constant pool emission at the end of the deferred code to make
322  // sure that no constant pools are emitted after.
323  masm()->CheckConstPool(true, false);
324 
325  return !is_aborted();
326 }
327 
328 
329 bool LCodeGen::GenerateDeoptJumpTable() {
330  // Check that the jump table is accessible from everywhere in the function
331  // code, i.e. that offsets to the table can be encoded in the 24bit signed
332  // immediate of a branch instruction.
333  // To simplify we consider the code size from the first instruction to the
334  // end of the jump table. We also don't consider the pc load delta.
335  // Each entry in the jump table generates one instruction and inlines one
336  // 32bit data after it.
337  if (!is_int24((masm()->pc_offset() / Assembler::kInstrSize) +
338  deopt_jump_table_.length() * 7)) {
339  Abort(kGeneratedCodeIsTooLarge);
340  }
341 
342  if (deopt_jump_table_.length() > 0) {
343  Comment(";;; -------------------- Jump table --------------------");
344  }
345  Label table_start;
346  __ bind(&table_start);
347  Label needs_frame;
348  for (int i = 0; i < deopt_jump_table_.length(); i++) {
349  __ bind(&deopt_jump_table_[i].label);
350  Address entry = deopt_jump_table_[i].address;
351  Deoptimizer::BailoutType type = deopt_jump_table_[i].bailout_type;
352  int id = Deoptimizer::GetDeoptimizationId(isolate(), entry, type);
353  if (id == Deoptimizer::kNotDeoptimizationEntry) {
354  Comment(";;; jump table entry %d.", i);
355  } else {
356  Comment(";;; jump table entry %d: deoptimization bailout %d.", i, id);
357  }
358  if (deopt_jump_table_[i].needs_frame) {
359  ASSERT(!info()->saves_caller_doubles());
360  __ mov(ip, Operand(ExternalReference::ForDeoptEntry(entry)));
361  if (needs_frame.is_bound()) {
362  __ b(&needs_frame);
363  } else {
364  __ bind(&needs_frame);
365  __ PushFixedFrame();
366  // This variant of deopt can only be used with stubs. Since we don't
367  // have a function pointer to install in the stack frame that we're
368  // building, install a special marker there instead.
369  ASSERT(info()->IsStub());
370  __ mov(scratch0(), Operand(Smi::FromInt(StackFrame::STUB)));
371  __ push(scratch0());
372  __ add(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
373  __ mov(lr, Operand(pc), LeaveCC, al);
374  __ mov(pc, ip);
375  }
376  } else {
377  if (info()->saves_caller_doubles()) {
378  ASSERT(info()->IsStub());
379  RestoreCallerDoubles();
380  }
381  __ mov(lr, Operand(pc), LeaveCC, al);
382  __ mov(pc, Operand(ExternalReference::ForDeoptEntry(entry)));
383  }
384  masm()->CheckConstPool(false, false);
385  }
386 
387  // Force constant pool emission at the end of the deopt jump table to make
388  // sure that no constant pools are emitted after.
389  masm()->CheckConstPool(true, false);
390 
391  // The deoptimization jump table is the last part of the instruction
392  // sequence. Mark the generated code as done unless we bailed out.
393  if (!is_aborted()) status_ = DONE;
394  return !is_aborted();
395 }
396 
397 
398 bool LCodeGen::GenerateSafepointTable() {
399  ASSERT(is_done());
400  safepoints_.Emit(masm(), GetStackSlotCount());
401  return !is_aborted();
402 }
403 
404 
405 Register LCodeGen::ToRegister(int index) const {
406  return Register::FromAllocationIndex(index);
407 }
408 
409 
410 DwVfpRegister LCodeGen::ToDoubleRegister(int index) const {
411  return DwVfpRegister::FromAllocationIndex(index);
412 }
413 
414 
415 Register LCodeGen::ToRegister(LOperand* op) const {
416  ASSERT(op->IsRegister());
417  return ToRegister(op->index());
418 }
419 
420 
421 Register LCodeGen::EmitLoadRegister(LOperand* op, Register scratch) {
422  if (op->IsRegister()) {
423  return ToRegister(op->index());
424  } else if (op->IsConstantOperand()) {
425  LConstantOperand* const_op = LConstantOperand::cast(op);
426  HConstant* constant = chunk_->LookupConstant(const_op);
427  Handle<Object> literal = constant->handle(isolate());
428  Representation r = chunk_->LookupLiteralRepresentation(const_op);
429  if (r.IsInteger32()) {
430  ASSERT(literal->IsNumber());
431  __ mov(scratch, Operand(static_cast<int32_t>(literal->Number())));
432  } else if (r.IsDouble()) {
433  Abort(kEmitLoadRegisterUnsupportedDoubleImmediate);
434  } else {
435  ASSERT(r.IsSmiOrTagged());
436  __ Move(scratch, literal);
437  }
438  return scratch;
439  } else if (op->IsStackSlot()) {
440  __ ldr(scratch, ToMemOperand(op));
441  return scratch;
442  }
443  UNREACHABLE();
444  return scratch;
445 }
446 
447 
448 DwVfpRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
449  ASSERT(op->IsDoubleRegister());
450  return ToDoubleRegister(op->index());
451 }
452 
453 
454 DwVfpRegister LCodeGen::EmitLoadDoubleRegister(LOperand* op,
455  SwVfpRegister flt_scratch,
456  DwVfpRegister dbl_scratch) {
457  if (op->IsDoubleRegister()) {
458  return ToDoubleRegister(op->index());
459  } else if (op->IsConstantOperand()) {
460  LConstantOperand* const_op = LConstantOperand::cast(op);
461  HConstant* constant = chunk_->LookupConstant(const_op);
462  Handle<Object> literal = constant->handle(isolate());
463  Representation r = chunk_->LookupLiteralRepresentation(const_op);
464  if (r.IsInteger32()) {
465  ASSERT(literal->IsNumber());
466  __ mov(ip, Operand(static_cast<int32_t>(literal->Number())));
467  __ vmov(flt_scratch, ip);
468  __ vcvt_f64_s32(dbl_scratch, flt_scratch);
469  return dbl_scratch;
470  } else if (r.IsDouble()) {
471  Abort(kUnsupportedDoubleImmediate);
472  } else if (r.IsTagged()) {
473  Abort(kUnsupportedTaggedImmediate);
474  }
475  } else if (op->IsStackSlot()) {
476  // TODO(regis): Why is vldr not taking a MemOperand?
477  // __ vldr(dbl_scratch, ToMemOperand(op));
478  MemOperand mem_op = ToMemOperand(op);
479  __ vldr(dbl_scratch, mem_op.rn(), mem_op.offset());
480  return dbl_scratch;
481  }
482  UNREACHABLE();
483  return dbl_scratch;
484 }
485 
486 
487 Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
488  HConstant* constant = chunk_->LookupConstant(op);
489  ASSERT(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged());
490  return constant->handle(isolate());
491 }
492 
493 
494 bool LCodeGen::IsInteger32(LConstantOperand* op) const {
495  return chunk_->LookupLiteralRepresentation(op).IsSmiOrInteger32();
496 }
497 
498 
499 bool LCodeGen::IsSmi(LConstantOperand* op) const {
500  return chunk_->LookupLiteralRepresentation(op).IsSmi();
501 }
502 
503 
504 int32_t LCodeGen::ToInteger32(LConstantOperand* op) const {
505  return ToRepresentation(op, Representation::Integer32());
506 }
507 
508 
509 int32_t LCodeGen::ToRepresentation(LConstantOperand* op,
510  const Representation& r) const {
511  HConstant* constant = chunk_->LookupConstant(op);
512  int32_t value = constant->Integer32Value();
513  if (r.IsInteger32()) return value;
514  ASSERT(r.IsSmiOrTagged());
515  return reinterpret_cast<int32_t>(Smi::FromInt(value));
516 }
517 
518 
519 Smi* LCodeGen::ToSmi(LConstantOperand* op) const {
520  HConstant* constant = chunk_->LookupConstant(op);
521  return Smi::FromInt(constant->Integer32Value());
522 }
523 
524 
525 double LCodeGen::ToDouble(LConstantOperand* op) const {
526  HConstant* constant = chunk_->LookupConstant(op);
527  ASSERT(constant->HasDoubleValue());
528  return constant->DoubleValue();
529 }
530 
531 
532 Operand LCodeGen::ToOperand(LOperand* op) {
533  if (op->IsConstantOperand()) {
534  LConstantOperand* const_op = LConstantOperand::cast(op);
535  HConstant* constant = chunk()->LookupConstant(const_op);
536  Representation r = chunk_->LookupLiteralRepresentation(const_op);
537  if (r.IsSmi()) {
538  ASSERT(constant->HasSmiValue());
539  return Operand(Smi::FromInt(constant->Integer32Value()));
540  } else if (r.IsInteger32()) {
541  ASSERT(constant->HasInteger32Value());
542  return Operand(constant->Integer32Value());
543  } else if (r.IsDouble()) {
544  Abort(kToOperandUnsupportedDoubleImmediate);
545  }
546  ASSERT(r.IsTagged());
547  return Operand(constant->handle(isolate()));
548  } else if (op->IsRegister()) {
549  return Operand(ToRegister(op));
550  } else if (op->IsDoubleRegister()) {
551  Abort(kToOperandIsDoubleRegisterUnimplemented);
552  return Operand::Zero();
553  }
554  // Stack slots not implemented, use ToMemOperand instead.
555  UNREACHABLE();
556  return Operand::Zero();
557 }
558 
559 
560 static int ArgumentsOffsetWithoutFrame(int index) {
561  ASSERT(index < 0);
562  return -(index + 1) * kPointerSize;
563 }
564 
565 
566 MemOperand LCodeGen::ToMemOperand(LOperand* op) const {
567  ASSERT(!op->IsRegister());
568  ASSERT(!op->IsDoubleRegister());
569  ASSERT(op->IsStackSlot() || op->IsDoubleStackSlot());
570  if (NeedsEagerFrame()) {
571  return MemOperand(fp, StackSlotOffset(op->index()));
572  } else {
573  // Without an eager frame, retrieve the parameter relative to the
574  // stack pointer.
575  return MemOperand(sp, ArgumentsOffsetWithoutFrame(op->index()));
576  }
577 }
578 
579 
580 MemOperand LCodeGen::ToHighMemOperand(LOperand* op) const {
581  ASSERT(op->IsDoubleStackSlot());
582  if (NeedsEagerFrame()) {
583  return MemOperand(fp, StackSlotOffset(op->index()) + kPointerSize);
584  } else {
585  // Without an eager frame, retrieve the parameter relative to the
586  // stack pointer.
587  return MemOperand(
588  sp, ArgumentsOffsetWithoutFrame(op->index()) + kPointerSize);
589  }
590 }
591 
592 
593 void LCodeGen::WriteTranslation(LEnvironment* environment,
594  Translation* translation) {
595  if (environment == NULL) return;
596 
597  // The translation includes one command per value in the environment.
598  int translation_size = environment->translation_size();
599  // The output frame height does not include the parameters.
600  int height = translation_size - environment->parameter_count();
601 
602  WriteTranslation(environment->outer(), translation);
603  bool has_closure_id = !info()->closure().is_null() &&
604  !info()->closure().is_identical_to(environment->closure());
605  int closure_id = has_closure_id
606  ? DefineDeoptimizationLiteral(environment->closure())
607  : Translation::kSelfLiteralId;
608 
609  switch (environment->frame_type()) {
610  case JS_FUNCTION:
611  translation->BeginJSFrame(environment->ast_id(), closure_id, height);
612  break;
613  case JS_CONSTRUCT:
614  translation->BeginConstructStubFrame(closure_id, translation_size);
615  break;
616  case JS_GETTER:
617  ASSERT(translation_size == 1);
618  ASSERT(height == 0);
619  translation->BeginGetterStubFrame(closure_id);
620  break;
621  case JS_SETTER:
622  ASSERT(translation_size == 2);
623  ASSERT(height == 0);
624  translation->BeginSetterStubFrame(closure_id);
625  break;
626  case STUB:
627  translation->BeginCompiledStubFrame();
628  break;
629  case ARGUMENTS_ADAPTOR:
630  translation->BeginArgumentsAdaptorFrame(closure_id, translation_size);
631  break;
632  }
633 
634  int object_index = 0;
635  int dematerialized_index = 0;
636  for (int i = 0; i < translation_size; ++i) {
637  LOperand* value = environment->values()->at(i);
638  AddToTranslation(environment,
639  translation,
640  value,
641  environment->HasTaggedValueAt(i),
642  environment->HasUint32ValueAt(i),
643  &object_index,
644  &dematerialized_index);
645  }
646 }
647 
648 
649 void LCodeGen::AddToTranslation(LEnvironment* environment,
650  Translation* translation,
651  LOperand* op,
652  bool is_tagged,
653  bool is_uint32,
654  int* object_index_pointer,
655  int* dematerialized_index_pointer) {
656  if (op == LEnvironment::materialization_marker()) {
657  int object_index = (*object_index_pointer)++;
658  if (environment->ObjectIsDuplicateAt(object_index)) {
659  int dupe_of = environment->ObjectDuplicateOfAt(object_index);
660  translation->DuplicateObject(dupe_of);
661  return;
662  }
663  int object_length = environment->ObjectLengthAt(object_index);
664  if (environment->ObjectIsArgumentsAt(object_index)) {
665  translation->BeginArgumentsObject(object_length);
666  } else {
667  translation->BeginCapturedObject(object_length);
668  }
669  int dematerialized_index = *dematerialized_index_pointer;
670  int env_offset = environment->translation_size() + dematerialized_index;
671  *dematerialized_index_pointer += object_length;
672  for (int i = 0; i < object_length; ++i) {
673  LOperand* value = environment->values()->at(env_offset + i);
674  AddToTranslation(environment,
675  translation,
676  value,
677  environment->HasTaggedValueAt(env_offset + i),
678  environment->HasUint32ValueAt(env_offset + i),
679  object_index_pointer,
680  dematerialized_index_pointer);
681  }
682  return;
683  }
684 
685  if (op->IsStackSlot()) {
686  if (is_tagged) {
687  translation->StoreStackSlot(op->index());
688  } else if (is_uint32) {
689  translation->StoreUint32StackSlot(op->index());
690  } else {
691  translation->StoreInt32StackSlot(op->index());
692  }
693  } else if (op->IsDoubleStackSlot()) {
694  translation->StoreDoubleStackSlot(op->index());
695  } else if (op->IsRegister()) {
696  Register reg = ToRegister(op);
697  if (is_tagged) {
698  translation->StoreRegister(reg);
699  } else if (is_uint32) {
700  translation->StoreUint32Register(reg);
701  } else {
702  translation->StoreInt32Register(reg);
703  }
704  } else if (op->IsDoubleRegister()) {
705  DoubleRegister reg = ToDoubleRegister(op);
706  translation->StoreDoubleRegister(reg);
707  } else if (op->IsConstantOperand()) {
708  HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op));
709  int src_index = DefineDeoptimizationLiteral(constant->handle(isolate()));
710  translation->StoreLiteral(src_index);
711  } else {
712  UNREACHABLE();
713  }
714 }
715 
716 
717 void LCodeGen::CallCode(Handle<Code> code,
718  RelocInfo::Mode mode,
719  LInstruction* instr,
720  TargetAddressStorageMode storage_mode) {
721  CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT, storage_mode);
722 }
723 
724 
725 void LCodeGen::CallCodeGeneric(Handle<Code> code,
726  RelocInfo::Mode mode,
727  LInstruction* instr,
728  SafepointMode safepoint_mode,
729  TargetAddressStorageMode storage_mode) {
730  ASSERT(instr != NULL);
731  // Block literal pool emission to ensure nop indicating no inlined smi code
732  // is in the correct position.
733  Assembler::BlockConstPoolScope block_const_pool(masm());
734  __ Call(code, mode, TypeFeedbackId::None(), al, storage_mode);
735  RecordSafepointWithLazyDeopt(instr, safepoint_mode);
736 
737  // Signal that we don't inline smi code before these stubs in the
738  // optimizing code generator.
739  if (code->kind() == Code::BINARY_OP_IC ||
740  code->kind() == Code::COMPARE_IC) {
741  __ nop();
742  }
743 }
744 
745 
746 void LCodeGen::CallRuntime(const Runtime::Function* function,
747  int num_arguments,
748  LInstruction* instr,
749  SaveFPRegsMode save_doubles) {
750  ASSERT(instr != NULL);
751 
752  __ CallRuntime(function, num_arguments, save_doubles);
753 
754  RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
755 }
756 
757 
758 void LCodeGen::LoadContextFromDeferred(LOperand* context) {
759  if (context->IsRegister()) {
760  __ Move(cp, ToRegister(context));
761  } else if (context->IsStackSlot()) {
762  __ ldr(cp, ToMemOperand(context));
763  } else if (context->IsConstantOperand()) {
764  HConstant* constant =
765  chunk_->LookupConstant(LConstantOperand::cast(context));
766  __ Move(cp, Handle<Object>::cast(constant->handle(isolate())));
767  } else {
768  UNREACHABLE();
769  }
770 }
771 
772 
773 void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
774  int argc,
775  LInstruction* instr,
776  LOperand* context) {
777  LoadContextFromDeferred(context);
778  __ CallRuntimeSaveDoubles(id);
779  RecordSafepointWithRegisters(
780  instr->pointer_map(), argc, Safepoint::kNoLazyDeopt);
781 }
782 
783 
784 void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
785  Safepoint::DeoptMode mode) {
786  if (!environment->HasBeenRegistered()) {
787  // Physical stack frame layout:
788  // -x ............. -4 0 ..................................... y
789  // [incoming arguments] [spill slots] [pushed outgoing arguments]
790 
791  // Layout of the environment:
792  // 0 ..................................................... size-1
793  // [parameters] [locals] [expression stack including arguments]
794 
795  // Layout of the translation:
796  // 0 ........................................................ size - 1 + 4
797  // [expression stack including arguments] [locals] [4 words] [parameters]
798  // |>------------ translation_size ------------<|
799 
800  int frame_count = 0;
801  int jsframe_count = 0;
802  for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
803  ++frame_count;
804  if (e->frame_type() == JS_FUNCTION) {
805  ++jsframe_count;
806  }
807  }
808  Translation translation(&translations_, frame_count, jsframe_count, zone());
809  WriteTranslation(environment, &translation);
810  int deoptimization_index = deoptimizations_.length();
811  int pc_offset = masm()->pc_offset();
812  environment->Register(deoptimization_index,
813  translation.index(),
814  (mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
815  deoptimizations_.Add(environment, zone());
816  }
817 }
818 
819 
820 void LCodeGen::DeoptimizeIf(Condition condition,
821  LEnvironment* environment,
822  Deoptimizer::BailoutType bailout_type) {
823  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
824  ASSERT(environment->HasBeenRegistered());
825  int id = environment->deoptimization_index();
826  ASSERT(info()->IsOptimizing() || info()->IsStub());
827  Address entry =
828  Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type);
829  if (entry == NULL) {
830  Abort(kBailoutWasNotPrepared);
831  return;
832  }
833 
834  if (FLAG_deopt_every_n_times != 0 && !info()->IsStub()) {
835  Register scratch = scratch0();
836  ExternalReference count = ExternalReference::stress_deopt_count(isolate());
837 
838  // Store the condition on the stack if necessary
839  if (condition != al) {
840  __ mov(scratch, Operand::Zero(), LeaveCC, NegateCondition(condition));
841  __ mov(scratch, Operand(1), LeaveCC, condition);
842  __ push(scratch);
843  }
844 
845  __ push(r1);
846  __ mov(scratch, Operand(count));
847  __ ldr(r1, MemOperand(scratch));
848  __ sub(r1, r1, Operand(1), SetCC);
849  __ movw(r1, FLAG_deopt_every_n_times, eq);
850  __ str(r1, MemOperand(scratch));
851  __ pop(r1);
852 
853  if (condition != al) {
854  // Clean up the stack before the deoptimizer call
855  __ pop(scratch);
856  }
857 
858  __ Call(entry, RelocInfo::RUNTIME_ENTRY, eq);
859 
860  // 'Restore' the condition in a slightly hacky way. (It would be better
861  // to use 'msr' and 'mrs' instructions here, but they are not supported by
862  // our ARM simulator).
863  if (condition != al) {
864  condition = ne;
865  __ cmp(scratch, Operand::Zero());
866  }
867  }
868 
869  if (info()->ShouldTrapOnDeopt()) {
870  __ stop("trap_on_deopt", condition);
871  }
872 
873  ASSERT(info()->IsStub() || frame_is_built_);
874  // Go through the jump table if we need to handle a condition, build the
875  // frame, or restore caller doubles.
876  if (condition == al && frame_is_built_ &&
877  !info()->saves_caller_doubles()) {
878  __ Call(entry, RelocInfo::RUNTIME_ENTRY);
879  } else {
880  // We often have several deopts to the same entry, reuse the last
881  // jump entry if this is the case.
882  if (deopt_jump_table_.is_empty() ||
883  (deopt_jump_table_.last().address != entry) ||
884  (deopt_jump_table_.last().bailout_type != bailout_type) ||
885  (deopt_jump_table_.last().needs_frame != !frame_is_built_)) {
886  Deoptimizer::JumpTableEntry table_entry(entry,
887  bailout_type,
888  !frame_is_built_);
889  deopt_jump_table_.Add(table_entry, zone());
890  }
891  __ b(condition, &deopt_jump_table_.last().label);
892  }
893 }
894 
895 
896 void LCodeGen::DeoptimizeIf(Condition condition,
897  LEnvironment* environment) {
898  Deoptimizer::BailoutType bailout_type = info()->IsStub()
899  ? Deoptimizer::LAZY
900  : Deoptimizer::EAGER;
901  DeoptimizeIf(condition, environment, bailout_type);
902 }
903 
904 
905 void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
906  int length = deoptimizations_.length();
907  if (length == 0) return;
908  Handle<DeoptimizationInputData> data =
909  factory()->NewDeoptimizationInputData(length, TENURED);
910 
911  Handle<ByteArray> translations =
912  translations_.CreateByteArray(isolate()->factory());
913  data->SetTranslationByteArray(*translations);
914  data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));
915  data->SetOptimizationId(Smi::FromInt(info_->optimization_id()));
916  if (info_->IsOptimizing()) {
917  // Reference to shared function info does not change between phases.
918  AllowDeferredHandleDereference allow_handle_dereference;
919  data->SetSharedFunctionInfo(*info_->shared_info());
920  } else {
921  data->SetSharedFunctionInfo(Smi::FromInt(0));
922  }
923 
924  Handle<FixedArray> literals =
925  factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
926  { AllowDeferredHandleDereference copy_handles;
927  for (int i = 0; i < deoptimization_literals_.length(); i++) {
928  literals->set(i, *deoptimization_literals_[i]);
929  }
930  data->SetLiteralArray(*literals);
931  }
932 
933  data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id().ToInt()));
934  data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_));
935 
936  // Populate the deoptimization entries.
937  for (int i = 0; i < length; i++) {
938  LEnvironment* env = deoptimizations_[i];
939  data->SetAstId(i, env->ast_id());
940  data->SetTranslationIndex(i, Smi::FromInt(env->translation_index()));
941  data->SetArgumentsStackHeight(i,
942  Smi::FromInt(env->arguments_stack_height()));
943  data->SetPc(i, Smi::FromInt(env->pc_offset()));
944  }
945  code->set_deoptimization_data(*data);
946 }
947 
948 
949 int LCodeGen::DefineDeoptimizationLiteral(Handle<Object> literal) {
950  int result = deoptimization_literals_.length();
951  for (int i = 0; i < deoptimization_literals_.length(); ++i) {
952  if (deoptimization_literals_[i].is_identical_to(literal)) return i;
953  }
954  deoptimization_literals_.Add(literal, zone());
955  return result;
956 }
957 
958 
959 void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
960  ASSERT(deoptimization_literals_.length() == 0);
961 
962  const ZoneList<Handle<JSFunction> >* inlined_closures =
963  chunk()->inlined_closures();
964 
965  for (int i = 0, length = inlined_closures->length();
966  i < length;
967  i++) {
968  DefineDeoptimizationLiteral(inlined_closures->at(i));
969  }
970 
971  inlined_function_count_ = deoptimization_literals_.length();
972 }
973 
974 
975 void LCodeGen::RecordSafepointWithLazyDeopt(
976  LInstruction* instr, SafepointMode safepoint_mode) {
977  if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
978  RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt);
979  } else {
980  ASSERT(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
981  RecordSafepointWithRegisters(
982  instr->pointer_map(), 0, Safepoint::kLazyDeopt);
983  }
984 }
985 
986 
987 void LCodeGen::RecordSafepoint(
988  LPointerMap* pointers,
989  Safepoint::Kind kind,
990  int arguments,
991  Safepoint::DeoptMode deopt_mode) {
992  ASSERT(expected_safepoint_kind_ == kind);
993 
994  const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();
995  Safepoint safepoint = safepoints_.DefineSafepoint(masm(),
996  kind, arguments, deopt_mode);
997  for (int i = 0; i < operands->length(); i++) {
998  LOperand* pointer = operands->at(i);
999  if (pointer->IsStackSlot()) {
1000  safepoint.DefinePointerSlot(pointer->index(), zone());
1001  } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
1002  safepoint.DefinePointerRegister(ToRegister(pointer), zone());
1003  }
1004  }
1005  if (FLAG_enable_ool_constant_pool && (kind & Safepoint::kWithRegisters)) {
1006  // Register pp always contains a pointer to the constant pool.
1007  safepoint.DefinePointerRegister(pp, zone());
1008  }
1009 }
1010 
1011 
1012 void LCodeGen::RecordSafepoint(LPointerMap* pointers,
1013  Safepoint::DeoptMode deopt_mode) {
1014  RecordSafepoint(pointers, Safepoint::kSimple, 0, deopt_mode);
1015 }
1016 
1017 
1018 void LCodeGen::RecordSafepoint(Safepoint::DeoptMode deopt_mode) {
1019  LPointerMap empty_pointers(zone());
1020  RecordSafepoint(&empty_pointers, deopt_mode);
1021 }
1022 
1023 
1024 void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
1025  int arguments,
1026  Safepoint::DeoptMode deopt_mode) {
1027  RecordSafepoint(
1028  pointers, Safepoint::kWithRegisters, arguments, deopt_mode);
1029 }
1030 
1031 
1032 void LCodeGen::RecordSafepointWithRegistersAndDoubles(
1033  LPointerMap* pointers,
1034  int arguments,
1035  Safepoint::DeoptMode deopt_mode) {
1036  RecordSafepoint(
1037  pointers, Safepoint::kWithRegistersAndDoubles, arguments, deopt_mode);
1038 }
1039 
1040 
1041 void LCodeGen::RecordAndWritePosition(int position) {
1042  if (position == RelocInfo::kNoPosition) return;
1043  masm()->positions_recorder()->RecordPosition(position);
1044  masm()->positions_recorder()->WriteRecordedPositions();
1045 }
1046 
1047 
1048 static const char* LabelType(LLabel* label) {
1049  if (label->is_loop_header()) return " (loop header)";
1050  if (label->is_osr_entry()) return " (OSR entry)";
1051  return "";
1052 }
1053 
1054 
1055 void LCodeGen::DoLabel(LLabel* label) {
1056  Comment(";;; <@%d,#%d> -------------------- B%d%s --------------------",
1057  current_instruction_,
1058  label->hydrogen_value()->id(),
1059  label->block_id(),
1060  LabelType(label));
1061  __ bind(label->label());
1062  current_block_ = label->block_id();
1063  DoGap(label);
1064 }
1065 
1066 
1067 void LCodeGen::DoParallelMove(LParallelMove* move) {
1068  resolver_.Resolve(move);
1069 }
1070 
1071 
1072 void LCodeGen::DoGap(LGap* gap) {
1073  for (int i = LGap::FIRST_INNER_POSITION;
1074  i <= LGap::LAST_INNER_POSITION;
1075  i++) {
1076  LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i);
1077  LParallelMove* move = gap->GetParallelMove(inner_pos);
1078  if (move != NULL) DoParallelMove(move);
1079  }
1080 }
1081 
1082 
1083 void LCodeGen::DoInstructionGap(LInstructionGap* instr) {
1084  DoGap(instr);
1085 }
1086 
1087 
1088 void LCodeGen::DoParameter(LParameter* instr) {
1089  // Nothing to do.
1090 }
1091 
1092 
1093 void LCodeGen::DoCallStub(LCallStub* instr) {
1094  ASSERT(ToRegister(instr->context()).is(cp));
1095  ASSERT(ToRegister(instr->result()).is(r0));
1096  switch (instr->hydrogen()->major_key()) {
1097  case CodeStub::RegExpExec: {
1098  RegExpExecStub stub;
1099  CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
1100  break;
1101  }
1102  case CodeStub::SubString: {
1103  SubStringStub stub;
1104  CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
1105  break;
1106  }
1107  case CodeStub::StringCompare: {
1108  StringCompareStub stub;
1109  CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
1110  break;
1111  }
1112  default:
1113  UNREACHABLE();
1114  }
1115 }
1116 
1117 
1118 void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
1119  GenerateOsrPrologue();
1120 }
1121 
1122 
1123 void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) {
1124  Register dividend = ToRegister(instr->dividend());
1125  int32_t divisor = instr->divisor();
1126  ASSERT(dividend.is(ToRegister(instr->result())));
1127 
1128  // Theoretically, a variation of the branch-free code for integer division by
1129  // a power of 2 (calculating the remainder via an additional multiplication
1130  // (which gets simplified to an 'and') and subtraction) should be faster, and
1131  // this is exactly what GCC and clang emit. Nevertheless, benchmarks seem to
1132  // indicate that positive dividends are heavily favored, so the branching
1133  // version performs better.
1134  HMod* hmod = instr->hydrogen();
1135  int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
1136  Label dividend_is_not_negative, done;
1137  if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) {
1138  __ cmp(dividend, Operand::Zero());
1139  __ b(pl, &dividend_is_not_negative);
1140  // Note that this is correct even for kMinInt operands.
1141  __ rsb(dividend, dividend, Operand::Zero());
1142  __ and_(dividend, dividend, Operand(mask));
1143  __ rsb(dividend, dividend, Operand::Zero(), SetCC);
1144  if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
1145  DeoptimizeIf(eq, instr->environment());
1146  }
1147  __ b(&done);
1148  }
1149 
1150  __ bind(&dividend_is_not_negative);
1151  __ and_(dividend, dividend, Operand(mask));
1152  __ bind(&done);
1153 }
1154 
1155 
1156 void LCodeGen::DoModByConstI(LModByConstI* instr) {
1157  Register dividend = ToRegister(instr->dividend());
1158  int32_t divisor = instr->divisor();
1159  Register result = ToRegister(instr->result());
1160  ASSERT(!dividend.is(result));
1161 
1162  if (divisor == 0) {
1163  DeoptimizeIf(al, instr->environment());
1164  return;
1165  }
1166 
1167  __ TruncatingDiv(result, dividend, Abs(divisor));
1168  __ mov(ip, Operand(Abs(divisor)));
1169  __ smull(result, ip, result, ip);
1170  __ sub(result, dividend, result, SetCC);
1171 
1172  // Check for negative zero.
1173  HMod* hmod = instr->hydrogen();
1174  if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
1175  Label remainder_not_zero;
1176  __ b(ne, &remainder_not_zero);
1177  __ cmp(dividend, Operand::Zero());
1178  DeoptimizeIf(lt, instr->environment());
1179  __ bind(&remainder_not_zero);
1180  }
1181 }
1182 
1183 
1184 void LCodeGen::DoModI(LModI* instr) {
1185  HMod* hmod = instr->hydrogen();
1186  if (CpuFeatures::IsSupported(SUDIV)) {
1187  CpuFeatureScope scope(masm(), SUDIV);
1188 
1189  Register left_reg = ToRegister(instr->left());
1190  Register right_reg = ToRegister(instr->right());
1191  Register result_reg = ToRegister(instr->result());
1192 
1193  Label done;
1194  // Check for x % 0, sdiv might signal an exception. We have to deopt in this
1195  // case because we can't return a NaN.
1196  if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
1197  __ cmp(right_reg, Operand::Zero());
1198  DeoptimizeIf(eq, instr->environment());
1199  }
1200 
1201  // Check for kMinInt % -1, sdiv will return kMinInt, which is not what we
1202  // want. We have to deopt if we care about -0, because we can't return that.
1203  if (hmod->CheckFlag(HValue::kCanOverflow)) {
1204  Label no_overflow_possible;
1205  __ cmp(left_reg, Operand(kMinInt));
1206  __ b(ne, &no_overflow_possible);
1207  __ cmp(right_reg, Operand(-1));
1208  if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
1209  DeoptimizeIf(eq, instr->environment());
1210  } else {
1211  __ b(ne, &no_overflow_possible);
1212  __ mov(result_reg, Operand::Zero());
1213  __ jmp(&done);
1214  }
1215  __ bind(&no_overflow_possible);
1216  }
1217 
1218  // For 'r3 = r1 % r2' we can have the following ARM code:
1219  // sdiv r3, r1, r2
1220  // mls r3, r3, r2, r1
1221 
1222  __ sdiv(result_reg, left_reg, right_reg);
1223  __ mls(result_reg, result_reg, right_reg, left_reg);
1224 
1225  // If we care about -0, test if the dividend is <0 and the result is 0.
1226  if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
1227  __ cmp(result_reg, Operand::Zero());
1228  __ b(ne, &done);
1229  __ cmp(left_reg, Operand::Zero());
1230  DeoptimizeIf(lt, instr->environment());
1231  }
1232  __ bind(&done);
1233 
1234  } else {
1235  // General case, without any SDIV support.
1236  Register left_reg = ToRegister(instr->left());
1237  Register right_reg = ToRegister(instr->right());
1238  Register result_reg = ToRegister(instr->result());
1239  Register scratch = scratch0();
1240  ASSERT(!scratch.is(left_reg));
1241  ASSERT(!scratch.is(right_reg));
1242  ASSERT(!scratch.is(result_reg));
1243  DwVfpRegister dividend = ToDoubleRegister(instr->temp());
1244  DwVfpRegister divisor = ToDoubleRegister(instr->temp2());
1245  ASSERT(!divisor.is(dividend));
1246  LowDwVfpRegister quotient = double_scratch0();
1247  ASSERT(!quotient.is(dividend));
1248  ASSERT(!quotient.is(divisor));
1249 
1250  Label done;
1251  // Check for x % 0, we have to deopt in this case because we can't return a
1252  // NaN.
1253  if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
1254  __ cmp(right_reg, Operand::Zero());
1255  DeoptimizeIf(eq, instr->environment());
1256  }
1257 
1258  __ Move(result_reg, left_reg);
1259  // Load the arguments in VFP registers. The divisor value is preloaded
1260  // before. Be careful that 'right_reg' is only live on entry.
1261  // TODO(svenpanne) The last comments seems to be wrong nowadays.
1262  __ vmov(double_scratch0().low(), left_reg);
1263  __ vcvt_f64_s32(dividend, double_scratch0().low());
1264  __ vmov(double_scratch0().low(), right_reg);
1265  __ vcvt_f64_s32(divisor, double_scratch0().low());
1266 
1267  // We do not care about the sign of the divisor. Note that we still handle
1268  // the kMinInt % -1 case correctly, though.
1269  __ vabs(divisor, divisor);
1270  // Compute the quotient and round it to a 32bit integer.
1271  __ vdiv(quotient, dividend, divisor);
1272  __ vcvt_s32_f64(quotient.low(), quotient);
1273  __ vcvt_f64_s32(quotient, quotient.low());
1274 
1275  // Compute the remainder in result.
1276  __ vmul(double_scratch0(), divisor, quotient);
1277  __ vcvt_s32_f64(double_scratch0().low(), double_scratch0());
1278  __ vmov(scratch, double_scratch0().low());
1279  __ sub(result_reg, left_reg, scratch, SetCC);
1280 
1281  // If we care about -0, test if the dividend is <0 and the result is 0.
1282  if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
1283  __ b(ne, &done);
1284  __ cmp(left_reg, Operand::Zero());
1285  DeoptimizeIf(mi, instr->environment());
1286  }
1287  __ bind(&done);
1288  }
1289 }
1290 
1291 
1292 void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
1293  Register dividend = ToRegister(instr->dividend());
1294  int32_t divisor = instr->divisor();
1295  Register result = ToRegister(instr->result());
1296  ASSERT(divisor == kMinInt || (divisor != 0 && IsPowerOf2(Abs(divisor))));
1297  ASSERT(!result.is(dividend));
1298 
1299  // Check for (0 / -x) that will produce negative zero.
1300  HDiv* hdiv = instr->hydrogen();
1301  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
1302  __ cmp(dividend, Operand::Zero());
1303  DeoptimizeIf(eq, instr->environment());
1304  }
1305  // Check for (kMinInt / -1).
1306  if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
1307  __ cmp(dividend, Operand(kMinInt));
1308  DeoptimizeIf(eq, instr->environment());
1309  }
1310  // Deoptimize if remainder will not be 0.
1311  if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
1312  divisor != 1 && divisor != -1) {
1313  int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
1314  __ tst(dividend, Operand(mask));
1315  DeoptimizeIf(ne, instr->environment());
1316  }
1317 
1318  if (divisor == -1) { // Nice shortcut, not needed for correctness.
1319  __ rsb(result, dividend, Operand(0));
1320  return;
1321  }
1322  int32_t shift = WhichPowerOf2Abs(divisor);
1323  if (shift == 0) {
1324  __ mov(result, dividend);
1325  } else if (shift == 1) {
1326  __ add(result, dividend, Operand(dividend, LSR, 31));
1327  } else {
1328  __ mov(result, Operand(dividend, ASR, 31));
1329  __ add(result, dividend, Operand(result, LSR, 32 - shift));
1330  }
1331  if (shift > 0) __ mov(result, Operand(result, ASR, shift));
1332  if (divisor < 0) __ rsb(result, result, Operand(0));
1333 }
1334 
1335 
1336 void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
1337  Register dividend = ToRegister(instr->dividend());
1338  int32_t divisor = instr->divisor();
1339  Register result = ToRegister(instr->result());
1340  ASSERT(!dividend.is(result));
1341 
1342  if (divisor == 0) {
1343  DeoptimizeIf(al, instr->environment());
1344  return;
1345  }
1346 
1347  // Check for (0 / -x) that will produce negative zero.
1348  HDiv* hdiv = instr->hydrogen();
1349  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
1350  __ cmp(dividend, Operand::Zero());
1351  DeoptimizeIf(eq, instr->environment());
1352  }
1353 
1354  __ TruncatingDiv(result, dividend, Abs(divisor));
1355  if (divisor < 0) __ rsb(result, result, Operand::Zero());
1356 
1357  if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
1358  __ mov(ip, Operand(divisor));
1359  __ smull(scratch0(), ip, result, ip);
1360  __ sub(scratch0(), scratch0(), dividend, SetCC);
1361  DeoptimizeIf(ne, instr->environment());
1362  }
1363 }
1364 
1365 
1366 void LCodeGen::DoDivI(LDivI* instr) {
1367  HBinaryOperation* hdiv = instr->hydrogen();
1368  Register left = ToRegister(instr->left());
1369  Register right = ToRegister(instr->right());
1370  Register result = ToRegister(instr->result());
1371 
1372  // Check for x / 0.
1373  if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
1374  __ cmp(right, Operand::Zero());
1375  DeoptimizeIf(eq, instr->environment());
1376  }
1377 
1378  // Check for (0 / -x) that will produce negative zero.
1379  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
1380  Label positive;
1381  if (!instr->hydrogen_value()->CheckFlag(HValue::kCanBeDivByZero)) {
1382  // Do the test only if it hasn't been done above.
1383  __ cmp(right, Operand::Zero());
1384  }
1385  __ b(pl, &positive);
1386  __ cmp(left, Operand::Zero());
1387  DeoptimizeIf(eq, instr->environment());
1388  __ bind(&positive);
1389  }
1390 
1391  // Check for (kMinInt / -1).
1392  if (hdiv->CheckFlag(HValue::kCanOverflow) &&
1393  (!CpuFeatures::IsSupported(SUDIV) ||
1394  !hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32))) {
1395  // We don't need to check for overflow when truncating with sdiv
1396  // support because, on ARM, sdiv kMinInt, -1 -> kMinInt.
1397  __ cmp(left, Operand(kMinInt));
1398  __ cmp(right, Operand(-1), eq);
1399  DeoptimizeIf(eq, instr->environment());
1400  }
1401 
1402  if (CpuFeatures::IsSupported(SUDIV)) {
1403  CpuFeatureScope scope(masm(), SUDIV);
1404  __ sdiv(result, left, right);
1405  } else {
1406  DoubleRegister vleft = ToDoubleRegister(instr->temp());
1407  DoubleRegister vright = double_scratch0();
1408  __ vmov(double_scratch0().low(), left);
1409  __ vcvt_f64_s32(vleft, double_scratch0().low());
1410  __ vmov(double_scratch0().low(), right);
1411  __ vcvt_f64_s32(vright, double_scratch0().low());
1412  __ vdiv(vleft, vleft, vright); // vleft now contains the result.
1413  __ vcvt_s32_f64(double_scratch0().low(), vleft);
1414  __ vmov(result, double_scratch0().low());
1415  }
1416 
1417  if (hdiv->IsMathFloorOfDiv()) {
1418  Label done;
1419  Register remainder = scratch0();
1420  __ mls(remainder, result, right, left);
1421  __ cmp(remainder, Operand::Zero());
1422  __ b(eq, &done);
1423  __ eor(remainder, remainder, Operand(right));
1424  __ add(result, result, Operand(remainder, ASR, 31));
1425  __ bind(&done);
1426  } else if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
1427  // Compute remainder and deopt if it's not zero.
1428  Register remainder = scratch0();
1429  __ mls(remainder, result, right, left);
1430  __ cmp(remainder, Operand::Zero());
1431  DeoptimizeIf(ne, instr->environment());
1432  }
1433 }
1434 
1435 
1436 void LCodeGen::DoMultiplyAddD(LMultiplyAddD* instr) {
1437  DwVfpRegister addend = ToDoubleRegister(instr->addend());
1438  DwVfpRegister multiplier = ToDoubleRegister(instr->multiplier());
1439  DwVfpRegister multiplicand = ToDoubleRegister(instr->multiplicand());
1440 
1441  // This is computed in-place.
1442  ASSERT(addend.is(ToDoubleRegister(instr->result())));
1443 
1444  __ vmla(addend, multiplier, multiplicand);
1445 }
1446 
1447 
1448 void LCodeGen::DoMultiplySubD(LMultiplySubD* instr) {
1449  DwVfpRegister minuend = ToDoubleRegister(instr->minuend());
1450  DwVfpRegister multiplier = ToDoubleRegister(instr->multiplier());
1451  DwVfpRegister multiplicand = ToDoubleRegister(instr->multiplicand());
1452 
1453  // This is computed in-place.
1454  ASSERT(minuend.is(ToDoubleRegister(instr->result())));
1455 
1456  __ vmls(minuend, multiplier, multiplicand);
1457 }
1458 
1459 
1460 void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
1461  Register dividend = ToRegister(instr->dividend());
1462  Register result = ToRegister(instr->result());
1463  int32_t divisor = instr->divisor();
1464 
1465  // If the divisor is positive, things are easy: There can be no deopts and we
1466  // can simply do an arithmetic right shift.
1467  if (divisor == 1) return;
1468  int32_t shift = WhichPowerOf2Abs(divisor);
1469  if (divisor > 1) {
1470  __ mov(result, Operand(dividend, ASR, shift));
1471  return;
1472  }
1473 
1474  // If the divisor is negative, we have to negate and handle edge cases.
1475  __ rsb(result, dividend, Operand::Zero(), SetCC);
1476  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
1477  DeoptimizeIf(eq, instr->environment());
1478  }
1479  if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
1480  // Note that we could emit branch-free code, but that would need one more
1481  // register.
1482  if (divisor == -1) {
1483  DeoptimizeIf(vs, instr->environment());
1484  __ mov(result, Operand(dividend, ASR, shift));
1485  } else {
1486  __ mov(result, Operand(kMinInt / divisor), LeaveCC, vs);
1487  __ mov(result, Operand(dividend, ASR, shift), LeaveCC, vc);
1488  }
1489  } else {
1490  __ mov(result, Operand(dividend, ASR, shift));
1491  }
1492 }
1493 
1494 
1495 void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
1496  Register dividend = ToRegister(instr->dividend());
1497  int32_t divisor = instr->divisor();
1498  Register result = ToRegister(instr->result());
1499  ASSERT(!dividend.is(result));
1500 
1501  if (divisor == 0) {
1502  DeoptimizeIf(al, instr->environment());
1503  return;
1504  }
1505 
1506  // Check for (0 / -x) that will produce negative zero.
1507  HMathFloorOfDiv* hdiv = instr->hydrogen();
1508  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
1509  __ cmp(dividend, Operand::Zero());
1510  DeoptimizeIf(eq, instr->environment());
1511  }
1512 
1513  // Easy case: We need no dynamic check for the dividend and the flooring
1514  // division is the same as the truncating division.
1515  if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) ||
1516  (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) {
1517  __ TruncatingDiv(result, dividend, Abs(divisor));
1518  if (divisor < 0) __ rsb(result, result, Operand::Zero());
1519  return;
1520  }
1521 
1522  // In the general case we may need to adjust before and after the truncating
1523  // division to get a flooring division.
1524  Register temp = ToRegister(instr->temp());
1525  ASSERT(!temp.is(dividend) && !temp.is(result));
1526  Label needs_adjustment, done;
1527  __ cmp(dividend, Operand::Zero());
1528  __ b(divisor > 0 ? lt : gt, &needs_adjustment);
1529  __ TruncatingDiv(result, dividend, Abs(divisor));
1530  if (divisor < 0) __ rsb(result, result, Operand::Zero());
1531  __ jmp(&done);
1532  __ bind(&needs_adjustment);
1533  __ add(temp, dividend, Operand(divisor > 0 ? 1 : -1));
1534  __ TruncatingDiv(result, temp, Abs(divisor));
1535  if (divisor < 0) __ rsb(result, result, Operand::Zero());
1536  __ sub(result, result, Operand(1));
1537  __ bind(&done);
1538 }
1539 
1540 
1541 void LCodeGen::DoMulI(LMulI* instr) {
1542  Register result = ToRegister(instr->result());
1543  // Note that result may alias left.
1544  Register left = ToRegister(instr->left());
1545  LOperand* right_op = instr->right();
1546 
1547  bool bailout_on_minus_zero =
1548  instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero);
1549  bool overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
1550 
1551  if (right_op->IsConstantOperand()) {
1552  int32_t constant = ToInteger32(LConstantOperand::cast(right_op));
1553 
1554  if (bailout_on_minus_zero && (constant < 0)) {
1555  // The case of a zero constant is handled separately below.
1556  // If the constant is negative and left is zero, the result should be -0.
1557  __ cmp(left, Operand::Zero());
1558  DeoptimizeIf(eq, instr->environment());
1559  }
1560 
1561  switch (constant) {
1562  case -1:
1563  if (overflow) {
1564  __ rsb(result, left, Operand::Zero(), SetCC);
1565  DeoptimizeIf(vs, instr->environment());
1566  } else {
1567  __ rsb(result, left, Operand::Zero());
1568  }
1569  break;
1570  case 0:
1571  if (bailout_on_minus_zero) {
1572  // If left is strictly negative and the constant is zero, the
1573  // result is -0. Deoptimize if required, otherwise return 0.
1574  __ cmp(left, Operand::Zero());
1575  DeoptimizeIf(mi, instr->environment());
1576  }
1577  __ mov(result, Operand::Zero());
1578  break;
1579  case 1:
1580  __ Move(result, left);
1581  break;
1582  default:
1583  // Multiplying by powers of two and powers of two plus or minus
1584  // one can be done faster with shifted operands.
1585  // For other constants we emit standard code.
1586  int32_t mask = constant >> 31;
1587  uint32_t constant_abs = (constant + mask) ^ mask;
1588 
1589  if (IsPowerOf2(constant_abs)) {
1590  int32_t shift = WhichPowerOf2(constant_abs);
1591  __ mov(result, Operand(left, LSL, shift));
1592  // Correct the sign of the result if the constant is negative.
1593  if (constant < 0) __ rsb(result, result, Operand::Zero());
1594  } else if (IsPowerOf2(constant_abs - 1)) {
1595  int32_t shift = WhichPowerOf2(constant_abs - 1);
1596  __ add(result, left, Operand(left, LSL, shift));
1597  // Correct the sign of the result if the constant is negative.
1598  if (constant < 0) __ rsb(result, result, Operand::Zero());
1599  } else if (IsPowerOf2(constant_abs + 1)) {
1600  int32_t shift = WhichPowerOf2(constant_abs + 1);
1601  __ rsb(result, left, Operand(left, LSL, shift));
1602  // Correct the sign of the result if the constant is negative.
1603  if (constant < 0) __ rsb(result, result, Operand::Zero());
1604  } else {
1605  // Generate standard code.
1606  __ mov(ip, Operand(constant));
1607  __ mul(result, left, ip);
1608  }
1609  }
1610 
1611  } else {
1612  ASSERT(right_op->IsRegister());
1613  Register right = ToRegister(right_op);
1614 
1615  if (overflow) {
1616  Register scratch = scratch0();
1617  // scratch:result = left * right.
1618  if (instr->hydrogen()->representation().IsSmi()) {
1619  __ SmiUntag(result, left);
1620  __ smull(result, scratch, result, right);
1621  } else {
1622  __ smull(result, scratch, left, right);
1623  }
1624  __ cmp(scratch, Operand(result, ASR, 31));
1625  DeoptimizeIf(ne, instr->environment());
1626  } else {
1627  if (instr->hydrogen()->representation().IsSmi()) {
1628  __ SmiUntag(result, left);
1629  __ mul(result, result, right);
1630  } else {
1631  __ mul(result, left, right);
1632  }
1633  }
1634 
1635  if (bailout_on_minus_zero) {
1636  Label done;
1637  __ teq(left, Operand(right));
1638  __ b(pl, &done);
1639  // Bail out if the result is minus zero.
1640  __ cmp(result, Operand::Zero());
1641  DeoptimizeIf(eq, instr->environment());
1642  __ bind(&done);
1643  }
1644  }
1645 }
1646 
1647 
1648 void LCodeGen::DoBitI(LBitI* instr) {
1649  LOperand* left_op = instr->left();
1650  LOperand* right_op = instr->right();
1651  ASSERT(left_op->IsRegister());
1652  Register left = ToRegister(left_op);
1653  Register result = ToRegister(instr->result());
1654  Operand right(no_reg);
1655 
1656  if (right_op->IsStackSlot()) {
1657  right = Operand(EmitLoadRegister(right_op, ip));
1658  } else {
1659  ASSERT(right_op->IsRegister() || right_op->IsConstantOperand());
1660  right = ToOperand(right_op);
1661  }
1662 
1663  switch (instr->op()) {
1664  case Token::BIT_AND:
1665  __ and_(result, left, right);
1666  break;
1667  case Token::BIT_OR:
1668  __ orr(result, left, right);
1669  break;
1670  case Token::BIT_XOR:
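 // XOR with all ones is a bitwise NOT, so emit a single mvn instead of eor.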
1671  if (right_op->IsConstantOperand() && right.immediate() == int32_t(~0)) {
1672  __ mvn(result, Operand(left));
1673  } else {
1674  __ eor(result, left, right);
1675  }
1676  break;
1677  default:
1678  UNREACHABLE();
1679  break;
1680  }
1681 }
1682 
1683 
1684 void LCodeGen::DoShiftI(LShiftI* instr) {
1685  // Both 'left' and 'right' are "used at start" (see LCodeGen::DoShift), so
1686  // result may alias either of them.
1687  LOperand* right_op = instr->right();
1688  Register left = ToRegister(instr->left());
1689  Register result = ToRegister(instr->result());
1690  Register scratch = scratch0();
1691  if (right_op->IsRegister()) {
1692  // Mask the right_op operand.
1693  __ and_(scratch, ToRegister(right_op), Operand(0x1F));
1694  switch (instr->op()) {
1695  case Token::ROR:
1696  __ mov(result, Operand(left, ROR, scratch));
1697  break;
1698  case Token::SAR:
1699  __ mov(result, Operand(left, ASR, scratch));
1700  break;
1701  case Token::SHR:
1702  if (instr->can_deopt()) {
1703  __ mov(result, Operand(left, LSR, scratch), SetCC);
1704  DeoptimizeIf(mi, instr->environment());
1705  } else {
1706  __ mov(result, Operand(left, LSR, scratch));
1707  }
1708  break;
1709  case Token::SHL:
1710  __ mov(result, Operand(left, LSL, scratch));
1711  break;
1712  default:
1713  UNREACHABLE();
1714  break;
1715  }
1716  } else {
1717  // Mask the right_op operand.
1718  int value = ToInteger32(LConstantOperand::cast(right_op));
1719  uint8_t shift_count = static_cast<uint8_t>(value & 0x1F);
1720  switch (instr->op()) {
1721  case Token::ROR:
1722  if (shift_count != 0) {
1723  __ mov(result, Operand(left, ROR, shift_count));
1724  } else {
1725  __ Move(result, left);
1726  }
1727  break;
1728  case Token::SAR:
1729  if (shift_count != 0) {
1730  __ mov(result, Operand(left, ASR, shift_count));
1731  } else {
1732  __ Move(result, left);
1733  }
1734  break;
1735  case Token::SHR:
1736  if (shift_count != 0) {
1737  __ mov(result, Operand(left, LSR, shift_count));
1738  } else {
1739  if (instr->can_deopt()) {
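 // A logical shift right by zero leaves the value unchanged; if the sign bit
 // is set, the result cannot be represented as a signed 32-bit integer, so
 // deoptimize.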
1740  __ tst(left, Operand(0x80000000));
1741  DeoptimizeIf(ne, instr->environment());
1742  }
1743  __ Move(result, left);
1744  }
1745  break;
1746  case Token::SHL:
1747  if (shift_count != 0) {
1748  if (instr->hydrogen_value()->representation().IsSmi() &&
1749  instr->can_deopt()) {
1750  if (shift_count != 1) {
1751  __ mov(result, Operand(left, LSL, shift_count - 1));
1752  __ SmiTag(result, result, SetCC);
1753  } else {
1754  __ SmiTag(result, left, SetCC);
1755  }
1756  DeoptimizeIf(vs, instr->environment());
1757  } else {
1758  __ mov(result, Operand(left, LSL, shift_count));
1759  }
1760  } else {
1761  __ Move(result, left);
1762  }
1763  break;
1764  default:
1765  UNREACHABLE();
1766  break;
1767  }
1768  }
1769 }
1770 
1771 
1772 void LCodeGen::DoSubI(LSubI* instr) {
1773  LOperand* left = instr->left();
1774  LOperand* right = instr->right();
1775  LOperand* result = instr->result();
1776  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
1777  SBit set_cond = can_overflow ? SetCC : LeaveCC;
1778 
1779  if (right->IsStackSlot()) {
1780  Register right_reg = EmitLoadRegister(right, ip);
1781  __ sub(ToRegister(result), ToRegister(left), Operand(right_reg), set_cond);
1782  } else {
1783  ASSERT(right->IsRegister() || right->IsConstantOperand());
1784  __ sub(ToRegister(result), ToRegister(left), ToOperand(right), set_cond);
1785  }
1786 
1787  if (can_overflow) {
1788  DeoptimizeIf(vs, instr->environment());
1789  }
1790 }
1791 
1792 
1793 void LCodeGen::DoRSubI(LRSubI* instr) {
1794  LOperand* left = instr->left();
1795  LOperand* right = instr->right();
1796  LOperand* result = instr->result();
1797  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
1798  SBit set_cond = can_overflow ? SetCC : LeaveCC;
1799 
1800  if (right->IsStackSlot()) {
1801  Register right_reg = EmitLoadRegister(right, ip);
1802  __ rsb(ToRegister(result), ToRegister(left), Operand(right_reg), set_cond);
1803  } else {
1804  ASSERT(right->IsRegister() || right->IsConstantOperand());
1805  __ rsb(ToRegister(result), ToRegister(left), ToOperand(right), set_cond);
1806  }
1807 
1808  if (can_overflow) {
1809  DeoptimizeIf(vs, instr->environment());
1810  }
1811 }
1812 
1813 
1814 void LCodeGen::DoConstantI(LConstantI* instr) {
1815  __ mov(ToRegister(instr->result()), Operand(instr->value()));
1816 }
1817 
1818 
1819 void LCodeGen::DoConstantS(LConstantS* instr) {
1820  __ mov(ToRegister(instr->result()), Operand(instr->value()));
1821 }
1822 
1823 
1824 void LCodeGen::DoConstantD(LConstantD* instr) {
1825  ASSERT(instr->result()->IsDoubleRegister());
1826  DwVfpRegister result = ToDoubleRegister(instr->result());
1827  double v = instr->value();
1828  __ Vmov(result, v, scratch0());
1829 }
1830 
1831 
1832 void LCodeGen::DoConstantE(LConstantE* instr) {
1833  __ mov(ToRegister(instr->result()), Operand(instr->value()));
1834 }
1835 
1836 
1837 void LCodeGen::DoConstantT(LConstantT* instr) {
1838  Handle<Object> value = instr->value(isolate());
1840  __ Move(ToRegister(instr->result()), value);
1841 }
1842 
1843 
1844 void LCodeGen::DoMapEnumLength(LMapEnumLength* instr) {
1845  Register result = ToRegister(instr->result());
1846  Register map = ToRegister(instr->value());
1847  __ EnumLength(result, map);
1848 }
1849 
1850 
1851 void LCodeGen::DoDateField(LDateField* instr) {
1852  Register object = ToRegister(instr->date());
1853  Register result = ToRegister(instr->result());
1854  Register scratch = ToRegister(instr->temp());
1855  Smi* index = instr->index();
1856  Label runtime, done;
1857  ASSERT(object.is(result));
1858  ASSERT(object.is(r0));
1859  ASSERT(!scratch.is(scratch0()));
1860  ASSERT(!scratch.is(object));
1861 
1862  __ SmiTst(object);
1863  DeoptimizeIf(eq, instr->environment());
1864  __ CompareObjectType(object, scratch, scratch, JS_DATE_TYPE);
1865  DeoptimizeIf(ne, instr->environment());
1866 
1867  if (index->value() == 0) {
1868  __ ldr(result, FieldMemOperand(object, JSDate::kValueOffset));
1869  } else {
1870  if (index->value() < JSDate::kFirstUncachedField) {
1871  ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
1872  __ mov(scratch, Operand(stamp));
1873  __ ldr(scratch, MemOperand(scratch));
1874  __ ldr(scratch0(), FieldMemOperand(object, JSDate::kCacheStampOffset));
1875  __ cmp(scratch, scratch0());
1876  __ b(ne, &runtime);
1877  __ ldr(result, FieldMemOperand(object, JSDate::kValueOffset +
1878  kPointerSize * index->value()));
1879  __ jmp(&done);
1880  }
1881  __ bind(&runtime);
1882  __ PrepareCallCFunction(2, scratch);
1883  __ mov(r1, Operand(index));
1884  __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
1885  __ bind(&done);
1886  }
1887 }
1888 
1889 
1890 MemOperand LCodeGen::BuildSeqStringOperand(Register string,
1891  LOperand* index,
1892  String::Encoding encoding) {
1893  if (index->IsConstantOperand()) {
1894  int offset = ToInteger32(LConstantOperand::cast(index));
1895  if (encoding == String::TWO_BYTE_ENCODING) {
1896  offset *= kUC16Size;
1897  }
1898  STATIC_ASSERT(kCharSize == 1);
1899  return FieldMemOperand(string, SeqString::kHeaderSize + offset);
1900  }
1901  Register scratch = scratch0();
1902  ASSERT(!scratch.is(string));
1903  ASSERT(!scratch.is(ToRegister(index)));
1904  if (encoding == String::ONE_BYTE_ENCODING) {
1905  __ add(scratch, string, Operand(ToRegister(index)));
1906  } else {
1907  STATIC_ASSERT(kUC16Size == 2);
1908  __ add(scratch, string, Operand(ToRegister(index), LSL, 1));
1909  }
1910  return FieldMemOperand(scratch, SeqString::kHeaderSize);
1911 }
1912 
1913 
1914 void LCodeGen::DoSeqStringGetChar(LSeqStringGetChar* instr) {
1915  String::Encoding encoding = instr->hydrogen()->encoding();
1916  Register string = ToRegister(instr->string());
1917  Register result = ToRegister(instr->result());
1918 
1919  if (FLAG_debug_code) {
1920  Register scratch = scratch0();
1921  __ ldr(scratch, FieldMemOperand(string, HeapObject::kMapOffset));
1922  __ ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
1923 
1924  __ and_(scratch, scratch,
1925  Operand(kStringRepresentationMask | kStringEncodingMask));
1926  static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
1927  static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
1928  __ cmp(scratch, Operand(encoding == String::ONE_BYTE_ENCODING
1929  ? one_byte_seq_type : two_byte_seq_type));
1930  __ Check(eq, kUnexpectedStringType);
1931  }
1932 
1933  MemOperand operand = BuildSeqStringOperand(string, instr->index(), encoding);
1934  if (encoding == String::ONE_BYTE_ENCODING) {
1935  __ ldrb(result, operand);
1936  } else {
1937  __ ldrh(result, operand);
1938  }
1939 }
1940 
1941 
1942 void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
1943  String::Encoding encoding = instr->hydrogen()->encoding();
1944  Register string = ToRegister(instr->string());
1945  Register value = ToRegister(instr->value());
1946 
1947  if (FLAG_debug_code) {
1948  Register index = ToRegister(instr->index());
1949  static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
1950  static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
1951  int encoding_mask =
1952  instr->hydrogen()->encoding() == String::ONE_BYTE_ENCODING
1953  ? one_byte_seq_type : two_byte_seq_type;
1954  __ EmitSeqStringSetCharCheck(string, index, value, encoding_mask);
1955  }
1956 
1957  MemOperand operand = BuildSeqStringOperand(string, instr->index(), encoding);
1958  if (encoding == String::ONE_BYTE_ENCODING) {
1959  __ strb(value, operand);
1960  } else {
1961  __ strh(value, operand);
1962  }
1963 }
1964 
1965 
1966 void LCodeGen::DoAddI(LAddI* instr) {
1967  LOperand* left = instr->left();
1968  LOperand* right = instr->right();
1969  LOperand* result = instr->result();
1970  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
1971  SBit set_cond = can_overflow ? SetCC : LeaveCC;
1972 
1973  if (right->IsStackSlot()) {
1974  Register right_reg = EmitLoadRegister(right, ip);
1975  __ add(ToRegister(result), ToRegister(left), Operand(right_reg), set_cond);
1976  } else {
1977  ASSERT(right->IsRegister() || right->IsConstantOperand());
1978  __ add(ToRegister(result), ToRegister(left), ToOperand(right), set_cond);
1979  }
1980 
1981  if (can_overflow) {
1982  DeoptimizeIf(vs, instr->environment());
1983  }
1984 }
1985 
1986 
1987 void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
1988  LOperand* left = instr->left();
1989  LOperand* right = instr->right();
1990  HMathMinMax::Operation operation = instr->hydrogen()->operation();
1991  if (instr->hydrogen()->representation().IsSmiOrInteger32()) {
1992  Condition condition = (operation == HMathMinMax::kMathMin) ? le : ge;
1993  Register left_reg = ToRegister(left);
1994  Operand right_op = (right->IsRegister() || right->IsConstantOperand())
1995  ? ToOperand(right)
1996  : Operand(EmitLoadRegister(right, ip));
1997  Register result_reg = ToRegister(instr->result());
1998  __ cmp(left_reg, right_op);
1999  __ Move(result_reg, left_reg, condition);
2000  __ mov(result_reg, right_op, LeaveCC, NegateCondition(condition));
2001  } else {
2002  ASSERT(instr->hydrogen()->representation().IsDouble());
2003  DwVfpRegister left_reg = ToDoubleRegister(left);
2004  DwVfpRegister right_reg = ToDoubleRegister(right);
2005  DwVfpRegister result_reg = ToDoubleRegister(instr->result());
2006  Label result_is_nan, return_left, return_right, check_zero, done;
2007  __ VFPCompareAndSetFlags(left_reg, right_reg);
2008  if (operation == HMathMinMax::kMathMin) {
2009  __ b(mi, &return_left);
2010  __ b(gt, &return_right);
2011  } else {
2012  __ b(mi, &return_right);
2013  __ b(gt, &return_left);
2014  }
2015  __ b(vs, &result_is_nan);
2016  // Left equals right => check for -0.
2017  __ VFPCompareAndSetFlags(left_reg, 0.0);
2018  if (left_reg.is(result_reg) || right_reg.is(result_reg)) {
2019  __ b(ne, &done); // left == right != 0.
2020  } else {
2021  __ b(ne, &return_left); // left == right != 0.
2022  }
2023  // At this point, both left and right are either 0 or -0.
2024  if (operation == HMathMinMax::kMathMin) {
2025  // We could use a single 'vorr' instruction here if we had NEON support.
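 // Computing -((-left) - right) produces -0 whenever either operand is -0,
 // matching the sign-bit OR that vorr would give.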
2026  __ vneg(left_reg, left_reg);
2027  __ vsub(result_reg, left_reg, right_reg);
2028  __ vneg(result_reg, result_reg);
2029  } else {
2030  // Since we operate on +0 and/or -0, vadd and vand have the same effect;
2031  // the decision for vadd is easy because vand is a NEON instruction.
2032  __ vadd(result_reg, left_reg, right_reg);
2033  }
2034  __ b(&done);
2035 
2036  __ bind(&result_is_nan);
2037  __ vadd(result_reg, left_reg, right_reg);
2038  __ b(&done);
2039 
2040  __ bind(&return_right);
2041  __ Move(result_reg, right_reg);
2042  if (!left_reg.is(result_reg)) {
2043  __ b(&done);
2044  }
2045 
2046  __ bind(&return_left);
2047  __ Move(result_reg, left_reg);
2048 
2049  __ bind(&done);
2050  }
2051 }
2052 
2053 
2054 void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
2055  DwVfpRegister left = ToDoubleRegister(instr->left());
2056  DwVfpRegister right = ToDoubleRegister(instr->right());
2057  DwVfpRegister result = ToDoubleRegister(instr->result());
2058  switch (instr->op()) {
2059  case Token::ADD:
2060  __ vadd(result, left, right);
2061  break;
2062  case Token::SUB:
2063  __ vsub(result, left, right);
2064  break;
2065  case Token::MUL:
2066  __ vmul(result, left, right);
2067  break;
2068  case Token::DIV:
2069  __ vdiv(result, left, right);
2070  break;
2071  case Token::MOD: {
2072  __ PrepareCallCFunction(0, 2, scratch0());
2073  __ MovToFloatParameters(left, right);
2074  __ CallCFunction(
2075  ExternalReference::mod_two_doubles_operation(isolate()),
2076  0, 2);
2077  // Move the result into the double result register.
2078  __ MovFromFloatResult(result);
2079  break;
2080  }
2081  default:
2082  UNREACHABLE();
2083  break;
2084  }
2085 }
2086 
2087 
2088 void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
2089  ASSERT(ToRegister(instr->context()).is(cp));
2090  ASSERT(ToRegister(instr->left()).is(r1));
2091  ASSERT(ToRegister(instr->right()).is(r0));
2092  ASSERT(ToRegister(instr->result()).is(r0));
2093 
2094  BinaryOpICStub stub(instr->op(), NO_OVERWRITE);
2095  // Block literal pool emission to ensure the nop indicating no inlined smi
2096  // code is in the correct position.
2097  Assembler::BlockConstPoolScope block_const_pool(masm());
2098  CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
2099 }
2100 
2101 
2102 template<class InstrType>
2103 void LCodeGen::EmitBranch(InstrType instr, Condition condition) {
2104  int left_block = instr->TrueDestination(chunk_);
2105  int right_block = instr->FalseDestination(chunk_);
2106 
2107  int next_block = GetNextEmittedBlock();
2108 
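 // Emit at most one branch by falling through to whichever successor block is
 // emitted next.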
2109  if (right_block == left_block || condition == al) {
2110  EmitGoto(left_block);
2111  } else if (left_block == next_block) {
2112  __ b(NegateCondition(condition), chunk_->GetAssemblyLabel(right_block));
2113  } else if (right_block == next_block) {
2114  __ b(condition, chunk_->GetAssemblyLabel(left_block));
2115  } else {
2116  __ b(condition, chunk_->GetAssemblyLabel(left_block));
2117  __ b(chunk_->GetAssemblyLabel(right_block));
2118  }
2119 }
2120 
2121 
2122 template<class InstrType>
2123 void LCodeGen::EmitFalseBranch(InstrType instr, Condition condition) {
2124  int false_block = instr->FalseDestination(chunk_);
2125  __ b(condition, chunk_->GetAssemblyLabel(false_block));
2126 }
2127 
2128 
2129 void LCodeGen::DoDebugBreak(LDebugBreak* instr) {
2130  __ stop("LBreak");
2131 }
2132 
2133 
2134 void LCodeGen::DoBranch(LBranch* instr) {
2135  Representation r = instr->hydrogen()->value()->representation();
2136  if (r.IsInteger32() || r.IsSmi()) {
2137  ASSERT(!info()->IsStub());
2138  Register reg = ToRegister(instr->value());
2139  __ cmp(reg, Operand::Zero());
2140  EmitBranch(instr, ne);
2141  } else if (r.IsDouble()) {
2142  ASSERT(!info()->IsStub());
2143  DwVfpRegister reg = ToDoubleRegister(instr->value());
2144  // Test the double value. Zero and NaN are false.
2145  __ VFPCompareAndSetFlags(reg, 0.0);
2146  __ cmp(r0, r0, vs); // If NaN, set the Z flag. (NaN -> false)
2147  EmitBranch(instr, ne);
2148  } else {
2149  ASSERT(r.IsTagged());
2150  Register reg = ToRegister(instr->value());
2151  HType type = instr->hydrogen()->value()->type();
2152  if (type.IsBoolean()) {
2153  ASSERT(!info()->IsStub());
2154  __ CompareRoot(reg, Heap::kTrueValueRootIndex);
2155  EmitBranch(instr, eq);
2156  } else if (type.IsSmi()) {
2157  ASSERT(!info()->IsStub());
2158  __ cmp(reg, Operand::Zero());
2159  EmitBranch(instr, ne);
2160  } else if (type.IsJSArray()) {
2161  ASSERT(!info()->IsStub());
2162  EmitBranch(instr, al);
2163  } else if (type.IsHeapNumber()) {
2164  ASSERT(!info()->IsStub());
2165  DwVfpRegister dbl_scratch = double_scratch0();
2166  __ vldr(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset));
2167  // Test the double value. Zero and NaN are false.
2168  __ VFPCompareAndSetFlags(dbl_scratch, 0.0);
2169  __ cmp(r0, r0, vs); // If NaN, set the Z flag. (NaN)
2170  EmitBranch(instr, ne);
2171  } else if (type.IsString()) {
2172  ASSERT(!info()->IsStub());
2173  __ ldr(ip, FieldMemOperand(reg, String::kLengthOffset));
2174  __ cmp(ip, Operand::Zero());
2175  EmitBranch(instr, ne);
2176  } else {
2177  ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types();
2178  // Avoid deopts in the case where we've never executed this path before.
2179  if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic();
2180 
2181  if (expected.Contains(ToBooleanStub::UNDEFINED)) {
2182  // undefined -> false.
2183  __ CompareRoot(reg, Heap::kUndefinedValueRootIndex);
2184  __ b(eq, instr->FalseLabel(chunk_));
2185  }
2186  if (expected.Contains(ToBooleanStub::BOOLEAN)) {
2187  // Boolean -> its value.
2188  __ CompareRoot(reg, Heap::kTrueValueRootIndex);
2189  __ b(eq, instr->TrueLabel(chunk_));
2190  __ CompareRoot(reg, Heap::kFalseValueRootIndex);
2191  __ b(eq, instr->FalseLabel(chunk_));
2192  }
2193  if (expected.Contains(ToBooleanStub::NULL_TYPE)) {
2194  // 'null' -> false.
2195  __ CompareRoot(reg, Heap::kNullValueRootIndex);
2196  __ b(eq, instr->FalseLabel(chunk_));
2197  }
2198 
2199  if (expected.Contains(ToBooleanStub::SMI)) {
2200  // Smis: 0 -> false, all other -> true.
2201  __ cmp(reg, Operand::Zero());
2202  __ b(eq, instr->FalseLabel(chunk_));
2203  __ JumpIfSmi(reg, instr->TrueLabel(chunk_));
2204  } else if (expected.NeedsMap()) {
2205  // If we need a map later and have a Smi -> deopt.
2206  __ SmiTst(reg);
2207  DeoptimizeIf(eq, instr->environment());
2208  }
2209 
2210  const Register map = scratch0();
2211  if (expected.NeedsMap()) {
2212  __ ldr(map, FieldMemOperand(reg, HeapObject::kMapOffset));
2213 
2214  if (expected.CanBeUndetectable()) {
2215  // Undetectable -> false.
2216  __ ldrb(ip, FieldMemOperand(map, Map::kBitFieldOffset));
2217  __ tst(ip, Operand(1 << Map::kIsUndetectable));
2218  __ b(ne, instr->FalseLabel(chunk_));
2219  }
2220  }
2221 
2222  if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) {
2223  // spec object -> true.
2224  __ CompareInstanceType(map, ip, FIRST_SPEC_OBJECT_TYPE);
2225  __ b(ge, instr->TrueLabel(chunk_));
2226  }
2227 
2228  if (expected.Contains(ToBooleanStub::STRING)) {
2229  // String value -> false iff empty.
2230  Label not_string;
2231  __ CompareInstanceType(map, ip, FIRST_NONSTRING_TYPE);
2232  __ b(ge, &not_string);
2233  __ ldr(ip, FieldMemOperand(reg, String::kLengthOffset));
2234  __ cmp(ip, Operand::Zero());
2235  __ b(ne, instr->TrueLabel(chunk_));
2236  __ b(instr->FalseLabel(chunk_));
2237  __ bind(&not_string);
2238  }
2239 
2240  if (expected.Contains(ToBooleanStub::SYMBOL)) {
2241  // Symbol value -> true.
2242  __ CompareInstanceType(map, ip, SYMBOL_TYPE);
2243  __ b(eq, instr->TrueLabel(chunk_));
2244  }
2245 
2246  if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
2247  // heap number -> false iff +0, -0, or NaN.
2248  DwVfpRegister dbl_scratch = double_scratch0();
2249  Label not_heap_number;
2250  __ CompareRoot(map, Heap::kHeapNumberMapRootIndex);
2251  __ b(ne, &not_heap_number);
2252  __ vldr(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset));
2253  __ VFPCompareAndSetFlags(dbl_scratch, 0.0);
2254  __ cmp(r0, r0, vs); // NaN -> false.
2255  __ b(eq, instr->FalseLabel(chunk_)); // +0, -0 -> false.
2256  __ b(instr->TrueLabel(chunk_));
2257  __ bind(&not_heap_number);
2258  }
2259 
2260  if (!expected.IsGeneric()) {
2261  // We've seen something for the first time -> deopt.
2262  // This can only happen if we are not generic already.
2263  DeoptimizeIf(al, instr->environment());
2264  }
2265  }
2266  }
2267 }
2268 
2269 
2270 void LCodeGen::EmitGoto(int block) {
2271  if (!IsNextEmittedBlock(block)) {
2272  __ jmp(chunk_->GetAssemblyLabel(LookupDestination(block)));
2273  }
2274 }
2275 
2276 
2277 void LCodeGen::DoGoto(LGoto* instr) {
2278  EmitGoto(instr->block_id());
2279 }
2280 
2281 
2282 Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
2283  Condition cond = kNoCondition;
2284  switch (op) {
2285  case Token::EQ:
2286  case Token::EQ_STRICT:
2287  cond = eq;
2288  break;
2289  case Token::NE:
2290  case Token::NE_STRICT:
2291  cond = ne;
2292  break;
2293  case Token::LT:
2294  cond = is_unsigned ? lo : lt;
2295  break;
2296  case Token::GT:
2297  cond = is_unsigned ? hi : gt;
2298  break;
2299  case Token::LTE:
2300  cond = is_unsigned ? ls : le;
2301  break;
2302  case Token::GTE:
2303  cond = is_unsigned ? hs : ge;
2304  break;
2305  case Token::IN:
2306  case Token::INSTANCEOF:
2307  default:
2308  UNREACHABLE();
2309  }
2310  return cond;
2311 }
2312 
2313 
2314 void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) {
2315  LOperand* left = instr->left();
2316  LOperand* right = instr->right();
2317  Condition cond = TokenToCondition(instr->op(), false);
2318 
2319  if (left->IsConstantOperand() && right->IsConstantOperand()) {
2320  // We can statically evaluate the comparison.
2321  double left_val = ToDouble(LConstantOperand::cast(left));
2322  double right_val = ToDouble(LConstantOperand::cast(right));
2323  int next_block = EvalComparison(instr->op(), left_val, right_val) ?
2324  instr->TrueDestination(chunk_) : instr->FalseDestination(chunk_);
2325  EmitGoto(next_block);
2326  } else {
2327  if (instr->is_double()) {
2328  // Compare left and right operands as doubles and load the
2329  // resulting flags into the normal status register.
2330  __ VFPCompareAndSetFlags(ToDoubleRegister(left), ToDoubleRegister(right));
2331  // If a NaN is involved, i.e. the result is unordered (V set),
2332  // jump to false block label.
2333  __ b(vs, instr->FalseLabel(chunk_));
2334  } else {
2335  if (right->IsConstantOperand()) {
2336  int32_t value = ToInteger32(LConstantOperand::cast(right));
2337  if (instr->hydrogen_value()->representation().IsSmi()) {
2338  __ cmp(ToRegister(left), Operand(Smi::FromInt(value)));
2339  } else {
2340  __ cmp(ToRegister(left), Operand(value));
2341  }
2342  } else if (left->IsConstantOperand()) {
2343  int32_t value = ToInteger32(LConstantOperand::cast(left));
2344  if (instr->hydrogen_value()->representation().IsSmi()) {
2345  __ cmp(ToRegister(right), Operand(Smi::FromInt(value)));
2346  } else {
2347  __ cmp(ToRegister(right), Operand(value));
2348  }
2349  // We transposed the operands. Reverse the condition.
2350  cond = ReverseCondition(cond);
2351  } else {
2352  __ cmp(ToRegister(left), ToRegister(right));
2353  }
2354  }
2355  EmitBranch(instr, cond);
2356  }
2357 }
2358 
2359 
2360 void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
2361  Register left = ToRegister(instr->left());
2362  Register right = ToRegister(instr->right());
2363 
2364  __ cmp(left, Operand(right));
2365  EmitBranch(instr, eq);
2366 }
2367 
2368 
2369 void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) {
2370  if (instr->hydrogen()->representation().IsTagged()) {
2371  Register input_reg = ToRegister(instr->object());
2372  __ mov(ip, Operand(factory()->the_hole_value()));
2373  __ cmp(input_reg, ip);
2374  EmitBranch(instr, eq);
2375  return;
2376  }
2377 
2378  DwVfpRegister input_reg = ToDoubleRegister(instr->object());
2379  __ VFPCompareAndSetFlags(input_reg, input_reg);
2380  EmitFalseBranch(instr, vc);
2381 
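 // The hole is encoded as a NaN with a distinguished upper word; now that the
 // value is known to be a NaN, compare its high 32 bits against that pattern.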
2382  Register scratch = scratch0();
2383  __ VmovHigh(scratch, input_reg);
2384  __ cmp(scratch, Operand(kHoleNanUpper32));
2385  EmitBranch(instr, eq);
2386 }
2387 
2388 
2389 void LCodeGen::DoCompareMinusZeroAndBranch(LCompareMinusZeroAndBranch* instr) {
2390  Representation rep = instr->hydrogen()->value()->representation();
2391  ASSERT(!rep.IsInteger32());
2392  Register scratch = ToRegister(instr->temp());
2393 
2394  if (rep.IsDouble()) {
2395  DwVfpRegister value = ToDoubleRegister(instr->value());
2396  __ VFPCompareAndSetFlags(value, 0.0);
2397  EmitFalseBranch(instr, ne);
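 // Only +0 and -0 compare equal to 0.0; -0 is identified by its sign bit,
 // i.e. an upper word of 0x80000000.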
2398  __ VmovHigh(scratch, value);
2399  __ cmp(scratch, Operand(0x80000000));
2400  } else {
2401  Register value = ToRegister(instr->value());
2402  __ CheckMap(value,
2403  scratch,
2404  Heap::kHeapNumberMapRootIndex,
2405  instr->FalseLabel(chunk()),
2406  DO_SMI_CHECK);
2407  __ ldr(scratch, FieldMemOperand(value, HeapNumber::kExponentOffset));
2408  __ ldr(ip, FieldMemOperand(value, HeapNumber::kMantissaOffset));
2409  __ cmp(scratch, Operand(0x80000000));
2410  __ cmp(ip, Operand(0x00000000), eq);
2411  }
2412  EmitBranch(instr, eq);
2413 }
2414 
2415 
2416 Condition LCodeGen::EmitIsObject(Register input,
2417  Register temp1,
2418  Label* is_not_object,
2419  Label* is_object) {
2420  Register temp2 = scratch0();
2421  __ JumpIfSmi(input, is_not_object);
2422 
2423  __ LoadRoot(temp2, Heap::kNullValueRootIndex);
2424  __ cmp(input, temp2);
2425  __ b(eq, is_object);
2426 
2427  // Load map.
2428  __ ldr(temp1, FieldMemOperand(input, HeapObject::kMapOffset));
2429  // Undetectable objects behave like undefined.
2430  __ ldrb(temp2, FieldMemOperand(temp1, Map::kBitFieldOffset));
2431  __ tst(temp2, Operand(1 << Map::kIsUndetectable));
2432  __ b(ne, is_not_object);
2433 
2434  // Load instance type and check that it is in object type range.
2435  __ ldrb(temp2, FieldMemOperand(temp1, Map::kInstanceTypeOffset));
2436  __ cmp(temp2, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
2437  __ b(lt, is_not_object);
2438  __ cmp(temp2, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
2439  return le;
2440 }
2441 
2442 
2443 void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
2444  Register reg = ToRegister(instr->value());
2445  Register temp1 = ToRegister(instr->temp());
2446 
2447  Condition true_cond =
2448  EmitIsObject(reg, temp1,
2449  instr->FalseLabel(chunk_), instr->TrueLabel(chunk_));
2450 
2451  EmitBranch(instr, true_cond);
2452 }
2453 
2454 
2455 Condition LCodeGen::EmitIsString(Register input,
2456  Register temp1,
2457  Label* is_not_string,
2458  SmiCheck check_needed = INLINE_SMI_CHECK) {
2459  if (check_needed == INLINE_SMI_CHECK) {
2460  __ JumpIfSmi(input, is_not_string);
2461  }
2462  __ CompareObjectType(input, temp1, temp1, FIRST_NONSTRING_TYPE);
2463 
2464  return lt;
2465 }
2466 
2467 
2468 void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
2469  Register reg = ToRegister(instr->value());
2470  Register temp1 = ToRegister(instr->temp());
2471 
2472  SmiCheck check_needed =
2473  instr->hydrogen()->value()->IsHeapObject()
2474  ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
2475  Condition true_cond =
2476  EmitIsString(reg, temp1, instr->FalseLabel(chunk_), check_needed);
2477 
2478  EmitBranch(instr, true_cond);
2479 }
2480 
2481 
2482 void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
2483  Register input_reg = EmitLoadRegister(instr->value(), ip);
2484  __ SmiTst(input_reg);
2485  EmitBranch(instr, eq);
2486 }
2487 
2488 
2489 void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
2490  Register input = ToRegister(instr->value());
2491  Register temp = ToRegister(instr->temp());
2492 
2493  if (!instr->hydrogen()->value()->IsHeapObject()) {
2494  __ JumpIfSmi(input, instr->FalseLabel(chunk_));
2495  }
2496  __ ldr(temp, FieldMemOperand(input, HeapObject::kMapOffset));
2497  __ ldrb(temp, FieldMemOperand(temp, Map::kBitFieldOffset));
2498  __ tst(temp, Operand(1 << Map::kIsUndetectable));
2499  EmitBranch(instr, ne);
2500 }
2501 
2502 
2503 static Condition ComputeCompareCondition(Token::Value op) {
2504  switch (op) {
2505  case Token::EQ_STRICT:
2506  case Token::EQ:
2507  return eq;
2508  case Token::LT:
2509  return lt;
2510  case Token::GT:
2511  return gt;
2512  case Token::LTE:
2513  return le;
2514  case Token::GTE:
2515  return ge;
2516  default:
2517  UNREACHABLE();
2518  return kNoCondition;
2519  }
2520 }
2521 
2522 
2523 void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
2524  ASSERT(ToRegister(instr->context()).is(cp));
2525  Token::Value op = instr->op();
2526 
2527  Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
2528  CallCode(ic, RelocInfo::CODE_TARGET, instr);
2529  // This instruction also signals no smi code inlined.
2530  __ cmp(r0, Operand::Zero());
2531 
2532  Condition condition = ComputeCompareCondition(op);
2533 
2534  EmitBranch(instr, condition);
2535 }
2536 
2537 
2538 static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
2539  InstanceType from = instr->from();
2540  InstanceType to = instr->to();
2541  if (from == FIRST_TYPE) return to;
2542  ASSERT(from == to || to == LAST_TYPE);
2543  return from;
2544 }
2545 
2546 
2547 static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
2548  InstanceType from = instr->from();
2549  InstanceType to = instr->to();
2550  if (from == to) return eq;
2551  if (to == LAST_TYPE) return hs;
2552  if (from == FIRST_TYPE) return ls;
2553  UNREACHABLE();
2554  return eq;
2555 }
2556 
2557 
2558 void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
2559  Register scratch = scratch0();
2560  Register input = ToRegister(instr->value());
2561 
2562  if (!instr->hydrogen()->value()->IsHeapObject()) {
2563  __ JumpIfSmi(input, instr->FalseLabel(chunk_));
2564  }
2565 
2566  __ CompareObjectType(input, scratch, scratch, TestType(instr->hydrogen()));
2567  EmitBranch(instr, BranchCondition(instr->hydrogen()));
2568 }
2569 
2570 
2571 void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
2572  Register input = ToRegister(instr->value());
2573  Register result = ToRegister(instr->result());
2574 
2575  __ AssertString(input);
2576 
2577  __ ldr(result, FieldMemOperand(input, String::kHashFieldOffset));
2578  __ IndexFromHash(result, result);
2579 }
2580 
2581 
2582 void LCodeGen::DoHasCachedArrayIndexAndBranch(
2583  LHasCachedArrayIndexAndBranch* instr) {
2584  Register input = ToRegister(instr->value());
2585  Register scratch = scratch0();
2586 
2587  __ ldr(scratch,
2588  FieldMemOperand(input, String::kHashFieldOffset));
2589  __ tst(scratch, Operand(String::kContainsCachedArrayIndexMask));
2590  EmitBranch(instr, eq);
2591 }
2592 
2593 
2594 // Branches to a label or falls through with the answer in flags. Trashes
2595 // the temp registers, but not the input.
2596 void LCodeGen::EmitClassOfTest(Label* is_true,
2597  Label* is_false,
2598  Handle<String> class_name,
2599  Register input,
2600  Register temp,
2601  Register temp2) {
2602  ASSERT(!input.is(temp));
2603  ASSERT(!input.is(temp2));
2604  ASSERT(!temp.is(temp2));
2605 
2606  __ JumpIfSmi(input, is_false);
2607 
2608  if (class_name->IsOneByteEqualTo(STATIC_ASCII_VECTOR("Function"))) {
2609  // Assuming the following assertions, we can use the same compares to test
2610  // for both being a function type and being in the object type range.
2611  STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
2612  STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
2613  FIRST_SPEC_OBJECT_TYPE + 1);
2614  STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
2615  LAST_SPEC_OBJECT_TYPE - 1);
2616  STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
2617  __ CompareObjectType(input, temp, temp2, FIRST_SPEC_OBJECT_TYPE);
2618  __ b(lt, is_false);
2619  __ b(eq, is_true);
2620  __ cmp(temp2, Operand(LAST_SPEC_OBJECT_TYPE));
2621  __ b(eq, is_true);
2622  } else {
2623  // Faster code path to avoid two compares: subtract lower bound from the
2624  // actual type and do a signed compare with the width of the type range.
2625  __ ldr(temp, FieldMemOperand(input, HeapObject::kMapOffset));
2626  __ ldrb(temp2, FieldMemOperand(temp, Map::kInstanceTypeOffset));
2627  __ sub(temp2, temp2, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
2628  __ cmp(temp2, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE -
2629  FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
2630  __ b(gt, is_false);
2631  }
2632 
2633  // Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range.
2634  // Check if the constructor in the map is a function.
2635  __ ldr(temp, FieldMemOperand(temp, Map::kConstructorOffset));
2636 
2637  // Objects with a non-function constructor have class 'Object'.
2638  __ CompareObjectType(temp, temp2, temp2, JS_FUNCTION_TYPE);
2639  if (class_name->IsOneByteEqualTo(STATIC_ASCII_VECTOR("Object"))) {
2640  __ b(ne, is_true);
2641  } else {
2642  __ b(ne, is_false);
2643  }
2644 
2645  // temp now contains the constructor function. Grab the
2646  // instance class name from there.
2647  __ ldr(temp, FieldMemOperand(temp, JSFunction::kSharedFunctionInfoOffset));
2648  __ ldr(temp, FieldMemOperand(temp,
2649  SharedFunctionInfo::kInstanceClassNameOffset));
2650  // The class name we are testing against is internalized since it's a literal.
2651  // The name in the constructor is internalized because of the way the context
2652  // is booted. This routine isn't expected to work for random API-created
2653  // classes and it doesn't have to because you can't access it with natives
2654  // syntax. Since both sides are internalized it is sufficient to use an
2655  // identity comparison.
2656  __ cmp(temp, Operand(class_name));
2657  // End with the answer in flags.
2658 }
2659 
2660 
2661 void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
2662  Register input = ToRegister(instr->value());
2663  Register temp = scratch0();
2664  Register temp2 = ToRegister(instr->temp());
2665  Handle<String> class_name = instr->hydrogen()->class_name();
2666 
2667  EmitClassOfTest(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_),
2668  class_name, input, temp, temp2);
2669 
2670  EmitBranch(instr, eq);
2671 }
2672 
2673 
2674 void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
2675  Register reg = ToRegister(instr->value());
2676  Register temp = ToRegister(instr->temp());
2677 
2678  __ ldr(temp, FieldMemOperand(reg, HeapObject::kMapOffset));
2679  __ cmp(temp, Operand(instr->map()));
2680  EmitBranch(instr, eq);
2681 }
2682 
2683 
2684 void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
2685  ASSERT(ToRegister(instr->context()).is(cp));
2686  ASSERT(ToRegister(instr->left()).is(r0)); // Object is in r0.
2687  ASSERT(ToRegister(instr->right()).is(r1)); // Function is in r1.
2688 
2689  InstanceofStub stub(InstanceofStub::kArgsInRegisters);
2690  CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
2691 
2692  __ cmp(r0, Operand::Zero());
2693  __ mov(r0, Operand(factory()->false_value()), LeaveCC, ne);
2694  __ mov(r0, Operand(factory()->true_value()), LeaveCC, eq);
2695 }
2696 
2697 
2698 void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
2699  class DeferredInstanceOfKnownGlobal V8_FINAL : public LDeferredCode {
2700  public:
2701  DeferredInstanceOfKnownGlobal(LCodeGen* codegen,
2702  LInstanceOfKnownGlobal* instr)
2703  : LDeferredCode(codegen), instr_(instr) { }
2704  virtual void Generate() V8_OVERRIDE {
2705  codegen()->DoDeferredInstanceOfKnownGlobal(instr_, &map_check_);
2706  }
2707  virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
2708  Label* map_check() { return &map_check_; }
2709  private:
2710  LInstanceOfKnownGlobal* instr_;
2711  Label map_check_;
2712  };
2713 
2714  DeferredInstanceOfKnownGlobal* deferred;
2715  deferred = new(zone()) DeferredInstanceOfKnownGlobal(this, instr);
2716 
2717  Label done, false_result;
2718  Register object = ToRegister(instr->value());
2719  Register temp = ToRegister(instr->temp());
2720  Register result = ToRegister(instr->result());
2721 
2722  // A Smi is not instance of anything.
2723  __ JumpIfSmi(object, &false_result);
2724 
2725  // This is the inlined call site instanceof cache. The two occurrences of the
2726  // hole value will be patched to the last map/result pair generated by the
2727  // instanceof stub.
2728  Label cache_miss;
2729  Register map = temp;
2730  __ ldr(map, FieldMemOperand(object, HeapObject::kMapOffset));
2731  {
2732  // Block constant pool emission to ensure the positions of instructions are
2733  // as expected by the patcher. See InstanceofStub::Generate().
2734  Assembler::BlockConstPoolScope block_const_pool(masm());
2735  __ bind(deferred->map_check()); // Label for calculating code patching.
2736  // We use Factory::the_hole_value() on purpose instead of loading from the
2737  // root array to force relocation to be able to later patch with
2738  // the cached map.
2739  PredictableCodeSizeScope predictable(masm_, 5 * Assembler::kInstrSize);
2740  Handle<Cell> cell = factory()->NewCell(factory()->the_hole_value());
2741  __ mov(ip, Operand(Handle<Object>(cell)));
2742  __ ldr(ip, FieldMemOperand(ip, Cell::kValueOffset));
2743  __ cmp(map, Operand(ip));
2744  __ b(ne, &cache_miss);
2745  // We use Factory::the_hole_value() on purpose instead of loading from the
2746  // root array to force relocation to be able to later patch
2747  // with true or false.
2748  __ mov(result, Operand(factory()->the_hole_value()));
2749  }
2750  __ b(&done);
2751 
2752  // The inlined call site cache did not match. Check null and string before
2753  // calling the deferred code.
2754  __ bind(&cache_miss);
2755  // Null is not instance of anything.
2756  __ LoadRoot(ip, Heap::kNullValueRootIndex);
2757  __ cmp(object, Operand(ip));
2758  __ b(eq, &false_result);
2759 
2760  // String values are not instances of anything.
2761  Condition is_string = masm_->IsObjectStringType(object, temp);
2762  __ b(is_string, &false_result);
2763 
2764  // Go to the deferred code.
2765  __ b(deferred->entry());
2766 
2767  __ bind(&false_result);
2768  __ LoadRoot(result, Heap::kFalseValueRootIndex);
2769 
2770  // Here result holds either true or false. The deferred code also produces a
2771  // true or false object.
2772  __ bind(deferred->exit());
2773  __ bind(&done);
2774 }
2775 
2776 
2777 void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
2778  Label* map_check) {
2779  InstanceofStub::Flags flags = InstanceofStub::kNoFlags;
2780  flags = static_cast<InstanceofStub::Flags>(
2781  flags | InstanceofStub::kArgsInRegisters);
2782  flags = static_cast<InstanceofStub::Flags>(
2783  flags | InstanceofStub::kCallSiteInlineCheck);
2784  flags = static_cast<InstanceofStub::Flags>(
2785  flags | InstanceofStub::kReturnTrueFalseObject);
2786  InstanceofStub stub(flags);
2787 
2788  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
2789  LoadContextFromDeferred(instr->context());
2790 
2791  __ Move(InstanceofStub::right(), instr->function());
2792  static const int kAdditionalDelta = 4;
2793  // Make sure that the code size is predictable, since we use specific constant
2794  // offsets in the code to find embedded values.
2795  PredictableCodeSizeScope predictable(masm_, 5 * Assembler::kInstrSize);
2796  int delta = masm_->InstructionsGeneratedSince(map_check) + kAdditionalDelta;
2797  Label before_push_delta;
2798  __ bind(&before_push_delta);
2799  __ BlockConstPoolFor(kAdditionalDelta);
2800  // r5 is used to communicate the offset to the location of the map check.
2801  __ mov(r5, Operand(delta * kPointerSize));
2802  // The mov above can generate one or two instructions. The delta was computed
2803  // for two instructions, so we need to pad here in case of one instruction.
2804  if (masm_->InstructionsGeneratedSince(&before_push_delta) != 2) {
2805  ASSERT_EQ(1, masm_->InstructionsGeneratedSince(&before_push_delta));
2806  __ nop();
2807  }
2808  CallCodeGeneric(stub.GetCode(isolate()),
2809  RelocInfo::CODE_TARGET,
2810  instr,
2811  RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
2812  LEnvironment* env = instr->GetDeferredLazyDeoptimizationEnvironment();
2813  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
2814  // Put the result value (r0) into the result register slot and
2815  // restore all registers.
2816  __ StoreToSafepointRegisterSlot(r0, ToRegister(instr->result()));
2817 }
2818 
2819 
2820 void LCodeGen::DoCmpT(LCmpT* instr) {
2821  ASSERT(ToRegister(instr->context()).is(cp));
2822  Token::Value op = instr->op();
2823 
2824  Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
2825  CallCode(ic, RelocInfo::CODE_TARGET, instr);
2826  // This instruction also signals no smi code inlined.
2827  __ cmp(r0, Operand::Zero());
2828 
2829  Condition condition = ComputeCompareCondition(op);
2830  __ LoadRoot(ToRegister(instr->result()),
2831  Heap::kTrueValueRootIndex,
2832  condition);
2833  __ LoadRoot(ToRegister(instr->result()),
2834  Heap::kFalseValueRootIndex,
2835  NegateCondition(condition));
2836 }
2837 
2838 
2839 void LCodeGen::DoReturn(LReturn* instr) {
2840  if (FLAG_trace && info()->IsOptimizing()) {
2841  // Push the return value on the stack as the parameter.
2842  // Runtime::TraceExit returns its parameter in r0. Because we're leaving
2843  // the code managed by the register allocator and tearing down the frame,
2844  // it's safe to write to the context register.
2845  __ push(r0);
2846  __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
2847  __ CallRuntime(Runtime::kTraceExit, 1);
2848  }
2849  if (info()->saves_caller_doubles()) {
2850  RestoreCallerDoubles();
2851  }
2852  int no_frame_start = -1;
2853  if (NeedsEagerFrame()) {
2854  no_frame_start = masm_->LeaveFrame(StackFrame::JAVA_SCRIPT);
2855  }
2856  if (instr->has_constant_parameter_count()) {
2857  int parameter_count = ToInteger32(instr->constant_parameter_count());
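 // Drop the parameters and the receiver (hence the + 1) from the stack.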
2858  int32_t sp_delta = (parameter_count + 1) * kPointerSize;
2859  if (sp_delta != 0) {
2860  __ add(sp, sp, Operand(sp_delta));
2861  }
2862  } else {
2863  Register reg = ToRegister(instr->parameter_count());
2864  // The argument count parameter is a smi
2865  __ SmiUntag(reg);
2866  __ add(sp, sp, Operand(reg, LSL, kPointerSizeLog2));
2867  }
2868 
2869  __ Jump(lr);
2870 
2871  if (no_frame_start != -1) {
2872  info_->AddNoFrameRange(no_frame_start, masm_->pc_offset());
2873  }
2874 }
2875 
2876 
2877 void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
2878  Register result = ToRegister(instr->result());
2879  __ mov(ip, Operand(Handle<Object>(instr->hydrogen()->cell().handle())));
2880  __ ldr(result, FieldMemOperand(ip, Cell::kValueOffset));
2881  if (instr->hydrogen()->RequiresHoleCheck()) {
2882  __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
2883  __ cmp(result, ip);
2884  DeoptimizeIf(eq, instr->environment());
2885  }
2886 }
2887 
2888 
2889 void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
2890  ASSERT(ToRegister(instr->context()).is(cp));
2891  ASSERT(ToRegister(instr->global_object()).is(r0));
2892  ASSERT(ToRegister(instr->result()).is(r0));
2893 
2894  __ mov(r2, Operand(instr->name()));
2895  ContextualMode mode = instr->for_typeof() ? NOT_CONTEXTUAL : CONTEXTUAL;
2896  Handle<Code> ic = LoadIC::initialize_stub(isolate(), mode);
2897  CallCode(ic, RelocInfo::CODE_TARGET, instr);
2898 }
2899 
2900 
2901 void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
2902  Register value = ToRegister(instr->value());
2903  Register cell = scratch0();
2904 
2905  // Load the cell.
2906  __ mov(cell, Operand(instr->hydrogen()->cell().handle()));
2907 
2908  // If the cell we are storing to contains the hole it could have
2909  // been deleted from the property dictionary. In that case, we need
2910  // to update the property details in the property dictionary to mark
2911  // it as no longer deleted.
2912  if (instr->hydrogen()->RequiresHoleCheck()) {
2913  // We use a temp to check the payload (CompareRoot might clobber ip).
2914  Register payload = ToRegister(instr->temp());
2915  __ ldr(payload, FieldMemOperand(cell, Cell::kValueOffset));
2916  __ CompareRoot(payload, Heap::kTheHoleValueRootIndex);
2917  DeoptimizeIf(eq, instr->environment());
2918  }
2919 
2920  // Store the value.
2921  __ str(value, FieldMemOperand(cell, Cell::kValueOffset));
2922  // Cells are always rescanned, so no write barrier here.
2923 }
2924 
2925 
2926 void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
2927  Register context = ToRegister(instr->context());
2928  Register result = ToRegister(instr->result());
2929  __ ldr(result, ContextOperand(context, instr->slot_index()));
2930  if (instr->hydrogen()->RequiresHoleCheck()) {
2931  __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
2932  __ cmp(result, ip);
2933  if (instr->hydrogen()->DeoptimizesOnHole()) {
2934  DeoptimizeIf(eq, instr->environment());
2935  } else {
2936  __ mov(result, Operand(factory()->undefined_value()), LeaveCC, eq);
2937  }
2938  }
2939 }
2940 
2941 
2942 void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
2943  Register context = ToRegister(instr->context());
2944  Register value = ToRegister(instr->value());
2945  Register scratch = scratch0();
2946  MemOperand target = ContextOperand(context, instr->slot_index());
2947 
2948  Label skip_assignment;
2949 
2950  if (instr->hydrogen()->RequiresHoleCheck()) {
2951  __ ldr(scratch, target);
2952  __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
2953  __ cmp(scratch, ip);
2954  if (instr->hydrogen()->DeoptimizesOnHole()) {
2955  DeoptimizeIf(eq, instr->environment());
2956  } else {
2957  __ b(ne, &skip_assignment);
2958  }
2959  }
2960 
2961  __ str(value, target);
2962  if (instr->hydrogen()->NeedsWriteBarrier()) {
2963  SmiCheck check_needed =
2964  instr->hydrogen()->value()->IsHeapObject()
2965  ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
2966  __ RecordWriteContextSlot(context,
2967  target.offset(),
2968  value,
2969  scratch,
2970  GetLinkRegisterState(),
2971  kSaveFPRegs,
2972  EMIT_REMEMBERED_SET,
2973  check_needed);
2974  }
2975 
2976  __ bind(&skip_assignment);
2977 }
2978 
2979 
2980 void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
2981  HObjectAccess access = instr->hydrogen()->access();
2982  int offset = access.offset();
2983  Register object = ToRegister(instr->object());
2984 
2985  if (access.IsExternalMemory()) {
2986  Register result = ToRegister(instr->result());
2987  MemOperand operand = MemOperand(object, offset);
2988  __ Load(result, operand, access.representation());
2989  return;
2990  }
2991 
2992  if (instr->hydrogen()->representation().IsDouble()) {
2993  DwVfpRegister result = ToDoubleRegister(instr->result());
2994  __ vldr(result, FieldMemOperand(object, offset));
2995  return;
2996  }
2997 
2998  Register result = ToRegister(instr->result());
2999  if (!access.IsInobject()) {
3000  __ ldr(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
3001  object = result;
3002  }
3003  MemOperand operand = FieldMemOperand(object, offset);
3004  __ Load(result, operand, access.representation());
3005 }
3006 
3007 
3008 void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
3009  ASSERT(ToRegister(instr->context()).is(cp));
3010  ASSERT(ToRegister(instr->object()).is(r0));
3011  ASSERT(ToRegister(instr->result()).is(r0));
3012 
3013  // Name is always in r2.
3014  __ mov(r2, Operand(instr->name()));
3015  Handle<Code> ic = LoadIC::initialize_stub(isolate(), NOT_CONTEXTUAL);
3016  CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS);
3017 }
3018 
3019 
3020 void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
3021  Register scratch = scratch0();
3022  Register function = ToRegister(instr->function());
3023  Register result = ToRegister(instr->result());
3024 
3025  // Check that the function really is a function. Load map into the
3026  // result register.
3027  __ CompareObjectType(function, result, scratch, JS_FUNCTION_TYPE);
3028  DeoptimizeIf(ne, instr->environment());
3029 
3030  // Make sure that the function has an instance prototype.
3031  Label non_instance;
3032  __ ldrb(scratch, FieldMemOperand(result, Map::kBitFieldOffset));
3033  __ tst(scratch, Operand(1 << Map::kHasNonInstancePrototype));
3034  __ b(ne, &non_instance);
3035 
3036  // Get the prototype or initial map from the function.
3037  __ ldr(result,
3038  FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
3039 
3040  // Check that the function has a prototype or an initial map.
3041  __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
3042  __ cmp(result, ip);
3043  DeoptimizeIf(eq, instr->environment());
3044 
3045  // If the function does not have an initial map, we're done.
3046  Label done;
3047  __ CompareObjectType(result, scratch, scratch, MAP_TYPE);
3048  __ b(ne, &done);
3049 
3050  // Get the prototype from the initial map.
3051  __ ldr(result, FieldMemOperand(result, Map::kPrototypeOffset));
3052  __ jmp(&done);
3053 
3054  // Non-instance prototype: Fetch prototype from constructor field
3055  // in initial map.
3056  __ bind(&non_instance);
3057  __ ldr(result, FieldMemOperand(result, Map::kConstructorOffset));
3058 
3059  // All done.
3060  __ bind(&done);
3061 }
3062 
3063 
3064 void LCodeGen::DoLoadRoot(LLoadRoot* instr) {
3065  Register result = ToRegister(instr->result());
3066  __ LoadRoot(result, instr->index());
3067 }
3068 
3069 
3070 void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
3071  Register arguments = ToRegister(instr->arguments());
3072  Register result = ToRegister(instr->result());
3073  // There are two words between the frame pointer and the last argument.
3074  // Subtracting from length accounts for one of them; add one more.
3075  if (instr->length()->IsConstantOperand()) {
3076  int const_length = ToInteger32(LConstantOperand::cast(instr->length()));
3077  if (instr->index()->IsConstantOperand()) {
3078  int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
3079  int index = (const_length - const_index) + 1;
3080  __ ldr(result, MemOperand(arguments, index * kPointerSize));
3081  } else {
3082  Register index = ToRegister(instr->index());
3083  __ rsb(result, index, Operand(const_length + 1));
3084  __ ldr(result, MemOperand(arguments, result, LSL, kPointerSizeLog2));
3085  }
3086  } else if (instr->index()->IsConstantOperand()) {
3087  Register length = ToRegister(instr->length());
3088  int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
3089  int loc = const_index - 1;
3090  if (loc != 0) {
3091  __ sub(result, length, Operand(loc));
3092  __ ldr(result, MemOperand(arguments, result, LSL, kPointerSizeLog2));
3093  } else {
3094  __ ldr(result, MemOperand(arguments, length, LSL, kPointerSizeLog2));
3095  }
3096  } else {
3097  Register length = ToRegister(instr->length());
3098  Register index = ToRegister(instr->index());
3099  __ sub(result, length, index);
3100  __ add(result, result, Operand(1));
3101  __ ldr(result, MemOperand(arguments, result, LSL, kPointerSizeLog2));
3102  }
3103 }
3104 
3105 
3106 void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
3107  Register external_pointer = ToRegister(instr->elements());
3108  Register key = no_reg;
3109  ElementsKind elements_kind = instr->elements_kind();
3110  bool key_is_constant = instr->key()->IsConstantOperand();
3111  int constant_key = 0;
3112  if (key_is_constant) {
3113  constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
3114  if (constant_key & 0xF0000000) {
3115  Abort(kArrayIndexConstantValueTooBig);
3116  }
3117  } else {
3118  key = ToRegister(instr->key());
3119  }
3120  int element_size_shift = ElementsKindToShiftSize(elements_kind);
3121  int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
3122  ? (element_size_shift - kSmiTagSize) : element_size_shift;
3123  int additional_offset = IsFixedTypedArrayElementsKind(elements_kind)
3124  ? FixedTypedArrayBase::kDataOffset - kHeapObjectTag
3125  : 0;
3126 
3127 
3128  if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
3129  elements_kind == FLOAT32_ELEMENTS ||
3130  elements_kind == EXTERNAL_FLOAT64_ELEMENTS ||
3131  elements_kind == FLOAT64_ELEMENTS) {
3132  int base_offset =
3133  (instr->additional_index() << element_size_shift) + additional_offset;
3134  DwVfpRegister result = ToDoubleRegister(instr->result());
3135  Operand operand = key_is_constant
3136  ? Operand(constant_key << element_size_shift)
3137  : Operand(key, LSL, shift_size);
3138  __ add(scratch0(), external_pointer, operand);
3139  if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
3140  elements_kind == FLOAT32_ELEMENTS) {
3141  __ vldr(double_scratch0().low(), scratch0(), base_offset);
3142  __ vcvt_f64_f32(result, double_scratch0().low());
3143  } else { // loading doubles, not floats.
3144  __ vldr(result, scratch0(), base_offset);
3145  }
3146  } else {
3147  Register result = ToRegister(instr->result());
3148  MemOperand mem_operand = PrepareKeyedOperand(
3149  key, external_pointer, key_is_constant, constant_key,
3150  element_size_shift, shift_size,
3151  instr->additional_index(), additional_offset);
3152  switch (elements_kind) {
3153  case EXTERNAL_INT8_ELEMENTS:
3154  case INT8_ELEMENTS:
3155  __ ldrsb(result, mem_operand);
3156  break;
3157  case EXTERNAL_UINT8_CLAMPED_ELEMENTS:
3158  case EXTERNAL_UINT8_ELEMENTS:
3159  case UINT8_ELEMENTS:
3160  case UINT8_CLAMPED_ELEMENTS:
3161  __ ldrb(result, mem_operand);
3162  break;
3163  case EXTERNAL_INT16_ELEMENTS:
3164  case INT16_ELEMENTS:
3165  __ ldrsh(result, mem_operand);
3166  break;
3167  case EXTERNAL_UINT16_ELEMENTS:
3168  case UINT16_ELEMENTS:
3169  __ ldrh(result, mem_operand);
3170  break;
3171  case EXTERNAL_INT32_ELEMENTS:
3172  case INT32_ELEMENTS:
3173  __ ldr(result, mem_operand);
3174  break;
3175  case EXTERNAL_UINT32_ELEMENTS:
3176  case UINT32_ELEMENTS:
3177  __ ldr(result, mem_operand);
3178  if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
3179  __ cmp(result, Operand(0x80000000));
3180  DeoptimizeIf(cs, instr->environment());
3181  }
3182  break;
3183  case FLOAT32_ELEMENTS:
3184  case FLOAT64_ELEMENTS:
3185  case EXTERNAL_FLOAT32_ELEMENTS:
3186  case EXTERNAL_FLOAT64_ELEMENTS:
3187  case FAST_HOLEY_DOUBLE_ELEMENTS:
3188  case FAST_HOLEY_ELEMENTS:
3189  case FAST_HOLEY_SMI_ELEMENTS:
3190  case FAST_DOUBLE_ELEMENTS:
3191  case FAST_ELEMENTS:
3192  case FAST_SMI_ELEMENTS:
3193  case DICTIONARY_ELEMENTS:
3194  case SLOPPY_ARGUMENTS_ELEMENTS:
3195  UNREACHABLE();
3196  break;
3197  }
3198  }
3199 }
3200 
3201 
3202 void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
3203  Register elements = ToRegister(instr->elements());
3204  bool key_is_constant = instr->key()->IsConstantOperand();
3205  Register key = no_reg;
3206  DwVfpRegister result = ToDoubleRegister(instr->result());
3207  Register scratch = scratch0();
3208 
3209  int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
3210 
3211  int base_offset =
3212  FixedDoubleArray::kHeaderSize - kHeapObjectTag +
3213  (instr->additional_index() << element_size_shift);
3214  if (key_is_constant) {
3215  int constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
3216  if (constant_key & 0xF0000000) {
3217  Abort(kArrayIndexConstantValueTooBig);
3218  }
3219  base_offset += constant_key << element_size_shift;
3220  }
3221  __ add(scratch, elements, Operand(base_offset));
3222 
3223  if (!key_is_constant) {
3224  key = ToRegister(instr->key());
3225  int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
3226  ? (element_size_shift - kSmiTagSize) : element_size_shift;
3227  __ add(scratch, scratch, Operand(key, LSL, shift_size));
3228  }
3229 
3230  __ vldr(result, scratch, 0);
3231 
3232  if (instr->hydrogen()->RequiresHoleCheck()) {
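 // The hole is a NaN with a distinguished upper word; load the high 32 bits
 // of the value just read and compare them against that pattern.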
3233  __ ldr(scratch, MemOperand(scratch, sizeof(kHoleNanLower32)));
3234  __ cmp(scratch, Operand(kHoleNanUpper32));
3235  DeoptimizeIf(eq, instr->environment());
3236  }
3237 }
3238 
3239 
3240 void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
3241  Register elements = ToRegister(instr->elements());
3242  Register result = ToRegister(instr->result());
3243  Register scratch = scratch0();
3244  Register store_base = scratch;
3245  int offset = 0;
3246 
3247  if (instr->key()->IsConstantOperand()) {
3248  LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
3249  offset = FixedArray::OffsetOfElementAt(ToInteger32(const_operand) +
3250  instr->additional_index());
3251  store_base = elements;
3252  } else {
3253  Register key = ToRegister(instr->key());
3254  // Even though the HLoadKeyed instruction forces the input
3255  // representation for the key to be an integer, the input gets replaced
3256  // during bound check elimination with the index argument to the bounds
3257  // check, which can be tagged, so that case must be handled here, too.
3258  if (instr->hydrogen()->key()->representation().IsSmi()) {
3259  __ add(scratch, elements, Operand::PointerOffsetFromSmiKey(key));
3260  } else {
3261  __ add(scratch, elements, Operand(key, LSL, kPointerSizeLog2));
3262  }
3263  offset = FixedArray::OffsetOfElementAt(instr->additional_index());
3264  }
3265  __ ldr(result, FieldMemOperand(store_base, offset));
3266 
3267  // Check for the hole value.
3268  if (instr->hydrogen()->RequiresHoleCheck()) {
3269  if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
3270  __ SmiTst(result);
3271  DeoptimizeIf(ne, instr->environment());
3272  } else {
3273  __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
3274  __ cmp(result, scratch);
3275  DeoptimizeIf(eq, instr->environment());
3276  }
3277  }
3278 }
3279 
3280 
3281 void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) {
3282  if (instr->is_typed_elements()) {
3283  DoLoadKeyedExternalArray(instr);
3284  } else if (instr->hydrogen()->representation().IsDouble()) {
3285  DoLoadKeyedFixedDoubleArray(instr);
3286  } else {
3287  DoLoadKeyedFixedArray(instr);
3288  }
3289 }
3290 
3291 
3292 MemOperand LCodeGen::PrepareKeyedOperand(Register key,
3293  Register base,
3294  bool key_is_constant,
3295  int constant_key,
3296  int element_size,
3297  int shift_size,
3298  int additional_index,
3299  int additional_offset) {
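 // A non-negative shift_size means the key is an untagged integer scaled by
 // shifting left; shift_size == -1 means the key is a smi indexing byte-sized
 // elements, so it is shifted right by one to drop the tag.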
3300  int base_offset = (additional_index << element_size) + additional_offset;
3301  if (key_is_constant) {
3302  return MemOperand(base,
3303  base_offset + (constant_key << element_size));
3304  }
3305 
3306  if (additional_offset != 0) {
3307  __ mov(scratch0(), Operand(base_offset));
3308  if (shift_size >= 0) {
3309  __ add(scratch0(), scratch0(), Operand(key, LSL, shift_size));
3310  } else {
3311  ASSERT_EQ(-1, shift_size);
3312  __ add(scratch0(), scratch0(), Operand(key, LSR, 1));
3313  }
3314  return MemOperand(base, scratch0());
3315  }
3316 
3317  if (additional_index != 0) {
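 // Scale the constant index so it is expressed in the same (possibly
 // smi-tagged) units as the key before folding it into the key register
 // below.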
3318  additional_index *= 1 << (element_size - shift_size);
3319  __ add(scratch0(), key, Operand(additional_index));
3320  }
3321 
3322  if (additional_index == 0) {
3323  if (shift_size >= 0) {
3324  return MemOperand(base, key, LSL, shift_size);
3325  } else {
3326  ASSERT_EQ(-1, shift_size);
3327  return MemOperand(base, key, LSR, 1);
3328  }
3329  }
3330 
3331  if (shift_size >= 0) {
3332  return MemOperand(base, scratch0(), LSL, shift_size);
3333  } else {
3334  ASSERT_EQ(-1, shift_size);
3335  return MemOperand(base, scratch0(), LSR, 1);
3336  }
3337 }
3338 
3339 
3340 void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
3341  ASSERT(ToRegister(instr->context()).is(cp));
3342  ASSERT(ToRegister(instr->object()).is(r1));
3343  ASSERT(ToRegister(instr->key()).is(r0));
3344 
3345  Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
3346  CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS);
3347 }
3348 
3349 
3350 void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
3351  Register scratch = scratch0();
3352  Register result = ToRegister(instr->result());
3353 
3354  if (instr->hydrogen()->from_inlined()) {
3355  __ sub(result, sp, Operand(2 * kPointerSize));
3356  } else {
3357  // Check if the calling frame is an arguments adaptor frame.
3358  Label done, adapted;
3359  __ ldr(scratch, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
3360  __ ldr(result, MemOperand(scratch, StandardFrameConstants::kContextOffset));
3361  __ cmp(result, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
3362 
3363  // Result is the frame pointer for the frame if not adapted and for the real
3364  // frame below the adaptor frame if adapted.
3365  __ mov(result, fp, LeaveCC, ne);
3366  __ mov(result, scratch, LeaveCC, eq);
3367  }
3368 }
3369 
3370 
3371 void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
3372  Register elem = ToRegister(instr->elements());
3373  Register result = ToRegister(instr->result());
3374 
3375  Label done;
3376 
3377  // If there is no arguments adaptor frame, the number of arguments is fixed.
3378  __ cmp(fp, elem);
3379  __ mov(result, Operand(scope()->num_parameters()));
3380  __ b(eq, &done);
3381 
3382  // Arguments adaptor frame present. Get argument length from there.
3384  __ ldr(result,
3386  __ SmiUntag(result);
3387 
3388  // Argument length is in result register.
3389  __ bind(&done);
3390 }
3391 
3392 
3393 void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
3394  Register receiver = ToRegister(instr->receiver());
3395  Register function = ToRegister(instr->function());
3396  Register result = ToRegister(instr->result());
3397  Register scratch = scratch0();
3398 
3399  // If the receiver is null or undefined, we have to pass the global
3400  // object as a receiver to normal functions. Values have to be
3401  // passed unchanged to builtins and strict-mode functions.
3402  Label global_object, result_in_receiver;
3403 
3404  if (!instr->hydrogen()->known_function()) {
3405  // Do not transform the receiver to object for strict mode
3406  // functions.
3407  __ ldr(scratch,
3409  __ ldr(scratch,
3412  __ tst(scratch, Operand(mask));
3413  __ b(ne, &result_in_receiver);
3414 
3415  // Do not transform the receiver to object for builtins.
3416  __ tst(scratch, Operand(1 << (SharedFunctionInfo::kNative + kSmiTagSize)));
3417  __ b(ne, &result_in_receiver);
3418  }
3419 
3420  // Normal function. Replace undefined or null with global receiver.
3421  __ LoadRoot(scratch, Heap::kNullValueRootIndex);
3422  __ cmp(receiver, scratch);
3423  __ b(eq, &global_object);
3424  __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
3425  __ cmp(receiver, scratch);
3426  __ b(eq, &global_object);
3427 
3428  // Deoptimize if the receiver is not a JS object.
3429  __ SmiTst(receiver);
3430  DeoptimizeIf(eq, instr->environment());
3431  __ CompareObjectType(receiver, scratch, scratch, FIRST_SPEC_OBJECT_TYPE);
3432  DeoptimizeIf(lt, instr->environment());
3433 
3434  __ b(&result_in_receiver);
3435  __ bind(&global_object);
3436  __ ldr(result, FieldMemOperand(function, JSFunction::kContextOffset));
3437  __ ldr(result,
3439  __ ldr(result,
3441 
3442  if (result.is(receiver)) {
3443  __ bind(&result_in_receiver);
3444  } else {
3445  Label result_ok;
3446  __ b(&result_ok);
3447  __ bind(&result_in_receiver);
3448  __ mov(result, receiver);
3449  __ bind(&result_ok);
3450  }
3451 }
3452 
3453 
3454 void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
3455  Register receiver = ToRegister(instr->receiver());
3456  Register function = ToRegister(instr->function());
3457  Register length = ToRegister(instr->length());
3458  Register elements = ToRegister(instr->elements());
3459  Register scratch = scratch0();
3460  ASSERT(receiver.is(r0)); // Used for parameter count.
3461  ASSERT(function.is(r1)); // Required by InvokeFunction.
3462  ASSERT(ToRegister(instr->result()).is(r0));
3463 
3464  // Copy the arguments to this function possibly from the
3465  // adaptor frame below it.
3466  const uint32_t kArgumentsLimit = 1 * KB;
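 // Guard against pathologically large argument counts: pushing them all
 // would risk overflowing the stack, so deoptimize and let unoptimized code
 // handle the call.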
3467  __ cmp(length, Operand(kArgumentsLimit));
3468  DeoptimizeIf(hi, instr->environment());
3469 
3470  // Push the receiver and use the register to keep the original
3471  // number of arguments.
3472  __ push(receiver);
3473  __ mov(receiver, length);
3474  // The arguments are at a one pointer size offset from elements.
3475  __ add(elements, elements, Operand(1 * kPointerSize));
3476 
3477  // Loop through the arguments pushing them onto the execution
3478  // stack.
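 // length serves both as the loop counter and as the index of the next slot
 // to copy; the original argument count was already saved in receiver above.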
3479  Label invoke, loop;
3480  // length is a small non-negative integer, due to the test above.
3481  __ cmp(length, Operand::Zero());
3482  __ b(eq, &invoke);
3483  __ bind(&loop);
3484  __ ldr(scratch, MemOperand(elements, length, LSL, 2));
3485  __ push(scratch);
3486  __ sub(length, length, Operand(1), SetCC);
3487  __ b(ne, &loop);
3488 
3489  __ bind(&invoke);
3490  ASSERT(instr->HasPointerMap());
3491  LPointerMap* pointers = instr->pointer_map();
3492  SafepointGenerator safepoint_generator(
3493  this, pointers, Safepoint::kLazyDeopt);
3494  // The number of arguments is stored in receiver which is r0, as expected
3495  // by InvokeFunction.
3496  ParameterCount actual(receiver);
3497  __ InvokeFunction(function, actual, CALL_FUNCTION, safepoint_generator);
3498 }
3499 
3500 
3501 void LCodeGen::DoPushArgument(LPushArgument* instr) {
3502  LOperand* argument = instr->value();
3503  if (argument->IsDoubleRegister() || argument->IsDoubleStackSlot()) {
3504  Abort(kDoPushArgumentNotImplementedForDoubleType);
3505  } else {
3506  Register argument_reg = EmitLoadRegister(argument, ip);
3507  __ push(argument_reg);
3508  }
3509 }
3510 
3511 
3512 void LCodeGen::DoDrop(LDrop* instr) {
3513  __ Drop(instr->count());
3514 }
3515 
3516 
3517 void LCodeGen::DoThisFunction(LThisFunction* instr) {
3518  Register result = ToRegister(instr->result());
3519  __ ldr(result, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
3520 }
3521 
3522 
3523 void LCodeGen::DoContext(LContext* instr) {
3524  // If there is a non-return use, the context must be moved to a register.
3525  Register result = ToRegister(instr->result());
3526  if (info()->IsOptimizing()) {
3527  __ mov(result, cp);
3528  } else {
3529  // If there is no frame, the context must be in cp.
3530  ASSERT(result.is(cp));
3531  }
3532 }
3533 
3534 
3535 void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
3536  ASSERT(ToRegister(instr->context()).is(cp));
3537  __ push(cp); // The context is the first argument.
3538  __ Move(scratch0(), instr->hydrogen()->pairs());
3539  __ push(scratch0());
3540  __ mov(scratch0(), Operand(Smi::FromInt(instr->hydrogen()->flags())));
3541  __ push(scratch0());
3542  CallRuntime(Runtime::kHiddenDeclareGlobals, 3, instr);
3543 }
3544 
3545 
3546 void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
3547  int formal_parameter_count,
3548  int arity,
3549  LInstruction* instr,
3550  R1State r1_state) {
3551  bool dont_adapt_arguments =
3552  formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel;
3553  bool can_invoke_directly =
3554  dont_adapt_arguments || formal_parameter_count == arity;
3555 
3556  LPointerMap* pointers = instr->pointer_map();
3557 
3558  if (can_invoke_directly) {
3559  if (r1_state == R1_UNINITIALIZED) {
3560  __ Move(r1, function);
3561  }
3562 
3563  // Change context.
3565 
3566  // Set r0 to the arguments count if adaptation is not needed. Assumes that r0
3567  // is available to write to at this point.
3568  if (dont_adapt_arguments) {
3569  __ mov(r0, Operand(arity));
3570  }
3571 
3572  // Invoke function.
3574  __ Call(ip);
3575 
3576  // Set up deoptimization.
3577  RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
3578  } else {
3579  SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
3580  ParameterCount count(arity);
3581  ParameterCount expected(formal_parameter_count);
3582  __ InvokeFunction(function, expected, count, CALL_FUNCTION, generator);
3583  }
3584 }
3585 
3586 
3587 void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
3588  ASSERT(instr->context() != NULL);
3589  ASSERT(ToRegister(instr->context()).is(cp));
3590  Register input = ToRegister(instr->value());
3591  Register result = ToRegister(instr->result());
3592  Register scratch = scratch0();
3593 
3594  // Deoptimize if not a heap number.
3595  __ ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
3596  __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
3597  __ cmp(scratch, Operand(ip));
3598  DeoptimizeIf(ne, instr->environment());
3599 
3600  Label done;
3601  Register exponent = scratch0();
3602  scratch = no_reg;
3603  __ ldr(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));
3604  // Check the sign of the argument. If the argument is positive, just
3605  // return it.
3606  __ tst(exponent, Operand(HeapNumber::kSignMask));
3607  // Move the input to the result if necessary.
3608  __ Move(result, input);
3609  __ b(eq, &done);
3610 
3611  // Input is negative. Reverse its sign.
3612  // Preserve the value of all registers.
3613  {
3614  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
3615 
3616  // Registers were saved at the safepoint, so we can use
3617  // many scratch registers.
3618  Register tmp1 = input.is(r1) ? r0 : r1;
3619  Register tmp2 = input.is(r2) ? r0 : r2;
3620  Register tmp3 = input.is(r3) ? r0 : r3;
3621  Register tmp4 = input.is(r4) ? r0 : r4;
3622 
3623  // exponent: floating point exponent value.
3624 
3625  Label allocated, slow;
3626  __ LoadRoot(tmp4, Heap::kHeapNumberMapRootIndex);
3627  __ AllocateHeapNumber(tmp1, tmp2, tmp3, tmp4, &slow);
3628  __ b(&allocated);
3629 
3630  // Slow case: Call the runtime system to do the number allocation.
3631  __ bind(&slow);
3632 
3633  CallRuntimeFromDeferred(Runtime::kHiddenAllocateHeapNumber, 0, instr,
3634  instr->context());
3635  // Set the pointer to the new heap number in tmp.
3636  if (!tmp1.is(r0)) __ mov(tmp1, Operand(r0));
3637  // Restore input_reg after call to runtime.
3638  __ LoadFromSafepointRegisterSlot(input, input);
3639  __ ldr(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));
3640 
3641  __ bind(&allocated);
3642  // exponent: floating point exponent value.
3643  // tmp1: allocated heap number.
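 // Clear the sign bit in the exponent word and copy both words into the new
 // heap number; this produces the absolute value without touching the VFP
 // unit.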
3644  __ bic(exponent, exponent, Operand(HeapNumber::kSignMask));
3645  __ str(exponent, FieldMemOperand(tmp1, HeapNumber::kExponentOffset));
3646  __ ldr(tmp2, FieldMemOperand(input, HeapNumber::kMantissaOffset));
3648 
3649  __ StoreToSafepointRegisterSlot(tmp1, result);
3650  }
3651 
3652  __ bind(&done);
3653 }
3654 
3655 
3656 void LCodeGen::EmitIntegerMathAbs(LMathAbs* instr) {
3657  Register input = ToRegister(instr->value());
3658  Register result = ToRegister(instr->result());
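 // Branch-free abs: keep the input when it is non-negative (pl), otherwise
 // compute 0 - input (mi). Only kMinInt overflows the negation, setting V
 // and triggering the deoptimization below.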
3659  __ cmp(input, Operand::Zero());
3660  __ Move(result, input, pl);
3661  // We can make rsb conditional because the previous cmp instruction
3662  // will clear the V (overflow) flag and rsb won't set this flag
3663  // if input is positive.
3664  __ rsb(result, input, Operand::Zero(), SetCC, mi);
3665  // Deoptimize on overflow.
3666  DeoptimizeIf(vs, instr->environment());
3667 }
3668 
3669 
3670 void LCodeGen::DoMathAbs(LMathAbs* instr) {
3671  // Class for deferred case.
3672  class DeferredMathAbsTaggedHeapNumber V8_FINAL : public LDeferredCode {
3673  public:
3674  DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen, LMathAbs* instr)
3675  : LDeferredCode(codegen), instr_(instr) { }
3676  virtual void Generate() V8_OVERRIDE {
3677  codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
3678  }
3679  virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
3680  private:
3681  LMathAbs* instr_;
3682  };
3683 
3684  Representation r = instr->hydrogen()->value()->representation();
3685  if (r.IsDouble()) {
3686  DwVfpRegister input = ToDoubleRegister(instr->value());
3687  DwVfpRegister result = ToDoubleRegister(instr->result());
3688  __ vabs(result, input);
3689  } else if (r.IsSmiOrInteger32()) {
3690  EmitIntegerMathAbs(instr);
3691  } else {
3692  // Representation is tagged.
3693  DeferredMathAbsTaggedHeapNumber* deferred =
3694  new(zone()) DeferredMathAbsTaggedHeapNumber(this, instr);
3695  Register input = ToRegister(instr->value());
3696  // Smi check.
3697  __ JumpIfNotSmi(input, deferred->entry());
3698  // If smi, handle it directly.
3699  EmitIntegerMathAbs(instr);
3700  __ bind(deferred->exit());
3701  }
3702 }
3703 
3704 
3705 void LCodeGen::DoMathFloor(LMathFloor* instr) {
3706  DwVfpRegister input = ToDoubleRegister(instr->value());
3707  Register result = ToRegister(instr->result());
3708  Register input_high = scratch0();
3709  Label done, exact;
3710 
3711  __ TryInt32Floor(result, input, input_high, double_scratch0(), &done, &exact);
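 // TryInt32Floor only falls through when the input cannot be floored into a
 // 32-bit integer, so reaching the next instruction always deoptimizes (al).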
3712  DeoptimizeIf(al, instr->environment());
3713 
3714  __ bind(&exact);
3715  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3716  // Test for -0.
3717  __ cmp(result, Operand::Zero());
3718  __ b(ne, &done);
3719  __ cmp(input_high, Operand::Zero());
3720  DeoptimizeIf(mi, instr->environment());
3721  }
3722  __ bind(&done);
3723 }
3724 
3725 
3726 void LCodeGen::DoMathRound(LMathRound* instr) {
3727  DwVfpRegister input = ToDoubleRegister(instr->value());
3728  Register result = ToRegister(instr->result());
3729  DwVfpRegister double_scratch1 = ToDoubleRegister(instr->temp());
3730  DwVfpRegister input_plus_dot_five = double_scratch1;
3731  Register input_high = scratch0();
3732  DwVfpRegister dot_five = double_scratch0();
3733  Label convert, done;
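 // Rounding is split into two cases: values with |input| <= 0.5 produce 0 or
 // 1 directly (deoptimizing on -0 when required), everything else is rounded
 // as floor(input + 0.5).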
3734 
3735  __ Vmov(dot_five, 0.5, scratch0());
3736  __ vabs(double_scratch1, input);
3737  __ VFPCompareAndSetFlags(double_scratch1, dot_five);
3738  // If input is in [-0.5, -0], the result is -0.
3739  // If input is in [+0, +0.5[, the result is +0.
3740  // If the input is +0.5, the result is 1.
3741  __ b(hi, &convert); // Out of [-0.5, +0.5].
3742  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3743  __ VmovHigh(input_high, input);
3744  __ cmp(input_high, Operand::Zero());
3745  DeoptimizeIf(mi, instr->environment()); // [-0.5, -0].
3746  }
3747  __ VFPCompareAndSetFlags(input, dot_five);
3748  __ mov(result, Operand(1), LeaveCC, eq); // +0.5.
3749  // Remaining cases: [+0, +0.5[ or [-0.5, +0.5[, depending on
3750  // flag kBailoutOnMinusZero.
3751  __ mov(result, Operand::Zero(), LeaveCC, ne);
3752  __ b(&done);
3753 
3754  __ bind(&convert);
3755  __ vadd(input_plus_dot_five, input, dot_five);
3756  // Reuse dot_five (double_scratch0) as we no longer need this value.
3757  __ TryInt32Floor(result, input_plus_dot_five, input_high, double_scratch0(),
3758  &done, &done);
3759  DeoptimizeIf(al, instr->environment());
3760  __ bind(&done);
3761 }
3762 
3763 
3764 void LCodeGen::DoMathSqrt(LMathSqrt* instr) {
3765  DwVfpRegister input = ToDoubleRegister(instr->value());
3766  DwVfpRegister result = ToDoubleRegister(instr->result());
3767  __ vsqrt(result, input);
3768 }
3769 
3770 
3771 void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
3772  DwVfpRegister input = ToDoubleRegister(instr->value());
3773  DwVfpRegister result = ToDoubleRegister(instr->result());
3774  DwVfpRegister temp = double_scratch0();
3775 
3776  // Note that according to ECMA-262 15.8.2.13:
3777  // Math.pow(-Infinity, 0.5) == Infinity
3778  // Math.sqrt(-Infinity) == NaN
3779  Label done;
3780  __ vmov(temp, -V8_INFINITY, scratch0());
3781  __ VFPCompareAndSetFlags(input, temp);
3782  __ vneg(result, temp, eq);
3783  __ b(&done, eq);
3784 
3785  // Add +0 to convert -0 to +0.
3786  __ vadd(result, input, kDoubleRegZero);
3787  __ vsqrt(result, result);
3788  __ bind(&done);
3789 }
3790 
3791 
3792 void LCodeGen::DoPower(LPower* instr) {
3793  Representation exponent_type = instr->hydrogen()->right()->representation();
3794  // Having marked this as a call, we can use any registers.
3795  // Just make sure that the input/output registers are the expected ones.
3796  ASSERT(!instr->right()->IsDoubleRegister() ||
3797  ToDoubleRegister(instr->right()).is(d1));
3798  ASSERT(!instr->right()->IsRegister() ||
3799  ToRegister(instr->right()).is(r2));
3800  ASSERT(ToDoubleRegister(instr->left()).is(d0));
3801  ASSERT(ToDoubleRegister(instr->result()).is(d2));
3802 
3803  if (exponent_type.IsSmi()) {
3804  MathPowStub stub(MathPowStub::TAGGED);
3805  __ CallStub(&stub);
3806  } else if (exponent_type.IsTagged()) {
3807  Label no_deopt;
3808  __ JumpIfSmi(r2, &no_deopt);
3810  __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
3811  __ cmp(r6, Operand(ip));
3812  DeoptimizeIf(ne, instr->environment());
3813  __ bind(&no_deopt);
3814  MathPowStub stub(MathPowStub::TAGGED);
3815  __ CallStub(&stub);
3816  } else if (exponent_type.IsInteger32()) {
3817  MathPowStub stub(MathPowStub::INTEGER);
3818  __ CallStub(&stub);
3819  } else {
3820  ASSERT(exponent_type.IsDouble());
3821  MathPowStub stub(MathPowStub::DOUBLE);
3822  __ CallStub(&stub);
3823  }
3824 }
3825 
3826 
3827 void LCodeGen::DoMathExp(LMathExp* instr) {
3828  DwVfpRegister input = ToDoubleRegister(instr->value());
3829  DwVfpRegister result = ToDoubleRegister(instr->result());
3830  DwVfpRegister double_scratch1 = ToDoubleRegister(instr->double_temp());
3831  DwVfpRegister double_scratch2 = double_scratch0();
3832  Register temp1 = ToRegister(instr->temp1());
3833  Register temp2 = ToRegister(instr->temp2());
3834 
3836  masm(), input, result, double_scratch1, double_scratch2,
3837  temp1, temp2, scratch0());
3838 }
3839 
3840 
3841 void LCodeGen::DoMathLog(LMathLog* instr) {
3842  __ PrepareCallCFunction(0, 1, scratch0());
3843  __ MovToFloatParameter(ToDoubleRegister(instr->value()));
3844  __ CallCFunction(ExternalReference::math_log_double_function(isolate()),
3845  0, 1);
3846  __ MovFromFloatResult(ToDoubleRegister(instr->result()));
3847 }
3848 
3849 
3850 void LCodeGen::DoMathClz32(LMathClz32* instr) {
3851  Register input = ToRegister(instr->value());
3852  Register result = ToRegister(instr->result());
3853  __ clz(result, input);
3854 }
3855 
3856 
3857 void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
3858  ASSERT(ToRegister(instr->context()).is(cp));
3859  ASSERT(ToRegister(instr->function()).is(r1));
3860  ASSERT(instr->HasPointerMap());
3861 
3862  Handle<JSFunction> known_function = instr->hydrogen()->known_function();
3863  if (known_function.is_null()) {
3864  LPointerMap* pointers = instr->pointer_map();
3865  SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
3866  ParameterCount count(instr->arity());
3867  __ InvokeFunction(r1, count, CALL_FUNCTION, generator);
3868  } else {
3869  CallKnownFunction(known_function,
3870  instr->hydrogen()->formal_parameter_count(),
3871  instr->arity(),
3872  instr,
3873  R1_CONTAINS_TARGET);
3874  }
3875 }
3876 
3877 
3878 void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
3879  ASSERT(ToRegister(instr->result()).is(r0));
3880 
3881  LPointerMap* pointers = instr->pointer_map();
3882  SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
3883 
3884  if (instr->target()->IsConstantOperand()) {
3885  LConstantOperand* target = LConstantOperand::cast(instr->target());
3886  Handle<Code> code = Handle<Code>::cast(ToHandle(target));
3887  generator.BeforeCall(__ CallSize(code, RelocInfo::CODE_TARGET));
3888  PlatformCallInterfaceDescriptor* call_descriptor =
3889  instr->descriptor()->platform_specific_descriptor();
3890  __ Call(code, RelocInfo::CODE_TARGET, TypeFeedbackId::None(), al,
3891  call_descriptor->storage_mode());
3892  } else {
3893  ASSERT(instr->target()->IsRegister());
3894  Register target = ToRegister(instr->target());
3895  generator.BeforeCall(__ CallSize(target));
3896  __ add(target, target, Operand(Code::kHeaderSize - kHeapObjectTag));
3897  __ Call(target);
3898  }
3899  generator.AfterCall();
3900 }
3901 
3902 
3903 void LCodeGen::DoCallJSFunction(LCallJSFunction* instr) {
3904  ASSERT(ToRegister(instr->function()).is(r1));
3905  ASSERT(ToRegister(instr->result()).is(r0));
3906 
3907  if (instr->hydrogen()->pass_argument_count()) {
3908  __ mov(r0, Operand(instr->arity()));
3909  }
3910 
3911  // Change context.
3913 
3914  // Load the code entry address
3916  __ Call(ip);
3917 
3918  RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
3919 }
3920 
3921 
3922 void LCodeGen::DoCallFunction(LCallFunction* instr) {
3923  ASSERT(ToRegister(instr->context()).is(cp));
3924  ASSERT(ToRegister(instr->function()).is(r1));
3925  ASSERT(ToRegister(instr->result()).is(r0));
3926 
3927  int arity = instr->arity();
3928  CallFunctionStub stub(arity, instr->hydrogen()->function_flags());
3929  CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
3930 }
3931 
3932 
3933 void LCodeGen::DoCallNew(LCallNew* instr) {
3934  ASSERT(ToRegister(instr->context()).is(cp));
3935  ASSERT(ToRegister(instr->constructor()).is(r1));
3936  ASSERT(ToRegister(instr->result()).is(r0));
3937 
3938  __ mov(r0, Operand(instr->arity()));
3939  // No cell in r2 for construct type feedback in optimized code
3940  __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
3941  CallConstructStub stub(NO_CALL_FUNCTION_FLAGS);
3942  CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
3943 }
3944 
3945 
3946 void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
3947  ASSERT(ToRegister(instr->context()).is(cp));
3948  ASSERT(ToRegister(instr->constructor()).is(r1));
3949  ASSERT(ToRegister(instr->result()).is(r0));
3950 
3951  __ mov(r0, Operand(instr->arity()));
3952  __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
3953  ElementsKind kind = instr->hydrogen()->elements_kind();
3954  AllocationSiteOverrideMode override_mode =
3957  : DONT_OVERRIDE;
3958 
3959  if (instr->arity() == 0) {
3960  ArrayNoArgumentConstructorStub stub(kind, override_mode);
3961  CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
3962  } else if (instr->arity() == 1) {
3963  Label done;
3964  if (IsFastPackedElementsKind(kind)) {
3965  Label packed_case;
3966  // Look at the first argument: a length of zero keeps the packed kind,
3967  // while a non-zero length creates holes and requires the holey stub.
3968  __ ldr(r5, MemOperand(sp, 0));
3969  __ cmp(r5, Operand::Zero());
3970  __ b(eq, &packed_case);
3971 
3972  ElementsKind holey_kind = GetHoleyElementsKind(kind);
3973  ArraySingleArgumentConstructorStub stub(holey_kind, override_mode);
3974  CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
3975  __ jmp(&done);
3976  __ bind(&packed_case);
3977  }
3978 
3979  ArraySingleArgumentConstructorStub stub(kind, override_mode);
3980  CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
3981  __ bind(&done);
3982  } else {
3983  ArrayNArgumentsConstructorStub stub(kind, override_mode);
3984  CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
3985  }
3986 }
3987 
3988 
3989 void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
3990  CallRuntime(instr->function(), instr->arity(), instr);
3991 }
3992 
3993 
3994 void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) {
3995  Register function = ToRegister(instr->function());
3996  Register code_object = ToRegister(instr->code_object());
3997  __ add(code_object, code_object, Operand(Code::kHeaderSize - kHeapObjectTag));
3998  __ str(code_object,
4000 }
4001 
4002 
4003 void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) {
4004  Register result = ToRegister(instr->result());
4005  Register base = ToRegister(instr->base_object());
4006  if (instr->offset()->IsConstantOperand()) {
4007  LConstantOperand* offset = LConstantOperand::cast(instr->offset());
4008  __ add(result, base, Operand(ToInteger32(offset)));
4009  } else {
4010  Register offset = ToRegister(instr->offset());
4011  __ add(result, base, offset);
4012  }
4013 }
4014 
4015 
4016 void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
4017  Representation representation = instr->representation();
4018 
4019  Register object = ToRegister(instr->object());
4020  Register scratch = scratch0();
4021  HObjectAccess access = instr->hydrogen()->access();
4022  int offset = access.offset();
4023 
4024  if (access.IsExternalMemory()) {
4025  Register value = ToRegister(instr->value());
4026  MemOperand operand = MemOperand(object, offset);
4027  __ Store(value, operand, representation);
4028  return;
4029  }
4030 
4031  Handle<Map> transition = instr->transition();
4032  SmiCheck check_needed =
4033  instr->hydrogen()->value()->IsHeapObject()
4035 
4036  ASSERT(!(representation.IsSmi() &&
4037  instr->value()->IsConstantOperand() &&
4038  !IsSmi(LConstantOperand::cast(instr->value()))));
4039  if (representation.IsHeapObject()) {
4040  Register value = ToRegister(instr->value());
4041  if (!instr->hydrogen()->value()->type().IsHeapObject()) {
4042  __ SmiTst(value);
4043  DeoptimizeIf(eq, instr->environment());
4044 
4045  // We know now that value is not a smi, so we can omit the check below.
4046  check_needed = OMIT_SMI_CHECK;
4047  }
4048  } else if (representation.IsDouble()) {
4049  ASSERT(transition.is_null());
4050  ASSERT(access.IsInobject());
4051  ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
4052  DwVfpRegister value = ToDoubleRegister(instr->value());
4053  __ vstr(value, FieldMemOperand(object, offset));
4054  return;
4055  }
4056 
4057  if (!transition.is_null()) {
4058  __ mov(scratch, Operand(transition));
4059  __ str(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
4060  if (instr->hydrogen()->NeedsWriteBarrierForMap()) {
4061  Register temp = ToRegister(instr->temp());
4062  // Update the write barrier for the map field.
4063  __ RecordWriteField(object,
4065  scratch,
4066  temp,
4068  kSaveFPRegs,
4070  OMIT_SMI_CHECK);
4071  }
4072  }
4073 
4074  // Do the store.
4075  Register value = ToRegister(instr->value());
4076  if (access.IsInobject()) {
4077  MemOperand operand = FieldMemOperand(object, offset);
4078  __ Store(value, operand, representation);
4079  if (instr->hydrogen()->NeedsWriteBarrier()) {
4080  // Update the write barrier for the object for in-object properties.
4081  __ RecordWriteField(object,
4082  offset,
4083  value,
4084  scratch,
4086  kSaveFPRegs,
4088  check_needed);
4089  }
4090  } else {
4091  __ ldr(scratch, FieldMemOperand(object, JSObject::kPropertiesOffset));
4092  MemOperand operand = FieldMemOperand(scratch, offset);
4093  __ Store(value, operand, representation);
4094  if (instr->hydrogen()->NeedsWriteBarrier()) {
4095  // Update the write barrier for the properties array.
4096  // object is used as a scratch register.
4097  __ RecordWriteField(scratch,
4098  offset,
4099  value,
4100  object,
4102  kSaveFPRegs,
4104  check_needed);
4105  }
4106  }
4107 }
4108 
4109 
4110 void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
4111  ASSERT(ToRegister(instr->context()).is(cp));
4112  ASSERT(ToRegister(instr->object()).is(r1));
4113  ASSERT(ToRegister(instr->value()).is(r0));
4114 
4115  // Name is always in r2.
4116  __ mov(r2, Operand(instr->name()));
4117  Handle<Code> ic = StoreIC::initialize_stub(isolate(), instr->strict_mode());
4118  CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS);
4119 }
4120 
4121 
4122 void LCodeGen::ApplyCheckIf(Condition condition, LBoundsCheck* check) {
4123  if (FLAG_debug_code && check->hydrogen()->skip_check()) {
4124  Label done;
4125  __ b(NegateCondition(condition), &done);
4126  __ stop("eliminated bounds check failed");
4127  __ bind(&done);
4128  } else {
4129  DeoptimizeIf(condition, check->environment());
4130  }
4131 }
4132 
4133 
4134 void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
4135  if (instr->hydrogen()->skip_check()) return;
4136 
4137  if (instr->index()->IsConstantOperand()) {
4138  int constant_index =
4139  ToInteger32(LConstantOperand::cast(instr->index()));
4140  if (instr->hydrogen()->length()->representation().IsSmi()) {
4141  __ mov(ip, Operand(Smi::FromInt(constant_index)));
4142  } else {
4143  __ mov(ip, Operand(constant_index));
4144  }
4145  __ cmp(ip, ToRegister(instr->length()));
4146  } else {
4147  __ cmp(ToRegister(instr->index()), ToRegister(instr->length()));
4148  }
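 // The comparison is index against length: deoptimize when the index is
 // strictly above the length (hi) if equality is allowed, or above or equal
 // (hs) otherwise.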
4149  Condition condition = instr->hydrogen()->allow_equality() ? hi : hs;
4150  ApplyCheckIf(condition, instr);
4151 }
4152 
4153 
4154 void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
4155  Register external_pointer = ToRegister(instr->elements());
4156  Register key = no_reg;
4157  ElementsKind elements_kind = instr->elements_kind();
4158  bool key_is_constant = instr->key()->IsConstantOperand();
4159  int constant_key = 0;
4160  if (key_is_constant) {
4161  constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
4162  if (constant_key & 0xF0000000) {
4163  Abort(kArrayIndexConstantValueTooBig);
4164  }
4165  } else {
4166  key = ToRegister(instr->key());
4167  }
4168  int element_size_shift = ElementsKindToShiftSize(elements_kind);
4169  int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
4170  ? (element_size_shift - kSmiTagSize) : element_size_shift;
4171  int additional_offset = IsFixedTypedArrayElementsKind(elements_kind)
4173  : 0;
4174 
4175  if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
4176  elements_kind == FLOAT32_ELEMENTS ||
4177  elements_kind == EXTERNAL_FLOAT64_ELEMENTS ||
4178  elements_kind == FLOAT64_ELEMENTS) {
4179  int base_offset =
4180  (instr->additional_index() << element_size_shift) + additional_offset;
4181  Register address = scratch0();
4182  DwVfpRegister value(ToDoubleRegister(instr->value()));
4183  if (key_is_constant) {
4184  if (constant_key != 0) {
4185  __ add(address, external_pointer,
4186  Operand(constant_key << element_size_shift));
4187  } else {
4188  address = external_pointer;
4189  }
4190  } else {
4191  __ add(address, external_pointer, Operand(key, LSL, shift_size));
4192  }
4193  if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
4194  elements_kind == FLOAT32_ELEMENTS) {
4195  __ vcvt_f32_f64(double_scratch0().low(), value);
4196  __ vstr(double_scratch0().low(), address, base_offset);
4197  } else { // Storing doubles, not floats.
4198  __ vstr(value, address, base_offset);
4199  }
4200  } else {
4201  Register value(ToRegister(instr->value()));
4202  MemOperand mem_operand = PrepareKeyedOperand(
4203  key, external_pointer, key_is_constant, constant_key,
4204  element_size_shift, shift_size,
4205  instr->additional_index(), additional_offset);
4206  switch (elements_kind) {
4210  case UINT8_ELEMENTS:
4212  case INT8_ELEMENTS:
4213  __ strb(value, mem_operand);
4214  break;
4217  case INT16_ELEMENTS:
4218  case UINT16_ELEMENTS:
4219  __ strh(value, mem_operand);
4220  break;
4223  case INT32_ELEMENTS:
4224  case UINT32_ELEMENTS:
4225  __ str(value, mem_operand);
4226  break;
4227  case FLOAT32_ELEMENTS:
4228  case FLOAT64_ELEMENTS:
4231  case FAST_DOUBLE_ELEMENTS:
4232  case FAST_ELEMENTS:
4233  case FAST_SMI_ELEMENTS:
4235  case FAST_HOLEY_ELEMENTS:
4237  case DICTIONARY_ELEMENTS:
4239  UNREACHABLE();
4240  break;
4241  }
4242  }
4243 }
4244 
4245 
4246 void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
4247  DwVfpRegister value = ToDoubleRegister(instr->value());
4248  Register elements = ToRegister(instr->elements());
4249  Register scratch = scratch0();
4250  DwVfpRegister double_scratch = double_scratch0();
4251  bool key_is_constant = instr->key()->IsConstantOperand();
4252 
4253  // Calculate the effective address of the slot in the array to store the
4254  // double value.
4255  int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
4256  if (key_is_constant) {
4257  int constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
4258  if (constant_key & 0xF0000000) {
4259  Abort(kArrayIndexConstantValueTooBig);
4260  }
4261  __ add(scratch, elements,
4262  Operand((constant_key << element_size_shift) +
4264  } else {
4265  int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
4266  ? (element_size_shift - kSmiTagSize) : element_size_shift;
4267  __ add(scratch, elements,
4269  __ add(scratch, scratch,
4270  Operand(ToRegister(instr->key()), LSL, shift_size));
4271  }
4272 
4273  if (instr->NeedsCanonicalization()) {
4274  // Force a canonical NaN.
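 // An arbitrary (non-canonical) NaN written into a fast double array could
 // be confused with the hole's bit pattern, so NaNs are canonicalized before
 // the store; debug code first asserts that VFP default-NaN mode is enabled.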
4275  if (masm()->emit_debug_code()) {
4276  __ vmrs(ip);
4277  __ tst(ip, Operand(kVFPDefaultNaNModeControlBit));
4278  __ Assert(ne, kDefaultNaNModeNotSet);
4279  }
4280  __ VFPCanonicalizeNaN(double_scratch, value);
4281  __ vstr(double_scratch, scratch,
4282  instr->additional_index() << element_size_shift);
4283  } else {
4284  __ vstr(value, scratch, instr->additional_index() << element_size_shift);
4285  }
4286 }
4287 
4288 
4289 void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
4290  Register value = ToRegister(instr->value());
4291  Register elements = ToRegister(instr->elements());
4292  Register key = instr->key()->IsRegister() ? ToRegister(instr->key())
4293  : no_reg;
4294  Register scratch = scratch0();
4295  Register store_base = scratch;
4296  int offset = 0;
4297 
4298  // Do the store.
4299  if (instr->key()->IsConstantOperand()) {
4300  ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
4301  LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
4302  offset = FixedArray::OffsetOfElementAt(ToInteger32(const_operand) +
4303  instr->additional_index());
4304  store_base = elements;
4305  } else {
4306  // Even though the HLoadKeyed instruction forces the input
4307  // representation for the key to be an integer, the input gets replaced
4308  // during bound check elimination with the index argument to the bounds
4309  // check, which can be tagged, so that case must be handled here, too.
4310  if (instr->hydrogen()->key()->representation().IsSmi()) {
4311  __ add(scratch, elements, Operand::PointerOffsetFromSmiKey(key));
4312  } else {
4313  __ add(scratch, elements, Operand(key, LSL, kPointerSizeLog2));
4314  }
4315  offset = FixedArray::OffsetOfElementAt(instr->additional_index());
4316  }
4317  __ str(value, FieldMemOperand(store_base, offset));
4318 
4319  if (instr->hydrogen()->NeedsWriteBarrier()) {
4320  SmiCheck check_needed =
4321  instr->hydrogen()->value()->IsHeapObject()
4323  // Compute address of modified element and store it into key register.
4324  __ add(key, store_base, Operand(offset - kHeapObjectTag));
4325  __ RecordWrite(elements,
4326  key,
4327  value,
4329  kSaveFPRegs,
4331  check_needed);
4332  }
4333 }
4334 
4335 
4336 void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) {
4337  // By cases: external or typed elements, fast double elements, or fast elements.
4338  if (instr->is_typed_elements()) {
4339  DoStoreKeyedExternalArray(instr);
4340  } else if (instr->hydrogen()->value()->representation().IsDouble()) {
4341  DoStoreKeyedFixedDoubleArray(instr);
4342  } else {
4343  DoStoreKeyedFixedArray(instr);
4344  }
4345 }
4346 
4347 
4348 void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
4349  ASSERT(ToRegister(instr->context()).is(cp));
4350  ASSERT(ToRegister(instr->object()).is(r2));
4351  ASSERT(ToRegister(instr->key()).is(r1));
4352  ASSERT(ToRegister(instr->value()).is(r0));
4353 
4354  Handle<Code> ic = instr->strict_mode() == STRICT
4355  ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
4356  : isolate()->builtins()->KeyedStoreIC_Initialize();
4357  CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS);
4358 }
4359 
4360 
4361 void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
4362  Register object_reg = ToRegister(instr->object());
4363  Register scratch = scratch0();
4364 
4365  Handle<Map> from_map = instr->original_map();
4366  Handle<Map> to_map = instr->transitioned_map();
4367  ElementsKind from_kind = instr->from_kind();
4368  ElementsKind to_kind = instr->to_kind();
4369 
4370  Label not_applicable;
4371  __ ldr(scratch, FieldMemOperand(object_reg, HeapObject::kMapOffset));
4372  __ cmp(scratch, Operand(from_map));
4373  __ b(ne, &not_applicable);
4374 
4375  if (IsSimpleMapChangeTransition(from_kind, to_kind)) {
4376  Register new_map_reg = ToRegister(instr->new_map_temp());
4377  __ mov(new_map_reg, Operand(to_map));
4378  __ str(new_map_reg, FieldMemOperand(object_reg, HeapObject::kMapOffset));
4379  // Write barrier.
4380  __ RecordWriteField(object_reg, HeapObject::kMapOffset, new_map_reg,
4382  } else {
4383  ASSERT(ToRegister(instr->context()).is(cp));
4384  PushSafepointRegistersScope scope(
4385  this, Safepoint::kWithRegistersAndDoubles);
4386  __ Move(r0, object_reg);
4387  __ Move(r1, to_map);
4388  bool is_js_array = from_map->instance_type() == JS_ARRAY_TYPE;
4389  TransitionElementsKindStub stub(from_kind, to_kind, is_js_array);
4390  __ CallStub(&stub);
4391  RecordSafepointWithRegistersAndDoubles(
4392  instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
4393  }
4394  __ bind(&not_applicable);
4395 }
4396 
4397 
4398 void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
4399  Register object = ToRegister(instr->object());
4400  Register temp = ToRegister(instr->temp());
4401  Label no_memento_found;
4402  __ TestJSArrayForAllocationMemento(object, temp, &no_memento_found);
4403  DeoptimizeIf(eq, instr->environment());
4404  __ bind(&no_memento_found);
4405 }
4406 
4407 
4408 void LCodeGen::DoStringAdd(LStringAdd* instr) {
4409  ASSERT(ToRegister(instr->context()).is(cp));
4410  ASSERT(ToRegister(instr->left()).is(r1));
4411  ASSERT(ToRegister(instr->right()).is(r0));
4412  StringAddStub stub(instr->hydrogen()->flags(),
4413  instr->hydrogen()->pretenure_flag());
4414  CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
4415 }
4416 
4417 
4418 void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
4419  class DeferredStringCharCodeAt V8_FINAL : public LDeferredCode {
4420  public:
4421  DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
4422  : LDeferredCode(codegen), instr_(instr) { }
4423  virtual void Generate() V8_OVERRIDE {
4424  codegen()->DoDeferredStringCharCodeAt(instr_);
4425  }
4426  virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
4427  private:
4428  LStringCharCodeAt* instr_;
4429  };
4430 
4431  DeferredStringCharCodeAt* deferred =
4432  new(zone()) DeferredStringCharCodeAt(this, instr);
4433 
4435  ToRegister(instr->string()),
4436  ToRegister(instr->index()),
4437  ToRegister(instr->result()),
4438  deferred->entry());
4439  __ bind(deferred->exit());
4440 }
4441 
4442 
4443 void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
4444  Register string = ToRegister(instr->string());
4445  Register result = ToRegister(instr->result());
4446  Register scratch = scratch0();
4447 
4448  // TODO(3095996): Get rid of this. For now, we need to make the
4449  // result register contain a valid pointer because it is already
4450  // contained in the register pointer map.
4451  __ mov(result, Operand::Zero());
4452 
4453  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
4454  __ push(string);
4455  // Push the index as a smi. This is safe because of the checks in
4456  // DoStringCharCodeAt above.
4457  if (instr->index()->IsConstantOperand()) {
4458  int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
4459  __ mov(scratch, Operand(Smi::FromInt(const_index)));
4460  __ push(scratch);
4461  } else {
4462  Register index = ToRegister(instr->index());
4463  __ SmiTag(index);
4464  __ push(index);
4465  }
4466  CallRuntimeFromDeferred(Runtime::kHiddenStringCharCodeAt, 2, instr,
4467  instr->context());
4468  __ AssertSmi(r0);
4469  __ SmiUntag(r0);
4470  __ StoreToSafepointRegisterSlot(r0, result);
4471 }
4472 
4473 
4474 void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
4475  class DeferredStringCharFromCode V8_FINAL : public LDeferredCode {
4476  public:
4477  DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
4478  : LDeferredCode(codegen), instr_(instr) { }
4479  virtual void Generate() V8_OVERRIDE {
4480  codegen()->DoDeferredStringCharFromCode(instr_);
4481  }
4482  virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
4483  private:
4484  LStringCharFromCode* instr_;
4485  };
4486 
4487  DeferredStringCharFromCode* deferred =
4488  new(zone()) DeferredStringCharFromCode(this, instr);
4489 
4490  ASSERT(instr->hydrogen()->value()->representation().IsInteger32());
4491  Register char_code = ToRegister(instr->char_code());
4492  Register result = ToRegister(instr->result());
4493  ASSERT(!char_code.is(result));
4494 
4495  __ cmp(char_code, Operand(String::kMaxOneByteCharCode));
4496  __ b(hi, deferred->entry());
4497  __ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex);
4498  __ add(result, result, Operand(char_code, LSL, kPointerSizeLog2));
4499  __ ldr(result, FieldMemOperand(result, FixedArray::kHeaderSize));
4500  __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
4501  __ cmp(result, ip);
4502  __ b(eq, deferred->entry());
4503  __ bind(deferred->exit());
4504 }
4505 
4506 
4507 void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
4508  Register char_code = ToRegister(instr->char_code());
4509  Register result = ToRegister(instr->result());
4510 
4511  // TODO(3095996): Get rid of this. For now, we need to make the
4512  // result register contain a valid pointer because it is already
4513  // contained in the register pointer map.
4514  __ mov(result, Operand::Zero());
4515 
4516  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
4517  __ SmiTag(char_code);
4518  __ push(char_code);
4519  CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr, instr->context());
4520  __ StoreToSafepointRegisterSlot(r0, result);
4521 }
4522 
4523 
4524 void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
4525  LOperand* input = instr->value();
4526  ASSERT(input->IsRegister() || input->IsStackSlot());
4527  LOperand* output = instr->result();
4528  ASSERT(output->IsDoubleRegister());
4529  SwVfpRegister single_scratch = double_scratch0().low();
4530  if (input->IsStackSlot()) {
4531  Register scratch = scratch0();
4532  __ ldr(scratch, ToMemOperand(input));
4533  __ vmov(single_scratch, scratch);
4534  } else {
4535  __ vmov(single_scratch, ToRegister(input));
4536  }
4537  __ vcvt_f64_s32(ToDoubleRegister(output), single_scratch);
4538 }
4539 
4540 
4541 void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
4542  LOperand* input = instr->value();
4543  LOperand* output = instr->result();
4544 
4545  SwVfpRegister flt_scratch = double_scratch0().low();
4546  __ vmov(flt_scratch, ToRegister(input));
4547  __ vcvt_f64_u32(ToDoubleRegister(output), flt_scratch);
4548 }
4549 
4550 
4551 void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
4552  class DeferredNumberTagI V8_FINAL : public LDeferredCode {
4553  public:
4554  DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr)
4555  : LDeferredCode(codegen), instr_(instr) { }
4556  virtual void Generate() V8_OVERRIDE {
4557  codegen()->DoDeferredNumberTagIU(instr_,
4558  instr_->value(),
4559  instr_->temp1(),
4560  instr_->temp2(),
4561  SIGNED_INT32);
4562  }
4563  virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
4564  private:
4565  LNumberTagI* instr_;
4566  };
4567 
4568  Register src = ToRegister(instr->value());
4569  Register dst = ToRegister(instr->result());
4570 
4571  DeferredNumberTagI* deferred = new(zone()) DeferredNumberTagI(this, instr);
4572  __ SmiTag(dst, src, SetCC);
4573  __ b(vs, deferred->entry());
4574  __ bind(deferred->exit());
4575 }
4576 
4577 
4578 void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
4579  class DeferredNumberTagU V8_FINAL : public LDeferredCode {
4580  public:
4581  DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr)
4582  : LDeferredCode(codegen), instr_(instr) { }
4583  virtual void Generate() V8_OVERRIDE {
4584  codegen()->DoDeferredNumberTagIU(instr_,
4585  instr_->value(),
4586  instr_->temp1(),
4587  instr_->temp2(),
4588  UNSIGNED_INT32);
4589  }
4590  virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
4591  private:
4592  LNumberTagU* instr_;
4593  };
4594 
4595  Register input = ToRegister(instr->value());
4596  Register result = ToRegister(instr->result());
4597 
4598  DeferredNumberTagU* deferred = new(zone()) DeferredNumberTagU(this, instr);
4599  __ cmp(input, Operand(Smi::kMaxValue));
4600  __ b(hi, deferred->entry());
4601  __ SmiTag(result, input);
4602  __ bind(deferred->exit());
4603 }
4604 
4605 
4607  LOperand* value,
4608  LOperand* temp1,
4609  LOperand* temp2,
4610  IntegerSignedness signedness) {
4611  Label done, slow;
4612  Register src = ToRegister(value);
4613  Register dst = ToRegister(instr->result());
4614  Register tmp1 = scratch0();
4615  Register tmp2 = ToRegister(temp1);
4616  Register tmp3 = ToRegister(temp2);
4617  LowDwVfpRegister dbl_scratch = double_scratch0();
4618 
4619  if (signedness == SIGNED_INT32) {
4620  // There was overflow, so bits 30 and 31 of the original integer
4621  // disagree. Try to allocate a heap number in new space and store
4622  // the value in there. If that fails, call the runtime system.
4623  if (dst.is(src)) {
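 // SmiTag overflowed, so dst holds the input shifted left by one with bit 31
 // lost; shifting back and flipping bit 31 recovers the original value.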
4624  __ SmiUntag(src, dst);
4625  __ eor(src, src, Operand(0x80000000));
4626  }
4627  __ vmov(dbl_scratch.low(), src);
4628  __ vcvt_f64_s32(dbl_scratch, dbl_scratch.low());
4629  } else {
4630  __ vmov(dbl_scratch.low(), src);
4631  __ vcvt_f64_u32(dbl_scratch, dbl_scratch.low());
4632  }
4633 
4634  if (FLAG_inline_new) {
4635  __ LoadRoot(tmp3, Heap::kHeapNumberMapRootIndex);
4636  __ AllocateHeapNumber(dst, tmp1, tmp2, tmp3, &slow, DONT_TAG_RESULT);
4637  __ b(&done);
4638  }
4639 
4640  // Slow case: Call the runtime system to do the number allocation.
4641  __ bind(&slow);
4642  {
4643  // TODO(3095996): Put a valid pointer value in the stack slot where the
4644  // result register is stored, as this register is in the pointer map, but
4645  // contains an integer value.
4646  __ mov(dst, Operand::Zero());
4647 
4648  // Preserve the value of all registers.
4649  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
4650 
4651  // NumberTagI and NumberTagD use the context from the frame, rather than
4652  // the environment's HContext or HInlinedContext value.
4653  // They only call Runtime::kHiddenAllocateHeapNumber.
4654  // The corresponding HChange instructions are added in a phase that does
4655  // not have easy access to the local context.
4657  __ CallRuntimeSaveDoubles(Runtime::kHiddenAllocateHeapNumber);
4658  RecordSafepointWithRegisters(
4659  instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
4660  __ sub(r0, r0, Operand(kHeapObjectTag));
4661  __ StoreToSafepointRegisterSlot(r0, dst);
4662  }
4663 
4664  // Done. Store the value held in dbl_scratch into the newly allocated heap
4665  // number.
4666  __ bind(&done);
4667  __ vstr(dbl_scratch, dst, HeapNumber::kValueOffset);
4668  __ add(dst, dst, Operand(kHeapObjectTag));
4669 }
4670 
4671 
4672 void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
4673  class DeferredNumberTagD V8_FINAL : public LDeferredCode {
4674  public:
4675  DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
4676  : LDeferredCode(codegen), instr_(instr) { }
4677  virtual void Generate() V8_OVERRIDE {
4678  codegen()->DoDeferredNumberTagD(instr_);
4679  }
4680  virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
4681  private:
4682  LNumberTagD* instr_;
4683  };
4684 
4685  DwVfpRegister input_reg = ToDoubleRegister(instr->value());
4686  Register scratch = scratch0();
4687  Register reg = ToRegister(instr->result());
4688  Register temp1 = ToRegister(instr->temp());
4689  Register temp2 = ToRegister(instr->temp2());
4690 
4691  DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr);
4692  if (FLAG_inline_new) {
4693  __ LoadRoot(scratch, Heap::kHeapNumberMapRootIndex);
4694  // We want the untagged address first for performance
4695  __ AllocateHeapNumber(reg, temp1, temp2, scratch, deferred->entry(),
4696  DONT_TAG_RESULT);
4697  } else {
4698  __ jmp(deferred->entry());
4699  }
4700  __ bind(deferred->exit());
4701  __ vstr(input_reg, reg, HeapNumber::kValueOffset);
4702  // Now that we have finished with the object's real address, tag it.
4703  __ add(reg, reg, Operand(kHeapObjectTag));
4704 }
4705 
4706 
4707 void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
4708  // TODO(3095996): Get rid of this. For now, we need to make the
4709  // result register contain a valid pointer because it is already
4710  // contained in the register pointer map.
4711  Register reg = ToRegister(instr->result());
4712  __ mov(reg, Operand::Zero());
4713 
4714  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
4715  // NumberTagI and NumberTagD use the context from the frame, rather than
4716  // the environment's HContext or HInlinedContext value.
4717  // They only call Runtime::kHiddenAllocateHeapNumber.
4718  // The corresponding HChange instructions are added in a phase that does
4719  // not have easy access to the local context.
4721  __ CallRuntimeSaveDoubles(Runtime::kHiddenAllocateHeapNumber);
4722  RecordSafepointWithRegisters(
4723  instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
4724  __ sub(r0, r0, Operand(kHeapObjectTag));
4725  __ StoreToSafepointRegisterSlot(r0, reg);
4726 }
4727 
4728 
4729 void LCodeGen::DoSmiTag(LSmiTag* instr) {
4730  HChange* hchange = instr->hydrogen();
4731  Register input = ToRegister(instr->value());
4732  Register output = ToRegister(instr->result());
4733  if (hchange->CheckFlag(HValue::kCanOverflow) &&
4734  hchange->value()->CheckFlag(HValue::kUint32)) {
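 // A uint32 value only fits in a smi when it is below 1 << 30, i.e. when its
 // top two bits are clear; deoptimize otherwise.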
4735  __ tst(input, Operand(0xc0000000));
4736  DeoptimizeIf(ne, instr->environment());
4737  }
4738  if (hchange->CheckFlag(HValue::kCanOverflow) &&
4739  !hchange->value()->CheckFlag(HValue::kUint32)) {
4740  __ SmiTag(output, input, SetCC);
4741  DeoptimizeIf(vs, instr->environment());
4742  } else {
4743  __ SmiTag(output, input);
4744  }
4745 }
4746 
4747 
4748 void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
4749  Register input = ToRegister(instr->value());
4750  Register result = ToRegister(instr->result());
4751  if (instr->needs_check()) {
4753  // If the input is a HeapObject, SmiUntag will set the carry flag.
4754  __ SmiUntag(result, input, SetCC);
4755  DeoptimizeIf(cs, instr->environment());
4756  } else {
4757  __ SmiUntag(result, input);
4758  }
4759 }
4760 
4761 
4762 void LCodeGen::EmitNumberUntagD(Register input_reg,
4763  DwVfpRegister result_reg,
4764  bool can_convert_undefined_to_nan,
4765  bool deoptimize_on_minus_zero,
4766  LEnvironment* env,
4767  NumberUntagDMode mode) {
4768  Register scratch = scratch0();
4769  SwVfpRegister flt_scratch = double_scratch0().low();
4770  ASSERT(!result_reg.is(double_scratch0()));
4771  Label convert, load_smi, done;
4772  if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
4773  // Smi check.
4774  __ UntagAndJumpIfSmi(scratch, input_reg, &load_smi);
4775  // Heap number map check.
4776  __ ldr(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
4777  __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
4778  __ cmp(scratch, Operand(ip));
4779  if (can_convert_undefined_to_nan) {
4780  __ b(ne, &convert);
4781  } else {
4782  DeoptimizeIf(ne, env);
4783  }
4784  // load heap number
4785  __ vldr(result_reg, input_reg, HeapNumber::kValueOffset - kHeapObjectTag);
4786  if (deoptimize_on_minus_zero) {
4787  __ VmovLow(scratch, result_reg);
4788  __ cmp(scratch, Operand::Zero());
4789  __ b(ne, &done);
4790  __ VmovHigh(scratch, result_reg);
4791  __ cmp(scratch, Operand(HeapNumber::kSignMask));
4792  DeoptimizeIf(eq, env);
4793  }
4794  __ jmp(&done);
4795  if (can_convert_undefined_to_nan) {
4796  __ bind(&convert);
4797  // Convert undefined (and hole) to NaN.
4798  __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
4799  __ cmp(input_reg, Operand(ip));
4800  DeoptimizeIf(ne, env);
4801  __ LoadRoot(scratch, Heap::kNanValueRootIndex);
4802  __ vldr(result_reg, scratch, HeapNumber::kValueOffset - kHeapObjectTag);
4803  __ jmp(&done);
4804  }
4805  } else {
4806  __ SmiUntag(scratch, input_reg);
4808  }
4809  // Smi to double register conversion
4810  __ bind(&load_smi);
4811  // scratch: untagged value of input_reg
4812  __ vmov(flt_scratch, scratch);
4813  __ vcvt_f64_s32(result_reg, flt_scratch);
4814  __ bind(&done);
4815 }
4816 
4817 
4818 void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
4819  Register input_reg = ToRegister(instr->value());
4820  Register scratch1 = scratch0();
4821  Register scratch2 = ToRegister(instr->temp());
4822  LowDwVfpRegister double_scratch = double_scratch0();
4823  DwVfpRegister double_scratch2 = ToDoubleRegister(instr->temp2());
4824 
4825  ASSERT(!scratch1.is(input_reg) && !scratch1.is(scratch2));
4826  ASSERT(!scratch2.is(input_reg) && !scratch2.is(scratch1));
4827 
4828  Label done;
4829 
4830  // The input was optimistically untagged; revert it.
4831  // The carry flag is set when we reach this deferred code as we just executed
4832  // SmiUntag(heap_object, SetCC)
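 // Adding the untagged value to itself plus the carry (adc) rebuilds the
 // original tagged pointer in scratch2: 2 * untagged + 1.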
4834  __ adc(scratch2, input_reg, Operand(input_reg));
4835 
4836  // Heap number map check.
4837  __ ldr(scratch1, FieldMemOperand(scratch2, HeapObject::kMapOffset));
4838  __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
4839  __ cmp(scratch1, Operand(ip));
4840 
4841  if (instr->truncating()) {
4842  // Performs a truncating conversion of a floating point number as used by
4843  // the JS bitwise operations.
4844  Label no_heap_number, check_bools, check_false;
4845  __ b(ne, &no_heap_number);
4846  __ TruncateHeapNumberToI(input_reg, scratch2);
4847  __ b(&done);
4848 
4849  // Check for Oddballs. Undefined/False is converted to zero and True to one
4850  // for truncating conversions.
4851  __ bind(&no_heap_number);
4852  __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
4853  __ cmp(scratch2, Operand(ip));
4854  __ b(ne, &check_bools);
4855  __ mov(input_reg, Operand::Zero());
4856  __ b(&done);
4857 
4858  __ bind(&check_bools);
4859  __ LoadRoot(ip, Heap::kTrueValueRootIndex);
4860  __ cmp(scratch2, Operand(ip));
4861  __ b(ne, &check_false);
4862  __ mov(input_reg, Operand(1));
4863  __ b(&done);
4864 
4865  __ bind(&check_false);
4866  __ LoadRoot(ip, Heap::kFalseValueRootIndex);
4867  __ cmp(scratch2, Operand(ip));
4868  DeoptimizeIf(ne, instr->environment());
4869  __ mov(input_reg, Operand::Zero());
4870  __ b(&done);
4871  } else {
4872  // Deoptimize if we don't have a heap number.
4873  DeoptimizeIf(ne, instr->environment());
4874 
4875  __ sub(ip, scratch2, Operand(kHeapObjectTag));
4876  __ vldr(double_scratch2, ip, HeapNumber::kValueOffset);
4877  __ TryDoubleToInt32Exact(input_reg, double_scratch2, double_scratch);
4878  DeoptimizeIf(ne, instr->environment());
4879 
4880  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
4881  __ cmp(input_reg, Operand::Zero());
4882  __ b(ne, &done);
4883  __ VmovHigh(scratch1, double_scratch2);
4884  __ tst(scratch1, Operand(HeapNumber::kSignMask));
4885  DeoptimizeIf(ne, instr->environment());
4886  }
4887  }
4888  __ bind(&done);
4889 }
4890 
4891 
4892 void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
4893  class DeferredTaggedToI V8_FINAL : public LDeferredCode {
4894  public:
4895  DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
4896  : LDeferredCode(codegen), instr_(instr) { }
4897  virtual void Generate() V8_OVERRIDE {
4898  codegen()->DoDeferredTaggedToI(instr_);
4899  }
4900  virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
4901  private:
4902  LTaggedToI* instr_;
4903  };
4904 
4905  LOperand* input = instr->value();
4906  ASSERT(input->IsRegister());
4907  ASSERT(input->Equals(instr->result()));
4908 
4909  Register input_reg = ToRegister(input);
4910 
4911  if (instr->hydrogen()->value()->representation().IsSmi()) {
4912  __ SmiUntag(input_reg);
4913  } else {
4914  DeferredTaggedToI* deferred = new(zone()) DeferredTaggedToI(this, instr);
4915 
4916  // Optimistically untag the input.
4917  // If the input is a HeapObject, SmiUntag will set the carry flag.
4918  __ SmiUntag(input_reg, SetCC);
4919  // Branch to deferred code if the input was tagged.
4920  // The deferred code will take care of restoring the tag.
4921  __ b(cs, deferred->entry());
4922  __ bind(deferred->exit());
4923  }
4924 }
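// Note on the "optimistic untagging" above (a sketch based on the ARM smi
// encoding, where kSmiTag == 0, kSmiTagSize == 1 and kHeapObjectTag == 1):
// SmiUntag(reg, SetCC) arithmetically shifts the value right by one and the
// shifted-out tag bit lands in the carry flag. For a smi the carry is clear
// and the shifted value is already the untagged integer; for a heap object
// the carry is set and control goes to the deferred code, which restores the
// original pointer with adc(scratch2, input_reg, input_reg), i.e.
// 2 * input_reg + carry.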
4925 
4926 
4927 void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
4928  LOperand* input = instr->value();
4929  ASSERT(input->IsRegister());
4930  LOperand* result = instr->result();
4931  ASSERT(result->IsDoubleRegister());
4932 
4933  Register input_reg = ToRegister(input);
4934  DwVfpRegister result_reg = ToDoubleRegister(result);
4935 
4936  HValue* value = instr->hydrogen()->value();
4937  NumberUntagDMode mode = value->representation().IsSmi()
4938  ? NUMBER_CANDIDATE_IS_SMI : NUMBER_CANDIDATE_IS_ANY_TAGGED;
4939 
4940  EmitNumberUntagD(input_reg, result_reg,
4941  instr->hydrogen()->can_convert_undefined_to_nan(),
4942  instr->hydrogen()->deoptimize_on_minus_zero(),
4943  instr->environment(),
4944  mode);
4945 }
4946 
4947 
4948 void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
4949  Register result_reg = ToRegister(instr->result());
4950  Register scratch1 = scratch0();
4951  DwVfpRegister double_input = ToDoubleRegister(instr->value());
4952  LowDwVfpRegister double_scratch = double_scratch0();
4953 
4954  if (instr->truncating()) {
4955  __ TruncateDoubleToI(result_reg, double_input);
4956  } else {
4957  __ TryDoubleToInt32Exact(result_reg, double_input, double_scratch);
4958  // Deoptimize if the input wasn't an int32 (inside a double).
4959  DeoptimizeIf(ne, instr->environment());
4960  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
4961  Label done;
4962  __ cmp(result_reg, Operand::Zero());
4963  __ b(ne, &done);
4964  __ VmovHigh(scratch1, double_input);
4965  __ tst(scratch1, Operand(HeapNumber::kSignMask));
4966  DeoptimizeIf(ne, instr->environment());
4967  __ bind(&done);
4968  }
4969  }
4970 }
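// Note on the minus-zero handling above (sketch): an exact double-to-int32
// conversion maps both +0.0 and -0.0 to the integer 0, so when the result is
// zero the code additionally inspects the sign bit in the upper word of the
// double (-0.0 has bit pattern 0x8000000000000000) and deoptimizes if it is
// set.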
4971 
4972 
4973 void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
4974  Register result_reg = ToRegister(instr->result());
4975  Register scratch1 = scratch0();
4976  DwVfpRegister double_input = ToDoubleRegister(instr->value());
4977  LowDwVfpRegister double_scratch = double_scratch0();
4978 
4979  if (instr->truncating()) {
4980  __ TruncateDoubleToI(result_reg, double_input);
4981  } else {
4982  __ TryDoubleToInt32Exact(result_reg, double_input, double_scratch);
4983  // Deoptimize if the input wasn't an int32 (inside a double).
4984  DeoptimizeIf(ne, instr->environment());
4985  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
4986  Label done;
4987  __ cmp(result_reg, Operand::Zero());
4988  __ b(ne, &done);
4989  __ VmovHigh(scratch1, double_input);
4990  __ tst(scratch1, Operand(HeapNumber::kSignMask));
4991  DeoptimizeIf(ne, instr->environment());
4992  __ bind(&done);
4993  }
4994  }
4995  __ SmiTag(result_reg, SetCC);
4996  DeoptimizeIf(vs, instr->environment());
4997 }
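// Note on the final SmiTag above (sketch): tagging adds the value to itself
// (effectively a left shift by one) with the flags updated, so only values
// that fit in a 31-bit signed integer (-2^30 .. 2^30 - 1) survive without
// signed overflow; the 'vs' condition catches the overflow and forces a
// deoptimization.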
4998 
4999 
5000 void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
5001  LOperand* input = instr->value();
5002  __ SmiTst(ToRegister(input));
5003  DeoptimizeIf(ne, instr->environment());
5004 }
5005 
5006 
5007 void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
5008  if (!instr->hydrogen()->value()->IsHeapObject()) {
5009  LOperand* input = instr->value();
5010  __ SmiTst(ToRegister(input));
5011  DeoptimizeIf(eq, instr->environment());
5012  }
5013 }
5014 
5015 
5016 void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
5017  Register input = ToRegister(instr->value());
5018  Register scratch = scratch0();
5019 
5020  __ ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
5021  __ ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
5022 
5023  if (instr->hydrogen()->is_interval_check()) {
5024  InstanceType first;
5025  InstanceType last;
5026  instr->hydrogen()->GetCheckInterval(&first, &last);
5027 
5028  __ cmp(scratch, Operand(first));
5029 
5030  // If there is only one type in the interval, check for equality.
5031  if (first == last) {
5032  DeoptimizeIf(ne, instr->environment());
5033  } else {
5034  DeoptimizeIf(lo, instr->environment());
5035  // Omit check for the last type.
5036  if (last != LAST_TYPE) {
5037  __ cmp(scratch, Operand(last));
5038  DeoptimizeIf(hi, instr->environment());
5039  }
5040  }
5041  } else {
5042  uint8_t mask;
5043  uint8_t tag;
5044  instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);
5045 
5046  if (IsPowerOf2(mask)) {
5047  ASSERT(tag == 0 || IsPowerOf2(tag));
5048  __ tst(scratch, Operand(mask));
5049  DeoptimizeIf(tag == 0 ? ne : eq, instr->environment());
5050  } else {
5051  __ and_(scratch, scratch, Operand(mask));
5052  __ cmp(scratch, Operand(tag));
5053  DeoptimizeIf(ne, instr->environment());
5054  }
5055  }
5056 }
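// Note on the mask/tag form of the check above (sketch): the instance type is
// tested as (type & mask) == tag. When the mask is a single bit, a plain tst
// is enough: with tag == 0 the bit must be clear (deopt on ne), otherwise the
// bit must be set (deopt on eq).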
5057 
5058 
5059 void LCodeGen::DoCheckValue(LCheckValue* instr) {
5060  Register reg = ToRegister(instr->value());
5061  Handle<HeapObject> object = instr->hydrogen()->object().handle();
5062  AllowDeferredHandleDereference smi_check;
5063  if (isolate()->heap()->InNewSpace(*object)) {
5064  Register reg = ToRegister(instr->value());
5065  Handle<Cell> cell = isolate()->factory()->NewCell(object);
5066  __ mov(ip, Operand(Handle<Object>(cell)));
5067  __ ldr(ip, FieldMemOperand(ip, Cell::kValueOffset));
5068  __ cmp(reg, ip);
5069  } else {
5070  __ cmp(reg, Operand(object));
5071  }
5072  DeoptimizeIf(ne, instr->environment());
5073 }
5074 
5075 
5076 void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
5077  {
5078  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
5079  __ push(object);
5080  __ mov(cp, Operand::Zero());
5081  __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance);
5082  RecordSafepointWithRegisters(
5083  instr->pointer_map(), 1, Safepoint::kNoLazyDeopt);
5084  __ StoreToSafepointRegisterSlot(r0, scratch0());
5085  }
5086  __ tst(scratch0(), Operand(kSmiTagMask));
5087  DeoptimizeIf(eq, instr->environment());
5088 }
5089 
5090 
5091 void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
5092  class DeferredCheckMaps V8_FINAL : public LDeferredCode {
5093  public:
5094  DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object)
5095  : LDeferredCode(codegen), instr_(instr), object_(object) {
5096  SetExit(check_maps());
5097  }
5098  virtual void Generate() V8_OVERRIDE {
5099  codegen()->DoDeferredInstanceMigration(instr_, object_);
5100  }
5101  Label* check_maps() { return &check_maps_; }
5102  virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
5103  private:
5104  LCheckMaps* instr_;
5105  Label check_maps_;
5106  Register object_;
5107  };
5108 
5109  if (instr->hydrogen()->CanOmitMapChecks()) return;
5110  Register map_reg = scratch0();
5111 
5112  LOperand* input = instr->value();
5113  ASSERT(input->IsRegister());
5114  Register reg = ToRegister(input);
5115 
5116  __ ldr(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset));
5117 
5118  DeferredCheckMaps* deferred = NULL;
5119  if (instr->hydrogen()->has_migration_target()) {
5120  deferred = new(zone()) DeferredCheckMaps(this, instr, reg);
5121  __ bind(deferred->check_maps());
5122  }
5123 
5124  UniqueSet<Map> map_set = instr->hydrogen()->map_set();
5125  Label success;
5126  for (int i = 0; i < map_set.size() - 1; i++) {
5127  Handle<Map> map = map_set.at(i).handle();
5128  __ CompareMap(map_reg, map, &success);
5129  __ b(eq, &success);
5130  }
5131 
5132  Handle<Map> map = map_set.at(map_set.size() - 1).handle();
5133  __ CompareMap(map_reg, map, &success);
5134  if (instr->hydrogen()->has_migration_target()) {
5135  __ b(ne, deferred->entry());
5136  } else {
5137  DeoptimizeIf(ne, instr->environment());
5138  }
5139 
5140  __ bind(&success);
5141 }
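// Note on the map check above (sketch): every map in the set except the last
// branches to 'success' on a match. The final compare either deoptimizes on a
// mismatch or, when one of the maps is a migration target, jumps to deferred
// code that calls Runtime::kTryMigrateInstance, deoptimizes if the attempt
// fails, and otherwise re-runs the map check from the 'check_maps' label.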
5142 
5143 
5144 void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
5145  DwVfpRegister value_reg = ToDoubleRegister(instr->unclamped());
5146  Register result_reg = ToRegister(instr->result());
5147  __ ClampDoubleToUint8(result_reg, value_reg, double_scratch0());
5148 }
5149 
5150 
5151 void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
5152  Register unclamped_reg = ToRegister(instr->unclamped());
5153  Register result_reg = ToRegister(instr->result());
5154  __ ClampUint8(result_reg, unclamped_reg);
5155 }
5156 
5157 
5158 void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
5159  Register scratch = scratch0();
5160  Register input_reg = ToRegister(instr->unclamped());
5161  Register result_reg = ToRegister(instr->result());
5162  DwVfpRegister temp_reg = ToDoubleRegister(instr->temp());
5163  Label is_smi, done, heap_number;
5164 
5165  // Both smi and heap number cases are handled.
5166  __ UntagAndJumpIfSmi(result_reg, input_reg, &is_smi);
5167 
5168  // Check for heap number
5169  __ ldr(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
5170  __ cmp(scratch, Operand(factory()->heap_number_map()));
5171  __ b(eq, &heap_number);
5172 
5173  // Check for undefined. Undefined is converted to zero for clamping
5174  // conversions.
5175  __ cmp(input_reg, Operand(factory()->undefined_value()));
5176  DeoptimizeIf(ne, instr->environment());
5177  __ mov(result_reg, Operand::Zero());
5178  __ jmp(&done);
5179 
5180  // Heap number
5181  __ bind(&heap_number);
5182  __ vldr(temp_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset));
5183  __ ClampDoubleToUint8(result_reg, temp_reg, double_scratch0());
5184  __ jmp(&done);
5185 
5186  // smi
5187  __ bind(&is_smi);
5188  __ ClampUint8(result_reg, result_reg);
5189 
5190  __ bind(&done);
5191 }
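// Note on the clamp above (sketch): three inputs are accepted. Smis are
// untagged and clamped directly, heap numbers are clamped as doubles, and
// undefined is mapped to 0; anything else deoptimizes. Clamping saturates the
// value into the 0..255 range used by uint8 (pixel) arrays.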
5192 
5193 
5194 void LCodeGen::DoDoubleBits(LDoubleBits* instr) {
5195  DwVfpRegister value_reg = ToDoubleRegister(instr->value());
5196  Register result_reg = ToRegister(instr->result());
5197  if (instr->hydrogen()->bits() == HDoubleBits::HIGH) {
5198  __ VmovHigh(result_reg, value_reg);
5199  } else {
5200  __ VmovLow(result_reg, value_reg);
5201  }
5202 }
5203 
5204 
5205 void LCodeGen::DoConstructDouble(LConstructDouble* instr) {
5206  Register hi_reg = ToRegister(instr->hi());
5207  Register lo_reg = ToRegister(instr->lo());
5208  DwVfpRegister result_reg = ToDoubleRegister(instr->result());
5209  __ VmovHigh(result_reg, hi_reg);
5210  __ VmovLow(result_reg, lo_reg);
5211 }
5212 
5213 
5214 void LCodeGen::DoAllocate(LAllocate* instr) {
5215  class DeferredAllocate V8_FINAL : public LDeferredCode {
5216  public:
5217  DeferredAllocate(LCodeGen* codegen, LAllocate* instr)
5218  : LDeferredCode(codegen), instr_(instr) { }
5219  virtual void Generate() V8_OVERRIDE {
5220  codegen()->DoDeferredAllocate(instr_);
5221  }
5222  virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
5223  private:
5224  LAllocate* instr_;
5225  };
5226 
5227  DeferredAllocate* deferred =
5228  new(zone()) DeferredAllocate(this, instr);
5229 
5230  Register result = ToRegister(instr->result());
5231  Register scratch = ToRegister(instr->temp1());
5232  Register scratch2 = ToRegister(instr->temp2());
5233 
5234  // Allocate memory for the object.
5235  AllocationFlags flags = TAG_OBJECT;
5236  if (instr->hydrogen()->MustAllocateDoubleAligned()) {
5237  flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
5238  }
5239  if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
5240  ASSERT(!instr->hydrogen()->IsOldDataSpaceAllocation());
5241  ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
5242  flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_POINTER_SPACE);
5243  } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
5244  ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
5245  flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_DATA_SPACE);
5246  }
5247 
5248  if (instr->size()->IsConstantOperand()) {
5249  int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
5250  if (size <= Page::kMaxRegularHeapObjectSize) {
5251  __ Allocate(size, result, scratch, scratch2, deferred->entry(), flags);
5252  } else {
5253  __ jmp(deferred->entry());
5254  }
5255  } else {
5256  Register size = ToRegister(instr->size());
5257  __ Allocate(size,
5258  result,
5259  scratch,
5260  scratch2,
5261  deferred->entry(),
5262  flags);
5263  }
5264 
5265  __ bind(deferred->exit());
5266 
5267  if (instr->hydrogen()->MustPrefillWithFiller()) {
5268  if (instr->size()->IsConstantOperand()) {
5269  int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
5270  __ mov(scratch, Operand(size));
5271  } else {
5272  scratch = ToRegister(instr->size());
5273  }
5274  __ sub(scratch, scratch, Operand(kPointerSize));
5275  __ sub(result, result, Operand(kHeapObjectTag));
5276  Label loop;
5277  __ bind(&loop);
5278  __ mov(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
5279  __ str(scratch2, MemOperand(result, scratch));
5280  __ sub(scratch, scratch, Operand(kPointerSize));
5281  __ cmp(scratch, Operand(0));
5282  __ b(ge, &loop);
5283  __ add(result, result, Operand(kHeapObjectTag));
5284  }
5285 }
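// Note on the prefill loop above (sketch): when the object must be prefilled,
// the allocated region is walked backwards one word at a time, starting at
// offset size - kPointerSize, and each word is overwritten with the
// one-pointer filler map so the heap stays iterable before the real fields
// are written. The result pointer is temporarily untagged for the stores and
// re-tagged afterwards.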
5286 
5287 
5288 void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
5289  Register result = ToRegister(instr->result());
5290 
5291  // TODO(3095996): Get rid of this. For now, we need to make the
5292  // result register contain a valid pointer because it is already
5293  // contained in the register pointer map.
5294  __ mov(result, Operand(Smi::FromInt(0)));
5295 
5296  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
5297  if (instr->size()->IsRegister()) {
5298  Register size = ToRegister(instr->size());
5299  ASSERT(!size.is(result));
5300  __ SmiTag(size);
5301  __ push(size);
5302  } else {
5303  int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
5304  __ Push(Smi::FromInt(size));
5305  }
5306 
5307  int flags = AllocateDoubleAlignFlag::encode(
5308  instr->hydrogen()->MustAllocateDoubleAligned());
5309  if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
5310  ASSERT(!instr->hydrogen()->IsOldDataSpaceAllocation());
5311  ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
5312  flags = AllocateTargetSpace::update(flags, OLD_POINTER_SPACE);
5313  } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
5314  ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
5315  flags = AllocateTargetSpace::update(flags, OLD_DATA_SPACE);
5316  } else {
5317  flags = AllocateTargetSpace::update(flags, NEW_SPACE);
5318  }
5319  __ Push(Smi::FromInt(flags));
5320 
5321  CallRuntimeFromDeferred(
5322  Runtime::kHiddenAllocateInTargetSpace, 2, instr, instr->context());
5323  __ StoreToSafepointRegisterSlot(r0, result);
5324 }
5325 
5326 
5327 void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
5328  ASSERT(ToRegister(instr->value()).is(r0));
5329  __ push(r0);
5330  CallRuntime(Runtime::kToFastProperties, 1, instr);
5331 }
5332 
5333 
5334 void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
5335  ASSERT(ToRegister(instr->context()).is(cp));
5336  Label materialized;
5337  // Registers will be used as follows:
5338  // r6 = literals array.
5339  // r1 = regexp literal.
5340  // r0 = regexp literal clone.
5341  // r2-5 are used as temporaries.
5342  int literal_offset =
5343  FixedArray::OffsetOfElementAt(instr->hydrogen()->literal_index());
5344  __ Move(r6, instr->hydrogen()->literals());
5345  __ ldr(r1, FieldMemOperand(r6, literal_offset));
5346  __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
5347  __ cmp(r1, ip);
5348  __ b(ne, &materialized);
5349 
5350  // Create regexp literal using runtime function
5351  // Result will be in r0.
5352  __ mov(r5, Operand(Smi::FromInt(instr->hydrogen()->literal_index())));
5353  __ mov(r4, Operand(instr->hydrogen()->pattern()));
5354  __ mov(r3, Operand(instr->hydrogen()->flags()));
5355  __ Push(r6, r5, r4, r3);
5356  CallRuntime(Runtime::kHiddenMaterializeRegExpLiteral, 4, instr);
5357  __ mov(r1, r0);
5358 
5359  __ bind(&materialized);
5360  int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
5361  Label allocated, runtime_allocate;
5362 
5363  __ Allocate(size, r0, r2, r3, &runtime_allocate, TAG_OBJECT);
5364  __ jmp(&allocated);
5365 
5366  __ bind(&runtime_allocate);
5367  __ mov(r0, Operand(Smi::FromInt(size)));
5368  __ Push(r1, r0);
5369  CallRuntime(Runtime::kHiddenAllocateInNewSpace, 1, instr);
5370  __ pop(r1);
5371 
5372  __ bind(&allocated);
5373  // Copy the content into the newly allocated memory.
5374  __ CopyFields(r0, r1, double_scratch0(), size / kPointerSize);
5375 }
5376 
5377 
5378 void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
5379  ASSERT(ToRegister(instr->context()).is(cp));
5380  // Use the fast case closure allocation code that allocates in new
5381  // space for nested functions that don't need literals cloning.
5382  bool pretenure = instr->hydrogen()->pretenure();
5383  if (!pretenure && instr->hydrogen()->has_no_literals()) {
5384  FastNewClosureStub stub(instr->hydrogen()->strict_mode(),
5385  instr->hydrogen()->is_generator());
5386  __ mov(r2, Operand(instr->hydrogen()->shared_info()));
5387  CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
5388  } else {
5389  __ mov(r2, Operand(instr->hydrogen()->shared_info()));
5390  __ mov(r1, Operand(pretenure ? factory()->true_value()
5391  : factory()->false_value()));
5392  __ Push(cp, r2, r1);
5393  CallRuntime(Runtime::kHiddenNewClosure, 3, instr);
5394  }
5395 }
5396 
5397 
5398 void LCodeGen::DoTypeof(LTypeof* instr) {
5399  Register input = ToRegister(instr->value());
5400  __ push(input);
5401  CallRuntime(Runtime::kTypeof, 1, instr);
5402 }
5403 
5404 
5405 void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
5406  Register input = ToRegister(instr->value());
5407 
5408  Condition final_branch_condition = EmitTypeofIs(instr->TrueLabel(chunk_),
5409  instr->FalseLabel(chunk_),
5410  input,
5411  instr->type_literal());
5412  if (final_branch_condition != kNoCondition) {
5413  EmitBranch(instr, final_branch_condition);
5414  }
5415 }
5416 
5417 
5418 Condition LCodeGen::EmitTypeofIs(Label* true_label,
5419  Label* false_label,
5420  Register input,
5421  Handle<String> type_name) {
5422  Condition final_branch_condition = kNoCondition;
5423  Register scratch = scratch0();
5424  if (type_name->Equals(heap()->number_string())) {
5425  __ JumpIfSmi(input, true_label);
5426  __ ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
5427  __ CompareRoot(scratch, Heap::kHeapNumberMapRootIndex);
5428  final_branch_condition = eq;
5429 
5430  } else if (type_name->Equals(heap()->string_string())) {
5431  __ JumpIfSmi(input, false_label);
5432  __ CompareObjectType(input, scratch, no_reg, FIRST_NONSTRING_TYPE);
5433  __ b(ge, false_label);
5434  __ ldrb(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
5435  __ tst(scratch, Operand(1 << Map::kIsUndetectable));
5436  final_branch_condition = eq;
5437 
5438  } else if (type_name->Equals(heap()->symbol_string())) {
5439  __ JumpIfSmi(input, false_label);
5440  __ CompareObjectType(input, scratch, no_reg, SYMBOL_TYPE);
5441  final_branch_condition = eq;
5442 
5443  } else if (type_name->Equals(heap()->boolean_string())) {
5444  __ CompareRoot(input, Heap::kTrueValueRootIndex);
5445  __ b(eq, true_label);
5446  __ CompareRoot(input, Heap::kFalseValueRootIndex);
5447  final_branch_condition = eq;
5448 
5449  } else if (FLAG_harmony_typeof && type_name->Equals(heap()->null_string())) {
5450  __ CompareRoot(input, Heap::kNullValueRootIndex);
5451  final_branch_condition = eq;
5452 
5453  } else if (type_name->Equals(heap()->undefined_string())) {
5454  __ CompareRoot(input, Heap::kUndefinedValueRootIndex);
5455  __ b(eq, true_label);
5456  __ JumpIfSmi(input, false_label);
5457  // Check for undetectable objects => true.
5458  __ ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
5459  __ ldrb(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
5460  __ tst(scratch, Operand(1 << Map::kIsUndetectable));
5461  final_branch_condition = ne;
5462 
5463  } else if (type_name->Equals(heap()->function_string())) {
5465  Register type_reg = scratch;
5466  __ JumpIfSmi(input, false_label);
5467  __ CompareObjectType(input, scratch, type_reg, JS_FUNCTION_TYPE);
5468  __ b(eq, true_label);
5469  __ cmp(type_reg, Operand(JS_FUNCTION_PROXY_TYPE));
5470  final_branch_condition = eq;
5471 
5472  } else if (type_name->Equals(heap()->object_string())) {
5473  Register map = scratch;
5474  __ JumpIfSmi(input, false_label);
5475  if (!FLAG_harmony_typeof) {
5476  __ CompareRoot(input, Heap::kNullValueRootIndex);
5477  __ b(eq, true_label);
5478  }
5479  __ CheckObjectTypeRange(input,
5480  map,
5481  FIRST_NONCALLABLE_SPEC_OBJECT_TYPE,
5482  LAST_NONCALLABLE_SPEC_OBJECT_TYPE,
5483  false_label);
5484  // Check for undetectable objects => false.
5485  __ ldrb(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
5486  __ tst(scratch, Operand(1 << Map::kIsUndetectable));
5487  final_branch_condition = eq;
5488 
5489  } else {
5490  __ b(false_label);
5491  }
5492 
5493  return final_branch_condition;
5494 }
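// Note on EmitTypeofIs (sketch): each recognized typeof string is lowered to
// a compare whose condition code is returned to the caller, which then emits
// a single conditional branch. Undetectable objects are deliberately reported
// as "undefined" and excluded from the "object" and "string" cases; an
// unrecognized literal falls through to an unconditional jump to false_label.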
5495 
5496 
5497 void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
5498  Register temp1 = ToRegister(instr->temp());
5499 
5500  EmitIsConstructCall(temp1, scratch0());
5501  EmitBranch(instr, eq);
5502 }
5503 
5504 
5505 void LCodeGen::EmitIsConstructCall(Register temp1, Register temp2) {
5506  ASSERT(!temp1.is(temp2));
5507  // Get the frame pointer for the calling frame.
5508  __ ldr(temp1, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
5509 
5510  // Skip the arguments adaptor frame if it exists.
5511  __ ldr(temp2, MemOperand(temp1, StandardFrameConstants::kContextOffset));
5512  __ cmp(temp2, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
5513  __ ldr(temp1, MemOperand(temp1, StandardFrameConstants::kCallerFPOffset), eq);
5514 
5515  // Check the marker in the calling frame.
5516  __ ldr(temp1, MemOperand(temp1, StandardFrameConstants::kMarkerOffset));
5517  __ cmp(temp1, Operand(Smi::FromInt(StackFrame::CONSTRUCT)));
5518 }
5519 
5520 
5521 void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
5522  if (!info()->IsStub()) {
5523  // Ensure that we have enough space after the previous lazy-bailout
5524  // instruction for patching the code here.
5525  int current_pc = masm()->pc_offset();
5526  if (current_pc < last_lazy_deopt_pc_ + space_needed) {
5527  // Block literal pool emission for duration of padding.
5528  Assembler::BlockConstPoolScope block_const_pool(masm());
5529  int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
5530  ASSERT_EQ(0, padding_size % Assembler::kInstrSize);
5531  while (padding_size > 0) {
5532  __ nop();
5533  padding_size -= Assembler::kInstrSize;
5534  }
5535  }
5536  }
5537  last_lazy_deopt_pc_ = masm()->pc_offset();
5538 }
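// Note on the padding above (a sketch of the arithmetic): the patcher needs
// 'space_needed' bytes after the previous lazy-deopt site, so if only
// (current_pc - last_lazy_deopt_pc_) bytes have been emitted, the difference
// is filled with nops, one Assembler::kInstrSize (4 bytes on ARM) at a time.
// For example, if 12 bytes are required and 4 have been emitted, two nops are
// inserted.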
5539 
5540 
5541 void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
5542  last_lazy_deopt_pc_ = masm()->pc_offset();
5543  ASSERT(instr->HasEnvironment());
5544  LEnvironment* env = instr->environment();
5545  RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
5546  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
5547 }
5548 
5549 
5550 void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
5551  Deoptimizer::BailoutType type = instr->hydrogen()->type();
5552  // TODO(danno): Stubs expect all deopts to be lazy for historical reasons (the
5553  // needed return address), even though the implementation of LAZY and EAGER is
5554  // now identical. When LAZY is eventually completely folded into EAGER, remove
5555  // the special case below.
5556  if (info()->IsStub() && type == Deoptimizer::EAGER) {
5557  type = Deoptimizer::LAZY;
5558  }
5559 
5560  Comment(";;; deoptimize: %s", instr->hydrogen()->reason());
5561  DeoptimizeIf(al, instr->environment(), type);
5562 }
5563 
5564 
5565 void LCodeGen::DoDummy(LDummy* instr) {
5566  // Nothing to see here, move on!
5567 }
5568 
5569 
5570 void LCodeGen::DoDummyUse(LDummyUse* instr) {
5571  // Nothing to see here, move on!
5572 }
5573 
5574 
5575 void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
5576  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
5577  LoadContextFromDeferred(instr->context());
5578  __ CallRuntimeSaveDoubles(Runtime::kHiddenStackGuard);
5579  RecordSafepointWithLazyDeopt(
5580  instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
5581  ASSERT(instr->HasEnvironment());
5582  LEnvironment* env = instr->environment();
5583  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
5584 }
5585 
5586 
5587 void LCodeGen::DoStackCheck(LStackCheck* instr) {
5588  class DeferredStackCheck V8_FINAL : public LDeferredCode {
5589  public:
5590  DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
5591  : LDeferredCode(codegen), instr_(instr) { }
5592  virtual void Generate() V8_OVERRIDE {
5593  codegen()->DoDeferredStackCheck(instr_);
5594  }
5595  virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
5596  private:
5597  LStackCheck* instr_;
5598  };
5599 
5600  ASSERT(instr->HasEnvironment());
5601  LEnvironment* env = instr->environment();
5602  // There is no LLazyBailout instruction for stack-checks. We have to
5603  // prepare for lazy deoptimization explicitly here.
5604  if (instr->hydrogen()->is_function_entry()) {
5605  // Perform stack overflow check.
5606  Label done;
5607  __ LoadRoot(ip, Heap::kStackLimitRootIndex);
5608  __ cmp(sp, Operand(ip));
5609  __ b(hs, &done);
5610  PredictableCodeSizeScope predictable(masm_, 2 * Assembler::kInstrSize);
5611  ASSERT(instr->context()->IsRegister());
5612  ASSERT(ToRegister(instr->context()).is(cp));
5613  CallCode(isolate()->builtins()->StackCheck(),
5614  RelocInfo::CODE_TARGET,
5615  instr);
5616  __ bind(&done);
5617  } else {
5618  ASSERT(instr->hydrogen()->is_backwards_branch());
5619  // Perform stack overflow check if this goto needs it before jumping.
5620  DeferredStackCheck* deferred_stack_check =
5621  new(zone()) DeferredStackCheck(this, instr);
5622  __ LoadRoot(ip, Heap::kStackLimitRootIndex);
5623  __ cmp(sp, Operand(ip));
5624  __ b(lo, deferred_stack_check->entry());
5625  EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
5626  __ bind(instr->done_label());
5627  deferred_stack_check->SetExit(instr->done_label());
5628  RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
5629  // Don't record a deoptimization index for the safepoint here.
5630  // This will be done explicitly when emitting call and the safepoint in
5631  // the deferred code.
5632  }
5633 }
5634 
5635 
5636 void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
5637  // This is a pseudo-instruction that ensures that the environment here is
5638  // properly registered for deoptimization and records the assembler's PC
5639  // offset.
5640  LEnvironment* environment = instr->environment();
5641 
5642  // If the environment were already registered, we would have no way of
5643  // backpatching it with the spill slot operands.
5644  ASSERT(!environment->HasBeenRegistered());
5645  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
5646 
5647  GenerateOsrPrologue();
5648 }
5649 
5650 
5651 void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
5652  __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
5653  __ cmp(r0, ip);
5654  DeoptimizeIf(eq, instr->environment());
5655 
5656  Register null_value = r5;
5657  __ LoadRoot(null_value, Heap::kNullValueRootIndex);
5658  __ cmp(r0, null_value);
5659  DeoptimizeIf(eq, instr->environment());
5660 
5661  __ SmiTst(r0);
5662  DeoptimizeIf(eq, instr->environment());
5663 
5665  __ CompareObjectType(r0, r1, r1, LAST_JS_PROXY_TYPE);
5666  DeoptimizeIf(le, instr->environment());
5667 
5668  Label use_cache, call_runtime;
5669  __ CheckEnumCache(null_value, &call_runtime);
5670 
5671  __ ldr(r0, FieldMemOperand(r0, HeapObject::kMapOffset));
5672  __ b(&use_cache);
5673 
5674  // Get the set of properties to enumerate.
5675  __ bind(&call_runtime);
5676  __ push(r0);
5677  CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr);
5678 
5679  __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
5680  __ LoadRoot(ip, Heap::kMetaMapRootIndex);
5681  __ cmp(r1, ip);
5682  DeoptimizeIf(ne, instr->environment());
5683  __ bind(&use_cache);
5684 }
5685 
5686 
5687 void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
5688  Register map = ToRegister(instr->map());
5689  Register result = ToRegister(instr->result());
5690  Label load_cache, done;
5691  __ EnumLength(result, map);
5692  __ cmp(result, Operand(Smi::FromInt(0)));
5693  __ b(ne, &load_cache);
5694  __ mov(result, Operand(isolate()->factory()->empty_fixed_array()));
5695  __ jmp(&done);
5696 
5697  __ bind(&load_cache);
5698  __ LoadInstanceDescriptors(map, result);
5699  __ ldr(result,
5700  FieldMemOperand(result, DescriptorArray::kEnumCacheOffset));
5701  __ ldr(result,
5702  FieldMemOperand(result, FixedArray::SizeFor(instr->idx())));
5703  __ cmp(result, Operand::Zero());
5704  DeoptimizeIf(eq, instr->environment());
5705 
5706  __ bind(&done);
5707 }
5708 
5709 
5710 void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
5711  Register object = ToRegister(instr->value());
5712  Register map = ToRegister(instr->map());
5713  __ ldr(scratch0(), FieldMemOperand(object, HeapObject::kMapOffset));
5714  __ cmp(map, scratch0());
5715  DeoptimizeIf(ne, instr->environment());
5716 }
5717 
5718 
5719 void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
5720  Register object = ToRegister(instr->object());
5721  Register index = ToRegister(instr->index());
5722  Register result = ToRegister(instr->result());
5723  Register scratch = scratch0();
5724 
5725  Label out_of_object, done;
5726  __ cmp(index, Operand::Zero());
5727  __ b(lt, &out_of_object);
5728 
5729  __ add(scratch, object, Operand::PointerOffsetFromSmiKey(index));
5730  __ ldr(result, FieldMemOperand(scratch, JSObject::kHeaderSize));
5731 
5732  __ b(&done);
5733 
5734  __ bind(&out_of_object);
5735  __ ldr(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
5736  // The index is equal to the negated out-of-object property index plus 1.
5738  __ sub(scratch, result, Operand::PointerOffsetFromSmiKey(index));
5739  __ ldr(result, FieldMemOperand(scratch,
5740  FixedArray::kHeaderSize - kPointerSize));
5741  __ bind(&done);
5742 }
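// Note on DoLoadFieldByIndex (sketch): a non-negative smi index selects an
// in-object field at JSObject::kHeaderSize + index * kPointerSize, while a
// negative index selects a slot in the out-of-object properties backing
// store; Operand::PointerOffsetFromSmiKey turns the smi into a byte offset
// without untagging it first.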
5743 
5744 
5745 #undef __
5746 
5747 } } // namespace v8::internal