v8 3.25.30 (node 0.11.13)
V8 is Google's open source JavaScript engine.
lithium-codegen-arm64.cc
1 // Copyright 2013 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
4 // met:
5 //
6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided
11 // with the distribution.
12 // * Neither the name of Google Inc. nor the names of its
13 // contributors may be used to endorse or promote products derived
14 // from this software without specific prior written permission.
15 //
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 
28 #include "v8.h"
29 
30 #include "arm64/lithium-codegen-arm64.h"
31 #include "arm64/lithium-gap-resolver-arm64.h"
32 #include "code-stubs.h"
33 #include "stub-cache.h"
34 #include "hydrogen-osr.h"
35 
36 namespace v8 {
37 namespace internal {
38 
39 
40 class SafepointGenerator V8_FINAL : public CallWrapper {
41  public:
42  SafepointGenerator(LCodeGen* codegen,
43  LPointerMap* pointers,
44  Safepoint::DeoptMode mode)
45  : codegen_(codegen),
46  pointers_(pointers),
47  deopt_mode_(mode) { }
48  virtual ~SafepointGenerator() { }
49 
50  virtual void BeforeCall(int call_size) const { }
51 
52  virtual void AfterCall() const {
53  codegen_->RecordSafepoint(pointers_, deopt_mode_);
54  }
55 
56  private:
57  LCodeGen* codegen_;
58  LPointerMap* pointers_;
59  Safepoint::DeoptMode deopt_mode_;
60 };
61 
62 
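// Shorthand used throughout this file: '__ Op(...)' expands to 'masm()->Op(...)'.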
63 #define __ masm()->
64 
65 // Emit code to branch if the given condition holds.
66 // The code generated here doesn't modify the flags and they must have
67 // been set by some prior instructions.
68 //
69 // The EmitInverted function simply inverts the condition.
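// If the condition is al ("always"), the inverted branch would never be taken,
// so EmitInverted emits no code in that case.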
70 class BranchOnCondition : public BranchGenerator {
71  public:
72  BranchOnCondition(LCodeGen* codegen, Condition cond)
73  : BranchGenerator(codegen),
74  cond_(cond) { }
75 
76  virtual void Emit(Label* label) const {
77  __ B(cond_, label);
78  }
79 
80  virtual void EmitInverted(Label* label) const {
81  if (cond_ != al) {
82  __ B(InvertCondition(cond_), label);
83  }
84  }
85 
86  private:
87  Condition cond_;
88 };
89 
90 
91 // Emit code to compare lhs and rhs and branch if the condition holds.
92 // This uses MacroAssembler's CompareAndBranch function so it will handle
93 // converting the comparison to Cbz/Cbnz if the right-hand side is 0.
94 //
95 // EmitInverted still compares the two operands but inverts the condition.
96 class CompareAndBranch : public BranchGenerator {
97  public:
98  CompareAndBranch(LCodeGen* codegen,
99  Condition cond,
100  const Register& lhs,
101  const Operand& rhs)
102  : BranchGenerator(codegen),
103  cond_(cond),
104  lhs_(lhs),
105  rhs_(rhs) { }
106 
107  virtual void Emit(Label* label) const {
108  __ CompareAndBranch(lhs_, rhs_, cond_, label);
109  }
110 
111  virtual void EmitInverted(Label* label) const {
112  __ CompareAndBranch(lhs_, rhs_, InvertCondition(cond_), label);
113  }
114 
115  private:
116  Condition cond_;
117  const Register& lhs_;
118  const Operand& rhs_;
119 };
120 
121 
122 // Test the input with the given mask and branch if the condition holds.
123 // If the condition is 'eq' or 'ne' this will use MacroAssembler's
124 // TestAndBranchIfAllClear and TestAndBranchIfAnySet so it will handle the
125 // conversion to Tbz/Tbnz when possible.
126 class TestAndBranch : public BranchGenerator {
127  public:
128  TestAndBranch(LCodeGen* codegen,
129  Condition cond,
130  const Register& value,
131  uint64_t mask)
132  : BranchGenerator(codegen),
133  cond_(cond),
134  value_(value),
135  mask_(mask) { }
136 
137  virtual void Emit(Label* label) const {
138  switch (cond_) {
139  case eq:
140  __ TestAndBranchIfAllClear(value_, mask_, label);
141  break;
142  case ne:
143  __ TestAndBranchIfAnySet(value_, mask_, label);
144  break;
145  default:
146  __ Tst(value_, mask_);
147  __ B(cond_, label);
148  }
149  }
150 
151  virtual void EmitInverted(Label* label) const {
152  // The inverse of "all clear" is "any set" and vice versa.
153  switch (cond_) {
154  case eq:
155  __ TestAndBranchIfAnySet(value_, mask_, label);
156  break;
157  case ne:
158  __ TestAndBranchIfAllClear(value_, mask_, label);
159  break;
160  default:
161  __ Tst(value_, mask_);
162  __ B(InvertCondition(cond_), label);
163  }
164  }
165 
166  private:
167  Condition cond_;
168  const Register& value_;
169  uint64_t mask_;
170 };
171 
172 
173 // Test the input and branch if it is non-zero and not a NaN.
174 class BranchIfNonZeroNumber : public BranchGenerator {
175  public:
176  BranchIfNonZeroNumber(LCodeGen* codegen, const FPRegister& value,
177  const FPRegister& scratch)
178  : BranchGenerator(codegen), value_(value), scratch_(scratch) { }
179 
180  virtual void Emit(Label* label) const {
181  __ Fabs(scratch_, value_);
182  // Compare with 0.0. Because scratch_ is positive, the result can be one of
183  // nZCv (equal), nzCv (greater) or nzCV (unordered).
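 // (Upper-case letters denote set flags; lower-case letters denote cleared flags.)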
184  __ Fcmp(scratch_, 0.0);
185  __ B(gt, label);
186  }
187 
188  virtual void EmitInverted(Label* label) const {
189  __ Fabs(scratch_, value_);
190  __ Fcmp(scratch_, 0.0);
191  __ B(le, label);
192  }
193 
194  private:
195  const FPRegister& value_;
196  const FPRegister& scratch_;
197 };
198 
199 
200 // Test the input and branch if it is a heap number.
201 class BranchIfHeapNumber : public BranchGenerator {
202  public:
203  BranchIfHeapNumber(LCodeGen* codegen, const Register& value)
204  : BranchGenerator(codegen), value_(value) { }
205 
206  virtual void Emit(Label* label) const {
207  __ JumpIfHeapNumber(value_, label);
208  }
209 
210  virtual void EmitInverted(Label* label) const {
211  __ JumpIfNotHeapNumber(value_, label);
212  }
213 
214  private:
215  const Register& value_;
216 };
217 
218 
219 // Test the input and branch if it is the specified root value.
220 class BranchIfRoot : public BranchGenerator {
221  public:
222  BranchIfRoot(LCodeGen* codegen, const Register& value,
223  Heap::RootListIndex index)
224  : BranchGenerator(codegen), value_(value), index_(index) { }
225 
226  virtual void Emit(Label* label) const {
227  __ JumpIfRoot(value_, index_, label);
228  }
229 
230  virtual void EmitInverted(Label* label) const {
231  __ JumpIfNotRoot(value_, index_, label);
232  }
233 
234  private:
235  const Register& value_;
236  const Heap::RootListIndex index_;
237 };
238 
239 
240 void LCodeGen::WriteTranslation(LEnvironment* environment,
241  Translation* translation) {
242  if (environment == NULL) return;
243 
244  // The translation includes one command per value in the environment.
245  int translation_size = environment->translation_size();
246  // The output frame height does not include the parameters.
247  int height = translation_size - environment->parameter_count();
248 
249  WriteTranslation(environment->outer(), translation);
250  bool has_closure_id = !info()->closure().is_null() &&
251  !info()->closure().is_identical_to(environment->closure());
252  int closure_id = has_closure_id
253  ? DefineDeoptimizationLiteral(environment->closure())
254  : Translation::kSelfLiteralId;
255 
256  switch (environment->frame_type()) {
257  case JS_FUNCTION:
258  translation->BeginJSFrame(environment->ast_id(), closure_id, height);
259  break;
260  case JS_CONSTRUCT:
261  translation->BeginConstructStubFrame(closure_id, translation_size);
262  break;
263  case JS_GETTER:
264  ASSERT(translation_size == 1);
265  ASSERT(height == 0);
266  translation->BeginGetterStubFrame(closure_id);
267  break;
268  case JS_SETTER:
269  ASSERT(translation_size == 2);
270  ASSERT(height == 0);
271  translation->BeginSetterStubFrame(closure_id);
272  break;
273  case STUB:
274  translation->BeginCompiledStubFrame();
275  break;
276  case ARGUMENTS_ADAPTOR:
277  translation->BeginArgumentsAdaptorFrame(closure_id, translation_size);
278  break;
279  default:
280  UNREACHABLE();
281  }
282 
283  int object_index = 0;
284  int dematerialized_index = 0;
285  for (int i = 0; i < translation_size; ++i) {
286  LOperand* value = environment->values()->at(i);
287 
288  AddToTranslation(environment,
289  translation,
290  value,
291  environment->HasTaggedValueAt(i),
292  environment->HasUint32ValueAt(i),
293  &object_index,
294  &dematerialized_index);
295  }
296 }
297 
298 
299 void LCodeGen::AddToTranslation(LEnvironment* environment,
300  Translation* translation,
301  LOperand* op,
302  bool is_tagged,
303  bool is_uint32,
304  int* object_index_pointer,
305  int* dematerialized_index_pointer) {
306  if (op == LEnvironment::materialization_marker()) {
307  int object_index = (*object_index_pointer)++;
308  if (environment->ObjectIsDuplicateAt(object_index)) {
309  int dupe_of = environment->ObjectDuplicateOfAt(object_index);
310  translation->DuplicateObject(dupe_of);
311  return;
312  }
313  int object_length = environment->ObjectLengthAt(object_index);
314  if (environment->ObjectIsArgumentsAt(object_index)) {
315  translation->BeginArgumentsObject(object_length);
316  } else {
317  translation->BeginCapturedObject(object_length);
318  }
319  int dematerialized_index = *dematerialized_index_pointer;
320  int env_offset = environment->translation_size() + dematerialized_index;
321  *dematerialized_index_pointer += object_length;
322  for (int i = 0; i < object_length; ++i) {
323  LOperand* value = environment->values()->at(env_offset + i);
324  AddToTranslation(environment,
325  translation,
326  value,
327  environment->HasTaggedValueAt(env_offset + i),
328  environment->HasUint32ValueAt(env_offset + i),
329  object_index_pointer,
330  dematerialized_index_pointer);
331  }
332  return;
333  }
334 
335  if (op->IsStackSlot()) {
336  if (is_tagged) {
337  translation->StoreStackSlot(op->index());
338  } else if (is_uint32) {
339  translation->StoreUint32StackSlot(op->index());
340  } else {
341  translation->StoreInt32StackSlot(op->index());
342  }
343  } else if (op->IsDoubleStackSlot()) {
344  translation->StoreDoubleStackSlot(op->index());
345  } else if (op->IsRegister()) {
346  Register reg = ToRegister(op);
347  if (is_tagged) {
348  translation->StoreRegister(reg);
349  } else if (is_uint32) {
350  translation->StoreUint32Register(reg);
351  } else {
352  translation->StoreInt32Register(reg);
353  }
354  } else if (op->IsDoubleRegister()) {
355  DoubleRegister reg = ToDoubleRegister(op);
356  translation->StoreDoubleRegister(reg);
357  } else if (op->IsConstantOperand()) {
358  HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op));
359  int src_index = DefineDeoptimizationLiteral(constant->handle(isolate()));
360  translation->StoreLiteral(src_index);
361  } else {
362  UNREACHABLE();
363  }
364 }
365 
366 
367 int LCodeGen::DefineDeoptimizationLiteral(Handle<Object> literal) {
368  int result = deoptimization_literals_.length();
369  for (int i = 0; i < deoptimization_literals_.length(); ++i) {
370  if (deoptimization_literals_[i].is_identical_to(literal)) return i;
371  }
372  deoptimization_literals_.Add(literal, zone());
373  return result;
374 }
375 
376 
377 void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
378  Safepoint::DeoptMode mode) {
379  if (!environment->HasBeenRegistered()) {
380  int frame_count = 0;
381  int jsframe_count = 0;
382  for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
383  ++frame_count;
384  if (e->frame_type() == JS_FUNCTION) {
385  ++jsframe_count;
386  }
387  }
388  Translation translation(&translations_, frame_count, jsframe_count, zone());
389  WriteTranslation(environment, &translation);
390  int deoptimization_index = deoptimizations_.length();
391  int pc_offset = masm()->pc_offset();
392  environment->Register(deoptimization_index,
393  translation.index(),
394  (mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
395  deoptimizations_.Add(environment, zone());
396  }
397 }
398 
399 
400 void LCodeGen::CallCode(Handle<Code> code,
401  RelocInfo::Mode mode,
402  LInstruction* instr) {
403  CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT);
404 }
405 
406 
407 void LCodeGen::CallCodeGeneric(Handle<Code> code,
408  RelocInfo::Mode mode,
409  LInstruction* instr,
410  SafepointMode safepoint_mode) {
411  ASSERT(instr != NULL);
412 
413  Assembler::BlockPoolsScope scope(masm_);
414  __ Call(code, mode);
415  RecordSafepointWithLazyDeopt(instr, safepoint_mode);
416 
417  if ((code->kind() == Code::BINARY_OP_IC) ||
418  (code->kind() == Code::COMPARE_IC)) {
419  // Signal that we don't inline smi code before these stubs in the
420  // optimizing code generator.
421  InlineSmiCheckInfo::EmitNotInlined(masm());
422  }
423 }
424 
425 
426 void LCodeGen::DoCallFunction(LCallFunction* instr) {
427  ASSERT(ToRegister(instr->context()).is(cp));
428  ASSERT(ToRegister(instr->function()).Is(x1));
429  ASSERT(ToRegister(instr->result()).Is(x0));
430 
431  int arity = instr->arity();
432  CallFunctionStub stub(arity, instr->hydrogen()->function_flags());
433  CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
434 }
435 
436 
437 void LCodeGen::DoCallNew(LCallNew* instr) {
438  ASSERT(ToRegister(instr->context()).is(cp));
439  ASSERT(instr->IsMarkedAsCall());
440  ASSERT(ToRegister(instr->constructor()).is(x1));
441 
442  __ Mov(x0, instr->arity());
443  // No cell in x2 for construct type feedback in optimized code.
444  __ LoadRoot(x2, Heap::kUndefinedValueRootIndex);
445 
446  CallConstructStub stub(NO_CALL_FUNCTION_FLAGS);
447  CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
448 
449  ASSERT(ToRegister(instr->result()).is(x0));
450 }
451 
452 
453 void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
454  ASSERT(instr->IsMarkedAsCall());
455  ASSERT(ToRegister(instr->context()).is(cp));
456  ASSERT(ToRegister(instr->constructor()).is(x1));
457 
458  __ Mov(x0, Operand(instr->arity()));
459  __ LoadRoot(x2, Heap::kUndefinedValueRootIndex);
460 
461  ElementsKind kind = instr->hydrogen()->elements_kind();
462  AllocationSiteOverrideMode override_mode =
463  (AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE)
464  ? DISABLE_ALLOCATION_SITES
465  : DONT_OVERRIDE;
466 
467  if (instr->arity() == 0) {
468  ArrayNoArgumentConstructorStub stub(kind, override_mode);
469  CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
470  } else if (instr->arity() == 1) {
471  Label done;
472  if (IsFastPackedElementsKind(kind)) {
473  Label packed_case;
474 
475  // We might need to create a holey array; look at the first argument.
476  __ Peek(x10, 0);
477  __ Cbz(x10, &packed_case);
478 
479  ElementsKind holey_kind = GetHoleyElementsKind(kind);
480  ArraySingleArgumentConstructorStub stub(holey_kind, override_mode);
481  CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
482  __ B(&done);
483  __ Bind(&packed_case);
484  }
485 
486  ArraySingleArgumentConstructorStub stub(kind, override_mode);
487  CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
488  __ Bind(&done);
489  } else {
490  ArrayNArgumentsConstructorStub stub(kind, override_mode);
491  CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
492  }
493 
494  ASSERT(ToRegister(instr->result()).is(x0));
495 }
496 
497 
498 void LCodeGen::CallRuntime(const Runtime::Function* function,
499  int num_arguments,
500  LInstruction* instr,
501  SaveFPRegsMode save_doubles) {
502  ASSERT(instr != NULL);
503 
504  __ CallRuntime(function, num_arguments, save_doubles);
505 
506  RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
507 }
508 
509 
510 void LCodeGen::LoadContextFromDeferred(LOperand* context) {
511  if (context->IsRegister()) {
512  __ Mov(cp, ToRegister(context));
513  } else if (context->IsStackSlot()) {
514  __ Ldr(cp, ToMemOperand(context));
515  } else if (context->IsConstantOperand()) {
516  HConstant* constant =
517  chunk_->LookupConstant(LConstantOperand::cast(context));
518  __ LoadHeapObject(cp,
519  Handle<HeapObject>::cast(constant->handle(isolate())));
520  } else {
521  UNREACHABLE();
522  }
523 }
524 
525 
526 void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
527  int argc,
528  LInstruction* instr,
529  LOperand* context) {
530  LoadContextFromDeferred(context);
531  __ CallRuntimeSaveDoubles(id);
532  RecordSafepointWithRegisters(
533  instr->pointer_map(), argc, Safepoint::kNoLazyDeopt);
534 }
535 
536 
537 void LCodeGen::RecordAndWritePosition(int position) {
538  if (position == RelocInfo::kNoPosition) return;
539  masm()->positions_recorder()->RecordPosition(position);
540  masm()->positions_recorder()->WriteRecordedPositions();
541 }
542 
543 
544 void LCodeGen::RecordSafepointWithLazyDeopt(LInstruction* instr,
545  SafepointMode safepoint_mode) {
546  if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
547  RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt);
548  } else {
549  ASSERT(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
550  RecordSafepointWithRegisters(
551  instr->pointer_map(), 0, Safepoint::kLazyDeopt);
552  }
553 }
554 
555 
556 void LCodeGen::RecordSafepoint(LPointerMap* pointers,
557  Safepoint::Kind kind,
558  int arguments,
559  Safepoint::DeoptMode deopt_mode) {
560  ASSERT(expected_safepoint_kind_ == kind);
561 
562  const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();
563  Safepoint safepoint = safepoints_.DefineSafepoint(
564  masm(), kind, arguments, deopt_mode);
565 
566  for (int i = 0; i < operands->length(); i++) {
567  LOperand* pointer = operands->at(i);
568  if (pointer->IsStackSlot()) {
569  safepoint.DefinePointerSlot(pointer->index(), zone());
570  } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
571  safepoint.DefinePointerRegister(ToRegister(pointer), zone());
572  }
573  }
574 
575  if (kind & Safepoint::kWithRegisters) {
576  // Register cp always contains a pointer to the context.
577  safepoint.DefinePointerRegister(cp, zone());
578  }
579 }
580 
581 void LCodeGen::RecordSafepoint(LPointerMap* pointers,
582  Safepoint::DeoptMode deopt_mode) {
583  RecordSafepoint(pointers, Safepoint::kSimple, 0, deopt_mode);
584 }
585 
586 
587 void LCodeGen::RecordSafepoint(Safepoint::DeoptMode deopt_mode) {
588  LPointerMap empty_pointers(zone());
589  RecordSafepoint(&empty_pointers, deopt_mode);
590 }
591 
592 
593 void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
594  int arguments,
595  Safepoint::DeoptMode deopt_mode) {
596  RecordSafepoint(pointers, Safepoint::kWithRegisters, arguments, deopt_mode);
597 }
598 
599 
600 void LCodeGen::RecordSafepointWithRegistersAndDoubles(
601  LPointerMap* pointers, int arguments, Safepoint::DeoptMode deopt_mode) {
602  RecordSafepoint(
603  pointers, Safepoint::kWithRegistersAndDoubles, arguments, deopt_mode);
604 }
605 
606 
607 bool LCodeGen::GenerateCode() {
608  LPhase phase("Z_Code generation", chunk());
609  ASSERT(is_unused());
610  status_ = GENERATING;
611 
612  // Open a frame scope to indicate that there is a frame on the stack. The
613  // NONE indicates that the scope shouldn't actually generate code to set up
614  // the frame (that is done in GeneratePrologue).
615  FrameScope frame_scope(masm_, StackFrame::NONE);
616 
617  return GeneratePrologue() &&
618  GenerateBody() &&
619  GenerateDeferredCode() &&
620  GenerateDeoptJumpTable() &&
621  GenerateSafepointTable();
622 }
623 
624 
625 void LCodeGen::SaveCallerDoubles() {
626  ASSERT(info()->saves_caller_doubles());
627  ASSERT(NeedsEagerFrame());
628  Comment(";;; Save clobbered callee double registers");
629  BitVector* doubles = chunk()->allocated_double_registers();
630  BitVector::Iterator iterator(doubles);
631  int count = 0;
632  while (!iterator.Done()) {
633  // TODO(all): Is this supposed to save just the callee-saved doubles? It
634  // looks like it's saving all of them.
635  FPRegister value = FPRegister::FromAllocationIndex(iterator.Current());
636  __ Poke(value, count * kDoubleSize);
637  iterator.Advance();
638  count++;
639  }
640 }
641 
642 
643 void LCodeGen::RestoreCallerDoubles() {
644  ASSERT(info()->saves_caller_doubles());
645  ASSERT(NeedsEagerFrame());
646  Comment(";;; Restore clobbered callee double registers");
647  BitVector* doubles = chunk()->allocated_double_registers();
648  BitVector::Iterator iterator(doubles);
649  int count = 0;
650  while (!iterator.Done()) {
651  // TODO(all): Is this supposed to restore just the callee-saved doubles? It
652  // looks like it's restoring all of them.
653  FPRegister value = FPRegister::FromAllocationIndex(iterator.Current());
654  __ Peek(value, count * kDoubleSize);
655  iterator.Advance();
656  count++;
657  }
658 }
659 
660 
661 bool LCodeGen::GeneratePrologue() {
662  ASSERT(is_generating());
663 
664  if (info()->IsOptimizing()) {
665  ProfileEntryHookStub::MaybeCallEntryHook(masm_);
666 
667  // TODO(all): Add support for stop_t FLAG in DEBUG mode.
668 
669  // Sloppy mode functions and builtins need to replace the receiver with the
670  // global proxy when called as functions (without an explicit receiver
671  // object).
672  if (info_->this_has_uses() &&
673  info_->strict_mode() == SLOPPY &&
674  !info_->is_native()) {
675  Label ok;
676  int receiver_offset = info_->scope()->num_parameters() * kXRegSize;
677  __ Peek(x10, receiver_offset);
678  __ JumpIfNotRoot(x10, Heap::kUndefinedValueRootIndex, &ok);
679 
680  __ Ldr(x10, GlobalObjectMemOperand());
681  __ Ldr(x10, FieldMemOperand(x10, GlobalObject::kGlobalReceiverOffset));
682  __ Poke(x10, receiver_offset);
683 
684  __ Bind(&ok);
685  }
686  }
687 
688  ASSERT(__ StackPointer().Is(jssp));
689  info()->set_prologue_offset(masm_->pc_offset());
690  if (NeedsEagerFrame()) {
691  __ Prologue(info()->IsStub() ? BUILD_STUB_FRAME : BUILD_FUNCTION_FRAME);
692  frame_is_built_ = true;
693  info_->AddNoFrameRange(0, masm_->pc_offset());
694  }
695 
696  // Reserve space for the stack slots needed by the code.
697  int slots = GetStackSlotCount();
698  if (slots > 0) {
699  __ Claim(slots, kPointerSize);
700  }
701 
702  if (info()->saves_caller_doubles()) {
703  SaveCallerDoubles();
704  }
705 
706  // Allocate a local context if needed.
707  int heap_slots = info()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
708  if (heap_slots > 0) {
709  Comment(";;; Allocate local context");
710  // Argument to NewContext is the function, which is in x1.
711  if (heap_slots <= FastNewContextStub::kMaximumSlots) {
712  FastNewContextStub stub(heap_slots);
713  __ CallStub(&stub);
714  } else {
715  __ Push(x1);
716  __ CallRuntime(Runtime::kHiddenNewFunctionContext, 1);
717  }
718  RecordSafepoint(Safepoint::kNoLazyDeopt);
719  // Context is returned in x0. It replaces the context passed to us. It's
720  // saved in the stack and kept live in cp.
721  __ Mov(cp, x0);
722  __ Str(x0, MemOperand(fp, StandardFrameConstants::kContextOffset));
723  // Copy any necessary parameters into the context.
724  int num_parameters = scope()->num_parameters();
725  for (int i = 0; i < num_parameters; i++) {
726  Variable* var = scope()->parameter(i);
727  if (var->IsContextSlot()) {
728  Register value = x0;
729  Register scratch = x3;
730 
731  int parameter_offset = StandardFrameConstants::kCallerSPOffset +
732  (num_parameters - 1 - i) * kPointerSize;
733  // Load parameter from stack.
734  __ Ldr(value, MemOperand(fp, parameter_offset));
735  // Store it in the context.
736  MemOperand target = ContextMemOperand(cp, var->index());
737  __ Str(value, target);
738  // Update the write barrier. This clobbers value and scratch.
739  __ RecordWriteContextSlot(cp, target.offset(), value, scratch,
740  GetLinkRegisterState(), kSaveFPRegs);
741  }
742  }
743  Comment(";;; End allocate local context");
744  }
745 
746  // Trace the call.
747  if (FLAG_trace && info()->IsOptimizing()) {
748  // We have not executed any compiled code yet, so cp still holds the
749  // incoming context.
750  __ CallRuntime(Runtime::kTraceEnter, 0);
751  }
752 
753  return !is_aborted();
754 }
755 
756 
757 void LCodeGen::GenerateOsrPrologue() {
758  // Generate the OSR entry prologue at the first unknown OSR value, or if there
759  // are none, at the OSR entrypoint instruction.
760  if (osr_pc_offset_ >= 0) return;
761 
762  osr_pc_offset_ = masm()->pc_offset();
763 
764  // Adjust the frame size, subsuming the unoptimized frame into the
765  // optimized frame.
766  int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots();
767  ASSERT(slots >= 0);
768  __ Claim(slots);
769 }
770 
771 
772 void LCodeGen::GenerateBodyInstructionPre(LInstruction* instr) {
773  if (instr->IsCall()) {
774  EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
775  }
776  if (!instr->IsLazyBailout() && !instr->IsGap()) {
777  safepoints_.BumpLastLazySafepointIndex();
778  }
779 }
780 
781 
782 bool LCodeGen::GenerateDeferredCode() {
783  ASSERT(is_generating());
784  if (deferred_.length() > 0) {
785  for (int i = 0; !is_aborted() && (i < deferred_.length()); i++) {
786  LDeferredCode* code = deferred_[i];
787 
788  HValue* value =
789  instructions_->at(code->instruction_index())->hydrogen_value();
790  RecordAndWritePosition(
791  chunk()->graph()->SourcePositionToScriptPosition(value->position()));
792 
793  Comment(";;; <@%d,#%d> "
794  "-------------------- Deferred %s --------------------",
795  code->instruction_index(),
796  code->instr()->hydrogen_value()->id(),
797  code->instr()->Mnemonic());
798 
799  __ Bind(code->entry());
800 
801  if (NeedsDeferredFrame()) {
802  Comment(";;; Build frame");
803  ASSERT(!frame_is_built_);
804  ASSERT(info()->IsStub());
805  frame_is_built_ = true;
806  __ Push(lr, fp, cp);
807  __ Mov(fp, Smi::FromInt(StackFrame::STUB));
808  __ Push(fp);
809  __ Add(fp, __ StackPointer(),
810  StandardFrameConstants::kFixedFrameSizeFromFp);
811  Comment(";;; Deferred code");
812  }
813 
814  code->Generate();
815 
816  if (NeedsDeferredFrame()) {
817  Comment(";;; Destroy frame");
818  ASSERT(frame_is_built_);
819  __ Pop(xzr, cp, fp, lr);
820  frame_is_built_ = false;
821  }
822 
823  __ B(code->exit());
824  }
825  }
826 
827  // Force constant pool emission at the end of the deferred code to make
828  // sure that no constant pools are emitted after deferred code because
829  // deferred code generation is the last step which generates code. The two
830  // following steps will only output data used by Crankshaft.
831  masm()->CheckConstPool(true, false);
832 
833  return !is_aborted();
834 }
835 
836 
837 bool LCodeGen::GenerateDeoptJumpTable() {
838  if (deopt_jump_table_.length() > 0) {
839  Comment(";;; -------------------- Jump table --------------------");
840  }
841  Label table_start;
842  __ bind(&table_start);
843  Label needs_frame;
844  for (int i = 0; i < deopt_jump_table_.length(); i++) {
845  __ Bind(&deopt_jump_table_[i]->label);
846  Address entry = deopt_jump_table_[i]->address;
847  Deoptimizer::BailoutType type = deopt_jump_table_[i]->bailout_type;
848  int id = Deoptimizer::GetDeoptimizationId(isolate(), entry, type);
849  if (id == Deoptimizer::kNotDeoptimizationEntry) {
850  Comment(";;; jump table entry %d.", i);
851  } else {
852  Comment(";;; jump table entry %d: deoptimization bailout %d.", i, id);
853  }
854  if (deopt_jump_table_[i]->needs_frame) {
855  ASSERT(!info()->saves_caller_doubles());
856 
857  UseScratchRegisterScope temps(masm());
858  Register stub_deopt_entry = temps.AcquireX();
859  Register stub_marker = temps.AcquireX();
860 
861  __ Mov(stub_deopt_entry, ExternalReference::ForDeoptEntry(entry));
862  if (needs_frame.is_bound()) {
863  __ B(&needs_frame);
864  } else {
865  __ Bind(&needs_frame);
866  // This variant of deopt can only be used with stubs. Since we don't
867  // have a function pointer to install in the stack frame that we're
868  // building, install a special marker there instead.
869  ASSERT(info()->IsStub());
870  __ Mov(stub_marker, Smi::FromInt(StackFrame::STUB));
871  __ Push(lr, fp, cp, stub_marker);
872  __ Add(fp, __ StackPointer(), 2 * kPointerSize);
873  __ Call(stub_deopt_entry);
874  }
875  } else {
876  if (info()->saves_caller_doubles()) {
877  ASSERT(info()->IsStub());
878  RestoreCallerDoubles();
879  }
880  __ Call(entry, RelocInfo::RUNTIME_ENTRY);
881  }
882  masm()->CheckConstPool(false, false);
883  }
884 
885  // Force constant pool emission at the end of the deopt jump table to make
886  // sure that no constant pools are emitted after.
887  masm()->CheckConstPool(true, false);
888 
889  // The deoptimization jump table is the last part of the instruction
890  // sequence. Mark the generated code as done unless we bailed out.
891  if (!is_aborted()) status_ = DONE;
892  return !is_aborted();
893 }
894 
895 
896 bool LCodeGen::GenerateSafepointTable() {
897  ASSERT(is_done());
898  // We do not know how much data will be emitted for the safepoint table, so
899  // force emission of the veneer pool.
900  masm()->CheckVeneerPool(true, true);
901  safepoints_.Emit(masm(), GetStackSlotCount());
902  return !is_aborted();
903 }
904 
905 
906 void LCodeGen::FinishCode(Handle<Code> code) {
907  ASSERT(is_done());
908  code->set_stack_slots(GetStackSlotCount());
909  code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
910  if (code->is_optimized_code()) RegisterWeakObjectsInOptimizedCode(code);
911  PopulateDeoptimizationData(code);
912  info()->CommitDependencies(code);
913 }
914 
915 
916 void LCodeGen::Abort(BailoutReason reason) {
917  info()->set_bailout_reason(reason);
918  status_ = ABORTED;
919 }
920 
921 
922 void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
923  int length = deoptimizations_.length();
924  if (length == 0) return;
925 
926  Handle<DeoptimizationInputData> data =
927  factory()->NewDeoptimizationInputData(length, TENURED);
928 
929  Handle<ByteArray> translations =
930  translations_.CreateByteArray(isolate()->factory());
931  data->SetTranslationByteArray(*translations);
932  data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));
933  data->SetOptimizationId(Smi::FromInt(info_->optimization_id()));
934  if (info_->IsOptimizing()) {
935  // Reference to shared function info does not change between phases.
936  AllowDeferredHandleDereference allow_handle_dereference;
937  data->SetSharedFunctionInfo(*info_->shared_info());
938  } else {
939  data->SetSharedFunctionInfo(Smi::FromInt(0));
940  }
941 
942  Handle<FixedArray> literals =
943  factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
944  { AllowDeferredHandleDereference copy_handles;
945  for (int i = 0; i < deoptimization_literals_.length(); i++) {
946  literals->set(i, *deoptimization_literals_[i]);
947  }
948  data->SetLiteralArray(*literals);
949  }
950 
951  data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id().ToInt()));
952  data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_));
953 
954  // Populate the deoptimization entries.
955  for (int i = 0; i < length; i++) {
956  LEnvironment* env = deoptimizations_[i];
957  data->SetAstId(i, env->ast_id());
958  data->SetTranslationIndex(i, Smi::FromInt(env->translation_index()));
959  data->SetArgumentsStackHeight(i,
960  Smi::FromInt(env->arguments_stack_height()));
961  data->SetPc(i, Smi::FromInt(env->pc_offset()));
962  }
963 
964  code->set_deoptimization_data(*data);
965 }
966 
967 
968 void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
969  ASSERT(deoptimization_literals_.length() == 0);
970 
971  const ZoneList<Handle<JSFunction> >* inlined_closures =
972  chunk()->inlined_closures();
973 
974  for (int i = 0, length = inlined_closures->length(); i < length; i++) {
975  DefineDeoptimizationLiteral(inlined_closures->at(i));
976  }
977 
978  inlined_function_count_ = deoptimization_literals_.length();
979 }
980 
981 
982 void LCodeGen::DeoptimizeBranch(
983  LEnvironment* environment,
984  BranchType branch_type, Register reg, int bit,
985  Deoptimizer::BailoutType* override_bailout_type) {
986  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
987  Deoptimizer::BailoutType bailout_type =
988  info()->IsStub() ? Deoptimizer::LAZY : Deoptimizer::EAGER;
989 
990  if (override_bailout_type != NULL) {
991  bailout_type = *override_bailout_type;
992  }
993 
994  ASSERT(environment->HasBeenRegistered());
995  ASSERT(info()->IsOptimizing() || info()->IsStub());
996  int id = environment->deoptimization_index();
997  Address entry =
998  Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type);
999 
1000  if (entry == NULL) {
1001  Abort(kBailoutWasNotPrepared);
1002  }
1003 
1004  if (FLAG_deopt_every_n_times != 0 && !info()->IsStub()) {
1005  Label not_zero;
1006  ExternalReference count = ExternalReference::stress_deopt_count(isolate());
1007 
1008  __ Push(x0, x1, x2);
1009  __ Mrs(x2, NZCV);
1010  __ Mov(x0, count);
1011  __ Ldr(w1, MemOperand(x0));
1012  __ Subs(x1, x1, 1);
1013  __ B(gt, &not_zero);
1014  __ Mov(w1, FLAG_deopt_every_n_times);
1015  __ Str(w1, MemOperand(x0));
1016  __ Pop(x2, x1, x0);
1017  ASSERT(frame_is_built_);
1018  __ Call(entry, RelocInfo::RUNTIME_ENTRY);
1019  __ Unreachable();
1020 
1021  __ Bind(&not_zero);
1022  __ Str(w1, MemOperand(x0));
1023  __ Msr(NZCV, x2);
1024  __ Pop(x2, x1, x0);
1025  }
1026 
1027  if (info()->ShouldTrapOnDeopt()) {
1028  Label dont_trap;
1029  __ B(&dont_trap, InvertBranchType(branch_type), reg, bit);
1030  __ Debug("trap_on_deopt", __LINE__, BREAK);
1031  __ Bind(&dont_trap);
1032  }
1033 
1034  ASSERT(info()->IsStub() || frame_is_built_);
1035  // Go through jump table if we need to build frame, or restore caller doubles.
1036  if (branch_type == always &&
1037  frame_is_built_ && !info()->saves_caller_doubles()) {
1038  __ Call(entry, RelocInfo::RUNTIME_ENTRY);
1039  } else {
1040  // We often have several deopts to the same entry, reuse the last
1041  // jump entry if this is the case.
1042  if (deopt_jump_table_.is_empty() ||
1043  (deopt_jump_table_.last()->address != entry) ||
1044  (deopt_jump_table_.last()->bailout_type != bailout_type) ||
1045  (deopt_jump_table_.last()->needs_frame != !frame_is_built_)) {
1046  Deoptimizer::JumpTableEntry* table_entry =
1047  new(zone()) Deoptimizer::JumpTableEntry(entry,
1048  bailout_type,
1049  !frame_is_built_);
1050  deopt_jump_table_.Add(table_entry, zone());
1051  }
1052  __ B(&deopt_jump_table_.last()->label,
1053  branch_type, reg, bit);
1054  }
1055 }
1056 
1057 
1058 void LCodeGen::Deoptimize(LEnvironment* environment,
1059  Deoptimizer::BailoutType* override_bailout_type) {
1060  DeoptimizeBranch(environment, always, NoReg, -1, override_bailout_type);
1061 }
1062 
1063 
1064 void LCodeGen::DeoptimizeIf(Condition cond, LEnvironment* environment) {
1065  DeoptimizeBranch(environment, static_cast<BranchType>(cond));
1066 }
1067 
1068 
1069 void LCodeGen::DeoptimizeIfZero(Register rt, LEnvironment* environment) {
1070  DeoptimizeBranch(environment, reg_zero, rt);
1071 }
1072 
1073 
1074 void LCodeGen::DeoptimizeIfNotZero(Register rt, LEnvironment* environment) {
1075  DeoptimizeBranch(environment, reg_not_zero, rt);
1076 }
1077 
1078 
1079 void LCodeGen::DeoptimizeIfNegative(Register rt, LEnvironment* environment) {
1080  int sign_bit = rt.Is64Bits() ? kXSignBit : kWSignBit;
1081  DeoptimizeIfBitSet(rt, sign_bit, environment);
1082 }
1083 
1084 
1085 void LCodeGen::DeoptimizeIfSmi(Register rt,
1086  LEnvironment* environment) {
1087  DeoptimizeIfBitClear(rt, MaskToBit(kSmiTagMask), environment);
1088 }
1089 
1090 
1091 void LCodeGen::DeoptimizeIfNotSmi(Register rt, LEnvironment* environment) {
1092  DeoptimizeIfBitSet(rt, MaskToBit(kSmiTagMask), environment);
1093 }
1094 
1095 
1096 void LCodeGen::DeoptimizeIfRoot(Register rt,
1097  Heap::RootListIndex index,
1098  LEnvironment* environment) {
1099  __ CompareRoot(rt, index);
1100  DeoptimizeIf(eq, environment);
1101 }
1102 
1103 
1104 void LCodeGen::DeoptimizeIfNotRoot(Register rt,
1105  Heap::RootListIndex index,
1106  LEnvironment* environment) {
1107  __ CompareRoot(rt, index);
1108  DeoptimizeIf(ne, environment);
1109 }
1110 
1111 
1112 void LCodeGen::DeoptimizeIfMinusZero(DoubleRegister input,
1113  LEnvironment* environment) {
1114  __ TestForMinusZero(input);
1115  DeoptimizeIf(vs, environment);
1116 }
1117 
1118 
1119 void LCodeGen::DeoptimizeIfBitSet(Register rt,
1120  int bit,
1121  LEnvironment* environment) {
1122  DeoptimizeBranch(environment, reg_bit_set, rt, bit);
1123 }
1124 
1125 
1126 void LCodeGen::DeoptimizeIfBitClear(Register rt,
1127  int bit,
1128  LEnvironment* environment) {
1129  DeoptimizeBranch(environment, reg_bit_clear, rt, bit);
1130 }
1131 
1132 
1133 void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
1134  if (!info()->IsStub()) {
1135  // Ensure that we have enough space after the previous lazy-bailout
1136  // instruction for patching the code here.
1137  intptr_t current_pc = masm()->pc_offset();
1138 
1139  if (current_pc < (last_lazy_deopt_pc_ + space_needed)) {
1140  ptrdiff_t padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
1141  ASSERT((padding_size % kInstructionSize) == 0);
1142  InstructionAccurateScope instruction_accurate(
1143  masm(), padding_size / kInstructionSize);
1144 
1145  while (padding_size > 0) {
1146  __ nop();
1147  padding_size -= kInstructionSize;
1148  }
1149  }
1150  }
1151  last_lazy_deopt_pc_ = masm()->pc_offset();
1152 }
1153 
1154 
1155 Register LCodeGen::ToRegister(LOperand* op) const {
1156  // TODO(all): support zero register results, as ToRegister32.
1157  ASSERT((op != NULL) && op->IsRegister());
1158  return Register::FromAllocationIndex(op->index());
1159 }
1160 
1161 
1162 Register LCodeGen::ToRegister32(LOperand* op) const {
1163  ASSERT(op != NULL);
1164  if (op->IsConstantOperand()) {
1165  // If this is a constant operand, the result must be the zero register.
1166  ASSERT(ToInteger32(LConstantOperand::cast(op)) == 0);
1167  return wzr;
1168  } else {
1169  return ToRegister(op).W();
1170  }
1171 }
1172 
1173 
1174 Smi* LCodeGen::ToSmi(LConstantOperand* op) const {
1175  HConstant* constant = chunk_->LookupConstant(op);
1176  return Smi::FromInt(constant->Integer32Value());
1177 }
1178 
1179 
1180 DoubleRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
1181  ASSERT((op != NULL) && op->IsDoubleRegister());
1182  return DoubleRegister::FromAllocationIndex(op->index());
1183 }
1184 
1185 
1186 Operand LCodeGen::ToOperand(LOperand* op) {
1187  ASSERT(op != NULL);
1188  if (op->IsConstantOperand()) {
1189  LConstantOperand* const_op = LConstantOperand::cast(op);
1190  HConstant* constant = chunk()->LookupConstant(const_op);
1191  Representation r = chunk_->LookupLiteralRepresentation(const_op);
1192  if (r.IsSmi()) {
1193  ASSERT(constant->HasSmiValue());
1194  return Operand(Smi::FromInt(constant->Integer32Value()));
1195  } else if (r.IsInteger32()) {
1196  ASSERT(constant->HasInteger32Value());
1197  return Operand(constant->Integer32Value());
1198  } else if (r.IsDouble()) {
1199  Abort(kToOperandUnsupportedDoubleImmediate);
1200  }
1201  ASSERT(r.IsTagged());
1202  return Operand(constant->handle(isolate()));
1203  } else if (op->IsRegister()) {
1204  return Operand(ToRegister(op));
1205  } else if (op->IsDoubleRegister()) {
1206  Abort(kToOperandIsDoubleRegisterUnimplemented);
1207  return Operand(0);
1208  }
1209  // Stack slots not implemented, use ToMemOperand instead.
1210  UNREACHABLE();
1211  return Operand(0);
1212 }
1213 
1214 
1215 Operand LCodeGen::ToOperand32I(LOperand* op) {
1216  return ToOperand32(op, SIGNED_INT32);
1217 }
1218 
1219 
1220 Operand LCodeGen::ToOperand32U(LOperand* op) {
1221  return ToOperand32(op, UNSIGNED_INT32);
1222 }
1223 
1224 
1225 Operand LCodeGen::ToOperand32(LOperand* op, IntegerSignedness signedness) {
1226  ASSERT(op != NULL);
1227  if (op->IsRegister()) {
1228  return Operand(ToRegister32(op));
1229  } else if (op->IsConstantOperand()) {
1230  LConstantOperand* const_op = LConstantOperand::cast(op);
1231  HConstant* constant = chunk()->LookupConstant(const_op);
1232  Representation r = chunk_->LookupLiteralRepresentation(const_op);
1233  if (r.IsInteger32()) {
1234  ASSERT(constant->HasInteger32Value());
1235  return Operand(signedness == SIGNED_INT32
1236  ? constant->Integer32Value()
1237  : static_cast<uint32_t>(constant->Integer32Value()));
1238  } else {
1239  // Other constants not implemented.
1240  Abort(kToOperand32UnsupportedImmediate);
1241  }
1242  }
1243  // Other cases are not implemented.
1244  UNREACHABLE();
1245  return Operand(0);
1246 }
1247 
1248 
1249 static ptrdiff_t ArgumentsOffsetWithoutFrame(ptrdiff_t index) {
1250  ASSERT(index < 0);
1251  return -(index + 1) * kPointerSize;
1252 }
1253 
1254 
1255 MemOperand LCodeGen::ToMemOperand(LOperand* op) const {
1256  ASSERT(op != NULL);
1257  ASSERT(!op->IsRegister());
1258  ASSERT(!op->IsDoubleRegister());
1259  ASSERT(op->IsStackSlot() || op->IsDoubleStackSlot());
1260  if (NeedsEagerFrame()) {
1261  return MemOperand(fp, StackSlotOffset(op->index()));
1262  } else {
1263  // Retrieve the parameter without an eager stack frame, relative to the
1264  // stack pointer.
1265  return MemOperand(masm()->StackPointer(),
1266  ArgumentsOffsetWithoutFrame(op->index()));
1267  }
1268 }
1269 
1270 
1271 Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
1272  HConstant* constant = chunk_->LookupConstant(op);
1273  ASSERT(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged());
1274  return constant->handle(isolate());
1275 }
1276 
1277 
1278 bool LCodeGen::IsSmi(LConstantOperand* op) const {
1279  return chunk_->LookupLiteralRepresentation(op).IsSmi();
1280 }
1281 
1282 
1283 bool LCodeGen::IsInteger32Constant(LConstantOperand* op) const {
1284  return chunk_->LookupLiteralRepresentation(op).IsSmiOrInteger32();
1285 }
1286 
1287 
1288 int32_t LCodeGen::ToInteger32(LConstantOperand* op) const {
1289  HConstant* constant = chunk_->LookupConstant(op);
1290  return constant->Integer32Value();
1291 }
1292 
1293 
1294 double LCodeGen::ToDouble(LConstantOperand* op) const {
1295  HConstant* constant = chunk_->LookupConstant(op);
1296  ASSERT(constant->HasDoubleValue());
1297  return constant->DoubleValue();
1298 }
1299 
1300 
1301 Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
1302  Condition cond = nv;
1303  switch (op) {
1304  case Token::EQ:
1305  case Token::EQ_STRICT:
1306  cond = eq;
1307  break;
1308  case Token::NE:
1309  case Token::NE_STRICT:
1310  cond = ne;
1311  break;
1312  case Token::LT:
1313  cond = is_unsigned ? lo : lt;
1314  break;
1315  case Token::GT:
1316  cond = is_unsigned ? hi : gt;
1317  break;
1318  case Token::LTE:
1319  cond = is_unsigned ? ls : le;
1320  break;
1321  case Token::GTE:
1322  cond = is_unsigned ? hs : ge;
1323  break;
1324  case Token::IN:
1325  case Token::INSTANCEOF:
1326  default:
1327  UNREACHABLE();
1328  }
1329  return cond;
1330 }
1331 
1332 
1333 template<class InstrType>
1334 void LCodeGen::EmitBranchGeneric(InstrType instr,
1335  const BranchGenerator& branch) {
1336  int left_block = instr->TrueDestination(chunk_);
1337  int right_block = instr->FalseDestination(chunk_);
1338 
1339  int next_block = GetNextEmittedBlock();
1340 
1341  if (right_block == left_block) {
1342  EmitGoto(left_block);
1343  } else if (left_block == next_block) {
1344  branch.EmitInverted(chunk_->GetAssemblyLabel(right_block));
1345  } else if (right_block == next_block) {
1346  branch.Emit(chunk_->GetAssemblyLabel(left_block));
1347  } else {
1348  branch.Emit(chunk_->GetAssemblyLabel(left_block));
1349  __ B(chunk_->GetAssemblyLabel(right_block));
1350  }
1351 }
1352 
1353 
1354 template<class InstrType>
1355 void LCodeGen::EmitBranch(InstrType instr, Condition condition) {
1356  ASSERT((condition != al) && (condition != nv));
1357  BranchOnCondition branch(this, condition);
1358  EmitBranchGeneric(instr, branch);
1359 }
1360 
1361 
1362 template<class InstrType>
1363 void LCodeGen::EmitCompareAndBranch(InstrType instr,
1364  Condition condition,
1365  const Register& lhs,
1366  const Operand& rhs) {
1367  ASSERT((condition != al) && (condition != nv));
1368  CompareAndBranch branch(this, condition, lhs, rhs);
1369  EmitBranchGeneric(instr, branch);
1370 }
1371 
1372 
1373 template<class InstrType>
1374 void LCodeGen::EmitTestAndBranch(InstrType instr,
1375  Condition condition,
1376  const Register& value,
1377  uint64_t mask) {
1378  ASSERT((condition != al) && (condition != nv));
1379  TestAndBranch branch(this, condition, value, mask);
1380  EmitBranchGeneric(instr, branch);
1381 }
1382 
1383 
1384 template<class InstrType>
1385 void LCodeGen::EmitBranchIfNonZeroNumber(InstrType instr,
1386  const FPRegister& value,
1387  const FPRegister& scratch) {
1388  BranchIfNonZeroNumber branch(this, value, scratch);
1389  EmitBranchGeneric(instr, branch);
1390 }
1391 
1392 
1393 template<class InstrType>
1394 void LCodeGen::EmitBranchIfHeapNumber(InstrType instr,
1395  const Register& value) {
1396  BranchIfHeapNumber branch(this, value);
1397  EmitBranchGeneric(instr, branch);
1398 }
1399 
1400 
1401 template<class InstrType>
1402 void LCodeGen::EmitBranchIfRoot(InstrType instr,
1403  const Register& value,
1404  Heap::RootListIndex index) {
1405  BranchIfRoot branch(this, value, index);
1406  EmitBranchGeneric(instr, branch);
1407 }
1408 
1409 
1410 void LCodeGen::DoGap(LGap* gap) {
1411  for (int i = LGap::FIRST_INNER_POSITION;
1412  i <= LGap::LAST_INNER_POSITION;
1413  i++) {
1414  LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i);
1415  LParallelMove* move = gap->GetParallelMove(inner_pos);
1416  if (move != NULL) {
1417  resolver_.Resolve(move);
1418  }
1419  }
1420 }
1421 
1422 
1423 void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
1424  Register arguments = ToRegister(instr->arguments());
1425  Register result = ToRegister(instr->result());
1426 
1427  // The pointer to the arguments array comes from DoArgumentsElements.
1428  // It does not point directly to the arguments and there is an offset of
1429  // two words that we must take into account when accessing an argument.
1430  // Subtracting the index from length accounts for one, so we add one more.
1431 
1432  if (instr->length()->IsConstantOperand() &&
1433  instr->index()->IsConstantOperand()) {
1434  int index = ToInteger32(LConstantOperand::cast(instr->index()));
1435  int length = ToInteger32(LConstantOperand::cast(instr->length()));
1436  int offset = ((length - index) + 1) * kPointerSize;
1437  __ Ldr(result, MemOperand(arguments, offset));
1438  } else if (instr->index()->IsConstantOperand()) {
1439  Register length = ToRegister32(instr->length());
1440  int index = ToInteger32(LConstantOperand::cast(instr->index()));
1441  int loc = index - 1;
1442  if (loc != 0) {
1443  __ Sub(result.W(), length, loc);
1444  __ Ldr(result, MemOperand(arguments, result, UXTW, kPointerSizeLog2));
1445  } else {
1446  __ Ldr(result, MemOperand(arguments, length, UXTW, kPointerSizeLog2));
1447  }
1448  } else {
1449  Register length = ToRegister32(instr->length());
1450  Operand index = ToOperand32I(instr->index());
1451  __ Sub(result.W(), length, index);
1452  __ Add(result.W(), result.W(), 1);
1453  __ Ldr(result, MemOperand(arguments, result, UXTW, kPointerSizeLog2));
1454  }
1455 }
1456 
1457 
1458 void LCodeGen::DoAddE(LAddE* instr) {
1459  Register result = ToRegister(instr->result());
1460  Register left = ToRegister(instr->left());
1461  Operand right = (instr->right()->IsConstantOperand())
1462  ? ToInteger32(LConstantOperand::cast(instr->right()))
1463  : Operand(ToRegister32(instr->right()), SXTW);
1464 
1465  ASSERT(!instr->hydrogen()->CheckFlag(HValue::kCanOverflow));
1466  __ Add(result, left, right);
1467 }
1468 
1469 
1470 void LCodeGen::DoAddI(LAddI* instr) {
1471  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
1472  Register result = ToRegister32(instr->result());
1473  Register left = ToRegister32(instr->left());
1474  Operand right = ToOperand32I(instr->right());
1475  if (can_overflow) {
1476  __ Adds(result, left, right);
1477  DeoptimizeIf(vs, instr->environment());
1478  } else {
1479  __ Add(result, left, right);
1480  }
1481 }
1482 
1483 
1484 void LCodeGen::DoAddS(LAddS* instr) {
1485  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
1486  Register result = ToRegister(instr->result());
1487  Register left = ToRegister(instr->left());
1488  Operand right = ToOperand(instr->right());
1489  if (can_overflow) {
1490  __ Adds(result, left, right);
1491  DeoptimizeIf(vs, instr->environment());
1492  } else {
1493  __ Add(result, left, right);
1494  }
1495 }
1496 
1497 
1498 void LCodeGen::DoAllocate(LAllocate* instr) {
1499  class DeferredAllocate: public LDeferredCode {
1500  public:
1501  DeferredAllocate(LCodeGen* codegen, LAllocate* instr)
1502  : LDeferredCode(codegen), instr_(instr) { }
1503  virtual void Generate() { codegen()->DoDeferredAllocate(instr_); }
1504  virtual LInstruction* instr() { return instr_; }
1505  private:
1506  LAllocate* instr_;
1507  };
1508 
1509  DeferredAllocate* deferred = new(zone()) DeferredAllocate(this, instr);
1510 
1511  Register result = ToRegister(instr->result());
1512  Register temp1 = ToRegister(instr->temp1());
1513  Register temp2 = ToRegister(instr->temp2());
1514 
1515  // Allocate memory for the object.
1516  AllocationFlags flags = TAG_OBJECT;
1517  if (instr->hydrogen()->MustAllocateDoubleAligned()) {
1518  flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
1519  }
1520 
1521  if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
1522  ASSERT(!instr->hydrogen()->IsOldDataSpaceAllocation());
1523  ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
1524  flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_POINTER_SPACE);
1525  } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
1526  ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
1527  flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_DATA_SPACE);
1528  }
1529 
1530  if (instr->size()->IsConstantOperand()) {
1531  int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
1532  if (size <= Page::kMaxRegularHeapObjectSize) {
1533  __ Allocate(size, result, temp1, temp2, deferred->entry(), flags);
1534  } else {
1535  __ B(deferred->entry());
1536  }
1537  } else {
1538  Register size = ToRegister32(instr->size());
1539  __ Sxtw(size.X(), size);
1540  __ Allocate(size.X(), result, temp1, temp2, deferred->entry(), flags);
1541  }
1542 
1543  __ Bind(deferred->exit());
1544 
1545  if (instr->hydrogen()->MustPrefillWithFiller()) {
1546  Register filler_count = temp1;
1547  Register filler = temp2;
1548  Register untagged_result = ToRegister(instr->temp3());
1549 
1550  if (instr->size()->IsConstantOperand()) {
1551  int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
1552  __ Mov(filler_count, size / kPointerSize);
1553  } else {
1554  __ Lsr(filler_count.W(), ToRegister32(instr->size()), kPointerSizeLog2);
1555  }
1556 
1557  __ Sub(untagged_result, result, kHeapObjectTag);
1558  __ Mov(filler, Operand(isolate()->factory()->one_pointer_filler_map()));
1559  __ FillFields(untagged_result, filler_count, filler);
1560  } else {
1561  ASSERT(instr->temp3() == NULL);
1562  }
1563 }
1564 
1565 
1566 void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
1567  // TODO(3095996): Get rid of this. For now, we need to make the
1568  // result register contain a valid pointer because it is already
1569  // contained in the register pointer map.
1570  __ Mov(ToRegister(instr->result()), Smi::FromInt(0));
1571 
1572  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
1573  // We're in a SafepointRegistersScope so we can use any scratch registers.
1574  Register size = x0;
1575  if (instr->size()->IsConstantOperand()) {
1576  __ Mov(size, ToSmi(LConstantOperand::cast(instr->size())));
1577  } else {
1578  __ SmiTag(size, ToRegister32(instr->size()).X());
1579  }
1580  int flags = AllocateDoubleAlignFlag::encode(
1581  instr->hydrogen()->MustAllocateDoubleAligned());
1582  if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
1583  ASSERT(!instr->hydrogen()->IsOldDataSpaceAllocation());
1584  ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
1585  flags = AllocateTargetSpace::update(flags, OLD_POINTER_SPACE);
1586  } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
1587  ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
1588  flags = AllocateTargetSpace::update(flags, OLD_DATA_SPACE);
1589  } else {
1590  flags = AllocateTargetSpace::update(flags, NEW_SPACE);
1591  }
1592  __ Mov(x10, Smi::FromInt(flags));
1593  __ Push(size, x10);
1594 
1595  CallRuntimeFromDeferred(
1596  Runtime::kHiddenAllocateInTargetSpace, 2, instr, instr->context());
1597  __ StoreToSafepointRegisterSlot(x0, ToRegister(instr->result()));
1598 }
1599 
1600 
1601 void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
1602  Register receiver = ToRegister(instr->receiver());
1603  Register function = ToRegister(instr->function());
1604  Register length = ToRegister32(instr->length());
1605 
1606  Register elements = ToRegister(instr->elements());
1607  Register scratch = x5;
1608  ASSERT(receiver.Is(x0)); // Used for parameter count.
1609  ASSERT(function.Is(x1)); // Required by InvokeFunction.
1610  ASSERT(ToRegister(instr->result()).Is(x0));
1611  ASSERT(instr->IsMarkedAsCall());
1612 
1613  // Copy the arguments to this function possibly from the
1614  // adaptor frame below it.
1615  const uint32_t kArgumentsLimit = 1 * KB;
1616  __ Cmp(length, kArgumentsLimit);
1617  DeoptimizeIf(hi, instr->environment());
1618 
1619  // Push the receiver and use the register to keep the original
1620  // number of arguments.
1621  __ Push(receiver);
1622  Register argc = receiver;
1623  receiver = NoReg;
1624  __ Sxtw(argc, length);
1625  // The arguments are at a one pointer size offset from elements.
1626  __ Add(elements, elements, 1 * kPointerSize);
1627 
1628  // Loop through the arguments pushing them onto the execution
1629  // stack.
1630  Label invoke, loop;
1631  // length is a small non-negative integer, due to the test above.
1632  __ Cbz(length, &invoke);
1633  __ Bind(&loop);
1634  __ Ldr(scratch, MemOperand(elements, length, SXTW, kPointerSizeLog2));
1635  __ Push(scratch);
1636  __ Subs(length, length, 1);
1637  __ B(ne, &loop);
1638 
1639  __ Bind(&invoke);
1640  ASSERT(instr->HasPointerMap());
1641  LPointerMap* pointers = instr->pointer_map();
1642  SafepointGenerator safepoint_generator(this, pointers, Safepoint::kLazyDeopt);
1643  // The number of arguments is stored in argc (receiver) which is x0, as
1644  // expected by InvokeFunction.
1645  ParameterCount actual(argc);
1646  __ InvokeFunction(function, actual, CALL_FUNCTION, safepoint_generator);
1647 }
1648 
1649 
1650 void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
1651  Register result = ToRegister(instr->result());
1652 
1653  if (instr->hydrogen()->from_inlined()) {
1654  // When we are inside an inlined function, the arguments are the last things
1655  // that have been pushed on the stack. Therefore the arguments array can be
1656  // accessed directly from jssp.
1657  // However in the normal case, it is accessed via fp but there are two words
1658  // on the stack between fp and the arguments (the saved lr and fp) and the
1659  // LAccessArgumentsAt implementation takes that into account.
1660  // In the inlined case we need to subtract the size of 2 words from jssp to
1661  // get a pointer which will work well with LAccessArgumentsAt.
1662  ASSERT(masm()->StackPointer().Is(jssp));
1663  __ Sub(result, jssp, 2 * kPointerSize);
1664  } else {
1665  ASSERT(instr->temp() != NULL);
1666  Register previous_fp = ToRegister(instr->temp());
1667 
1668  __ Ldr(previous_fp,
1669  MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
1670  __ Ldr(result,
1671  MemOperand(previous_fp, StandardFrameConstants::kContextOffset));
1672  __ Cmp(result, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
1673  __ Csel(result, fp, previous_fp, ne);
1674  }
1675 }
1676 
1677 
1678 void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
1679  Register elements = ToRegister(instr->elements());
1680  Register result = ToRegister32(instr->result());
1681  Label done;
1682 
1683  // If no arguments adaptor frame the number of arguments is fixed.
1684  __ Cmp(fp, elements);
1685  __ Mov(result, scope()->num_parameters());
1686  __ B(eq, &done);
1687 
1688  // Arguments adaptor frame present. Get argument length from there.
1689  __ Ldr(result.X(), MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
1690  __ Ldr(result,
1691  UntagSmiMemOperand(result.X(),
1692  ArgumentsAdaptorFrameConstants::kLengthOffset));
1693 
1694  // Argument length is in result register.
1695  __ Bind(&done);
1696 }
1697 
1698 
1699 void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
1700  DoubleRegister left = ToDoubleRegister(instr->left());
1701  DoubleRegister right = ToDoubleRegister(instr->right());
1702  DoubleRegister result = ToDoubleRegister(instr->result());
1703 
1704  switch (instr->op()) {
1705  case Token::ADD: __ Fadd(result, left, right); break;
1706  case Token::SUB: __ Fsub(result, left, right); break;
1707  case Token::MUL: __ Fmul(result, left, right); break;
1708  case Token::DIV: __ Fdiv(result, left, right); break;
1709  case Token::MOD: {
1710  // The ECMA-262 remainder operator is the remainder from a truncating
1711  // (round-towards-zero) division. Note that this differs from IEEE-754.
1712  //
1713  // TODO(jbramley): See if it's possible to do this inline, rather than by
1714  // calling a helper function. With frintz (to produce the intermediate
1715  // quotient) and fmsub (to calculate the remainder without loss of
1716  // precision), it should be possible. However, we would need support for
1717  // fdiv in round-towards-zero mode, and the ARM64 simulator doesn't
1718  // support that yet.
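      // For example, 5.5 % 2 is 1.5 and -5.5 % 2 is -1.5 (the result takes
      // the sign of the dividend), whereas the IEEE-754 remainder of -5.5 and
      // 2 would be 0.5.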
1719  ASSERT(left.Is(d0));
1720  ASSERT(right.Is(d1));
1721  __ CallCFunction(
1722  ExternalReference::mod_two_doubles_operation(isolate()),
1723  0, 2);
1724  ASSERT(result.Is(d0));
1725  break;
1726  }
1727  default:
1728  UNREACHABLE();
1729  break;
1730  }
1731 }
1732 
1733 
1734 void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
1735  ASSERT(ToRegister(instr->context()).is(cp));
1736  ASSERT(ToRegister(instr->left()).is(x1));
1737  ASSERT(ToRegister(instr->right()).is(x0));
1738  ASSERT(ToRegister(instr->result()).is(x0));
1739 
1740  BinaryOpICStub stub(instr->op(), NO_OVERWRITE);
1741  CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
1742 }
1743 
1744 
1745 void LCodeGen::DoBitI(LBitI* instr) {
1746  Register result = ToRegister32(instr->result());
1747  Register left = ToRegister32(instr->left());
1748  Operand right = ToOperand32U(instr->right());
1749 
1750  switch (instr->op()) {
1751  case Token::BIT_AND: __ And(result, left, right); break;
1752  case Token::BIT_OR: __ Orr(result, left, right); break;
1753  case Token::BIT_XOR: __ Eor(result, left, right); break;
1754  default:
1755  UNREACHABLE();
1756  break;
1757  }
1758 }
1759 
1760 
1761 void LCodeGen::DoBitS(LBitS* instr) {
1762  Register result = ToRegister(instr->result());
1763  Register left = ToRegister(instr->left());
1764  Operand right = ToOperand(instr->right());
1765 
1766  switch (instr->op()) {
1767  case Token::BIT_AND: __ And(result, left, right); break;
1768  case Token::BIT_OR: __ Orr(result, left, right); break;
1769  case Token::BIT_XOR: __ Eor(result, left, right); break;
1770  default:
1771  UNREACHABLE();
1772  break;
1773  }
1774 }
1775 
1776 
1777 void LCodeGen::ApplyCheckIf(Condition cc, LBoundsCheck* check) {
1778  if (FLAG_debug_code && check->hydrogen()->skip_check()) {
1779  __ Assert(InvertCondition(cc), kEliminatedBoundsCheckFailed);
1780  } else {
1781  DeoptimizeIf(cc, check->environment());
1782  }
1783 }
1784 
1785 
1786 void LCodeGen::DoBoundsCheck(LBoundsCheck *instr) {
1787  if (instr->hydrogen()->skip_check()) return;
1788 
1789  ASSERT(instr->hydrogen()->length()->representation().IsInteger32());
1790  Register length = ToRegister32(instr->length());
1791 
1792  if (instr->index()->IsConstantOperand()) {
1793  int constant_index =
1794  ToInteger32(LConstantOperand::cast(instr->index()));
1795 
1796  if (instr->hydrogen()->length()->representation().IsSmi()) {
1797  __ Cmp(length, Smi::FromInt(constant_index));
1798  } else {
1799  __ Cmp(length, constant_index);
1800  }
1801  } else {
1802  ASSERT(instr->hydrogen()->index()->representation().IsInteger32());
1803  __ Cmp(length, ToRegister32(instr->index()));
1804  }
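 // The flags are set for (length - index). ApplyCheckIf deoptimizes when the
 // bounds check fails: on lo (length < index) if equality is allowed, and on
 // ls (length <= index) otherwise.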
1805  Condition condition = instr->hydrogen()->allow_equality() ? lo : ls;
1806  ApplyCheckIf(condition, instr);
1807 }
1808 
1809 
1810 void LCodeGen::DoBranch(LBranch* instr) {
1811  Representation r = instr->hydrogen()->value()->representation();
1812  Label* true_label = instr->TrueLabel(chunk_);
1813  Label* false_label = instr->FalseLabel(chunk_);
1814 
1815  if (r.IsInteger32()) {
1816  ASSERT(!info()->IsStub());
1817  EmitCompareAndBranch(instr, ne, ToRegister32(instr->value()), 0);
1818  } else if (r.IsSmi()) {
1819  ASSERT(!info()->IsStub());
1820  STATIC_ASSERT(kSmiTag == 0);
1821  EmitCompareAndBranch(instr, ne, ToRegister(instr->value()), 0);
1822  } else if (r.IsDouble()) {
1823  DoubleRegister value = ToDoubleRegister(instr->value());
1824  // Test the double value. Zero and NaN are false.
1825  EmitBranchIfNonZeroNumber(instr, value, double_scratch());
1826  } else {
1827  ASSERT(r.IsTagged());
1828  Register value = ToRegister(instr->value());
1829  HType type = instr->hydrogen()->value()->type();
1830 
1831  if (type.IsBoolean()) {
1832  ASSERT(!info()->IsStub());
1833  __ CompareRoot(value, Heap::kTrueValueRootIndex);
1834  EmitBranch(instr, eq);
1835  } else if (type.IsSmi()) {
1836  ASSERT(!info()->IsStub());
1837  EmitCompareAndBranch(instr, ne, value, Smi::FromInt(0));
1838  } else if (type.IsJSArray()) {
1839  ASSERT(!info()->IsStub());
1840  EmitGoto(instr->TrueDestination(chunk()));
1841  } else if (type.IsHeapNumber()) {
1842  ASSERT(!info()->IsStub());
1843  __ Ldr(double_scratch(), FieldMemOperand(value,
1844                                           HeapNumber::kValueOffset));
1845  // Test the double value. Zero and NaN are false.
1846  EmitBranchIfNonZeroNumber(instr, double_scratch(), double_scratch());
1847  } else if (type.IsString()) {
1848  ASSERT(!info()->IsStub());
1849  Register temp = ToRegister(instr->temp1());
1850  __ Ldr(temp, FieldMemOperand(value, String::kLengthOffset));
1851  EmitCompareAndBranch(instr, ne, temp, 0);
1852  } else {
1853  ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types();
1854  // Avoid deopts in the case where we've never executed this path before.
1855  if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic();
1856 
1857  if (expected.Contains(ToBooleanStub::UNDEFINED)) {
1858  // undefined -> false.
1859  __ JumpIfRoot(
1860  value, Heap::kUndefinedValueRootIndex, false_label);
1861  }
1862 
1863  if (expected.Contains(ToBooleanStub::BOOLEAN)) {
1864  // Boolean -> its value.
1865  __ JumpIfRoot(
1866  value, Heap::kTrueValueRootIndex, true_label);
1867  __ JumpIfRoot(
1868  value, Heap::kFalseValueRootIndex, false_label);
1869  }
1870 
1871  if (expected.Contains(ToBooleanStub::NULL_TYPE)) {
1872  // 'null' -> false.
1873  __ JumpIfRoot(
1874  value, Heap::kNullValueRootIndex, false_label);
1875  }
1876 
1877  if (expected.Contains(ToBooleanStub::SMI)) {
1878  // Smis: 0 -> false, all other -> true.
1879  ASSERT(Smi::FromInt(0) == 0);
1880  __ Cbz(value, false_label);
1881  __ JumpIfSmi(value, true_label);
1882  } else if (expected.NeedsMap()) {
1883  // If we need a map later and have a smi, deopt.
1884  DeoptimizeIfSmi(value, instr->environment());
1885  }
1886 
1887  Register map = NoReg;
1888  Register scratch = NoReg;
1889 
1890  if (expected.NeedsMap()) {
1891  ASSERT((instr->temp1() != NULL) && (instr->temp2() != NULL));
1892  map = ToRegister(instr->temp1());
1893  scratch = ToRegister(instr->temp2());
1894 
1895  __ Ldr(map, FieldMemOperand(value, HeapObject::kMapOffset));
1896 
1897  if (expected.CanBeUndetectable()) {
1898  // Undetectable -> false.
1899  __ Ldrb(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
1900  __ TestAndBranchIfAnySet(
1901  scratch, 1 << Map::kIsUndetectable, false_label);
1902  }
1903  }
1904 
1905  if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) {
1906  // spec object -> true.
1907  __ CompareInstanceType(map, scratch, FIRST_SPEC_OBJECT_TYPE);
1908  __ B(ge, true_label);
1909  }
1910 
1911  if (expected.Contains(ToBooleanStub::STRING)) {
1912  // String value -> false iff empty.
1913  Label not_string;
1914  __ CompareInstanceType(map, scratch, FIRST_NONSTRING_TYPE);
1915  __ B(ge, &not_string);
1916  __ Ldr(scratch, FieldMemOperand(value, String::kLengthOffset));
1917  __ Cbz(scratch, false_label);
1918  __ B(true_label);
1919  __ Bind(&not_string);
1920  }
1921 
1922  if (expected.Contains(ToBooleanStub::SYMBOL)) {
1923  // Symbol value -> true.
1924  __ CompareInstanceType(map, scratch, SYMBOL_TYPE);
1925  __ B(eq, true_label);
1926  }
1927 
1928  if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
1929  Label not_heap_number;
1930  __ JumpIfNotRoot(map, Heap::kHeapNumberMapRootIndex, &not_heap_number);
1931 
1932  __ Ldr(double_scratch(),
1933         FieldMemOperand(value, HeapNumber::kValueOffset));
1934  __ Fcmp(double_scratch(), 0.0);
1935  // If we got a NaN (V flag set, i.e. unordered result), jump to the false branch.
1936  __ B(vs, false_label);
1937  __ B(eq, false_label);
1938  __ B(true_label);
1939  __ Bind(&not_heap_number);
1940  }
1941 
1942  if (!expected.IsGeneric()) {
1943  // We've seen something for the first time -> deopt.
1944  // This can only happen if we are not generic already.
1945  Deoptimize(instr->environment());
1946  }
1947  }
1948  }
1949 }
1950 
1951 
1952 void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
1953                                  int formal_parameter_count,
1954                                  int arity,
1955                                  LInstruction* instr,
1956                                  Register function_reg) {
1957  bool dont_adapt_arguments =
1958  formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel;
1959  bool can_invoke_directly =
1960  dont_adapt_arguments || formal_parameter_count == arity;
1961 
1962  // The function interface relies on the following register assignments.
1963  ASSERT(function_reg.Is(x1) || function_reg.IsNone());
1964  Register arity_reg = x0;
1965 
1966  LPointerMap* pointers = instr->pointer_map();
1967 
1968  // If necessary, load the function object.
1969  if (function_reg.IsNone()) {
1970  function_reg = x1;
1971  __ LoadObject(function_reg, function);
1972  }
1973 
1974  if (FLAG_debug_code) {
1975  Label is_not_smi;
1976  // Try to confirm that function_reg (x1) is a tagged pointer.
1977  __ JumpIfNotSmi(function_reg, &is_not_smi);
1978  __ Abort(kExpectedFunctionObject);
1979  __ Bind(&is_not_smi);
1980  }
1981 
1982  if (can_invoke_directly) {
1983  // Change context.
1984  __ Ldr(cp, FieldMemOperand(function_reg, JSFunction::kContextOffset));
1985 
1986  // Set the arguments count if adaptation is not needed. Assumes that x0 is
1987  // available to write to at this point.
1988  if (dont_adapt_arguments) {
1989  __ Mov(arity_reg, arity);
1990  }
1991 
1992  // Invoke function.
1993  __ Ldr(x10, FieldMemOperand(function_reg, JSFunction::kCodeEntryOffset));
1994  __ Call(x10);
1995 
1996  // Set up deoptimization.
1997  RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
1998  } else {
1999  SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
2000  ParameterCount count(arity);
2001  ParameterCount expected(formal_parameter_count);
2002  __ InvokeFunction(function_reg, expected, count, CALL_FUNCTION, generator);
2003  }
2004 }
2005 
2006 
2007 void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
2008  ASSERT(instr->IsMarkedAsCall());
2009  ASSERT(ToRegister(instr->result()).Is(x0));
2010 
2011  LPointerMap* pointers = instr->pointer_map();
2012  SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
2013 
2014  if (instr->target()->IsConstantOperand()) {
2015  LConstantOperand* target = LConstantOperand::cast(instr->target());
2016  Handle<Code> code = Handle<Code>::cast(ToHandle(target));
2017  generator.BeforeCall(__ CallSize(code, RelocInfo::CODE_TARGET));
2018  // TODO(all): on ARM we use a call descriptor to specify a storage mode
2019  // but on ARM64 we only have one storage mode so it isn't necessary. Check
2020  // this understanding is correct.
2021  __ Call(code, RelocInfo::CODE_TARGET, TypeFeedbackId::None());
2022  } else {
2023  ASSERT(instr->target()->IsRegister());
2024  Register target = ToRegister(instr->target());
2025  generator.BeforeCall(__ CallSize(target));
2026  __ Add(target, target, Code::kHeaderSize - kHeapObjectTag);
2027  __ Call(target);
2028  }
2029  generator.AfterCall();
2030 }
2031 
2032 
2033 void LCodeGen::DoCallJSFunction(LCallJSFunction* instr) {
2034  ASSERT(instr->IsMarkedAsCall());
2035  ASSERT(ToRegister(instr->function()).is(x1));
2036 
2037  if (instr->hydrogen()->pass_argument_count()) {
2038  __ Mov(x0, Operand(instr->arity()));
2039  }
2040 
2041  // Change context.
2042  __ Ldr(cp, FieldMemOperand(x1, JSFunction::kContextOffset));
2043 
2044  // Load the code entry address.
2045  __ Ldr(x10, FieldMemOperand(x1, JSFunction::kCodeEntryOffset));
2046  __ Call(x10);
2047 
2048  RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
2049 }
2050 
2051 
2052 void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
2053  CallRuntime(instr->function(), instr->arity(), instr);
2054 }
2055 
2056 
2057 void LCodeGen::DoCallStub(LCallStub* instr) {
2058  ASSERT(ToRegister(instr->context()).is(cp));
2059  ASSERT(ToRegister(instr->result()).is(x0));
2060  switch (instr->hydrogen()->major_key()) {
2061  case CodeStub::RegExpExec: {
2062  RegExpExecStub stub;
2063  CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
2064  break;
2065  }
2066  case CodeStub::SubString: {
2067  SubStringStub stub;
2068  CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
2069  break;
2070  }
2071  case CodeStub::StringCompare: {
2072  StringCompareStub stub;
2073  CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
2074  break;
2075  }
2076  default:
2077  UNREACHABLE();
2078  }
2079 }
2080 
2081 
2082 void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
2083  GenerateOsrPrologue();
2084 }
2085 
2086 
2087 void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
2088  Register temp = ToRegister(instr->temp());
2089  {
2090  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
2091  __ Push(object);
2092  __ Mov(cp, 0);
2093  __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance);
2094  RecordSafepointWithRegisters(
2095  instr->pointer_map(), 1, Safepoint::kNoLazyDeopt);
2096  __ StoreToSafepointRegisterSlot(x0, temp);
2097  }
2098  DeoptimizeIfSmi(temp, instr->environment());
2099 }
2100 
2101 
2102 void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
2103  class DeferredCheckMaps: public LDeferredCode {
2104  public:
2105  DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object)
2106  : LDeferredCode(codegen), instr_(instr), object_(object) {
2107  SetExit(check_maps());
2108  }
2109  virtual void Generate() {
2110  codegen()->DoDeferredInstanceMigration(instr_, object_);
2111  }
2112  Label* check_maps() { return &check_maps_; }
2113  virtual LInstruction* instr() { return instr_; }
2114  private:
2115  LCheckMaps* instr_;
2116  Label check_maps_;
2117  Register object_;
2118  };
2119 
2120  if (instr->hydrogen()->CanOmitMapChecks()) {
2121  ASSERT(instr->value() == NULL);
2122  ASSERT(instr->temp() == NULL);
2123  return;
2124  }
2125 
2126  Register object = ToRegister(instr->value());
2127  Register map_reg = ToRegister(instr->temp());
2128 
2129  __ Ldr(map_reg, FieldMemOperand(object, HeapObject::kMapOffset));
2130 
2131  DeferredCheckMaps* deferred = NULL;
2132  if (instr->hydrogen()->has_migration_target()) {
2133  deferred = new(zone()) DeferredCheckMaps(this, instr, object);
2134  __ Bind(deferred->check_maps());
2135  }
2136 
2137  UniqueSet<Map> map_set = instr->hydrogen()->map_set();
2138  Label success;
2139  for (int i = 0; i < map_set.size(); i++) {
2140  Handle<Map> map = map_set.at(i).handle();
2141  __ CompareMap(map_reg, map);
2142  __ B(eq, &success);
2143  }
2144 
2145  // We didn't match a map.
2146  if (instr->hydrogen()->has_migration_target()) {
2147  __ B(deferred->entry());
2148  } else {
2149  Deoptimize(instr->environment());
2150  }
2151 
2152  __ Bind(&success);
2153 }
2154 
2155 
2156 void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
2157  if (!instr->hydrogen()->value()->IsHeapObject()) {
2158  DeoptimizeIfSmi(ToRegister(instr->value()), instr->environment());
2159  }
2160 }
2161 
2162 
2163 void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
2164  Register value = ToRegister(instr->value());
2165  ASSERT(!instr->result() || ToRegister(instr->result()).Is(value));
2166  DeoptimizeIfNotSmi(value, instr->environment());
2167 }
2168 
2169 
2170 void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
2171  Register input = ToRegister(instr->value());
2172  Register scratch = ToRegister(instr->temp());
2173 
2174  __ Ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
2175  __ Ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
2176 
2177  if (instr->hydrogen()->is_interval_check()) {
2178  InstanceType first, last;
2179  instr->hydrogen()->GetCheckInterval(&first, &last);
2180 
2181  __ Cmp(scratch, first);
2182  if (first == last) {
2183  // If there is only one type in the interval, check for equality.
2184  DeoptimizeIf(ne, instr->environment());
2185  } else if (last == LAST_TYPE) {
2186  // We don't need to compare with the higher bound of the interval.
2187  DeoptimizeIf(lo, instr->environment());
2188  } else {
2189  // If we are below the lower bound, set the C flag and clear the Z flag
2190  // to force a deopt.
2191  __ Ccmp(scratch, last, CFlag, hs);
2192  DeoptimizeIf(hi, instr->environment());
2193  }
2194  } else {
2195  uint8_t mask;
2196  uint8_t tag;
2197  instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);
2198 
2199  if (IsPowerOf2(mask)) {
2200  ASSERT((tag == 0) || (tag == mask));
2201  if (tag == 0) {
2202  DeoptimizeIfBitSet(scratch, MaskToBit(mask), instr->environment());
2203  } else {
2204  DeoptimizeIfBitClear(scratch, MaskToBit(mask), instr->environment());
2205  }
2206  } else {
2207  if (tag == 0) {
2208  __ Tst(scratch, mask);
2209  } else {
2210  __ And(scratch, scratch, mask);
2211  __ Cmp(scratch, tag);
2212  }
2213  DeoptimizeIf(ne, instr->environment());
2214  }
2215  }
2216 }
2217 
2218 
2219 void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
2220  DoubleRegister input = ToDoubleRegister(instr->unclamped());
2221  Register result = ToRegister32(instr->result());
2222  __ ClampDoubleToUint8(result, input, double_scratch());
2223 }
2224 
2225 
2226 void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
2227  Register input = ToRegister32(instr->unclamped());
2228  Register result = ToRegister32(instr->result());
2229  __ ClampInt32ToUint8(result, input);
2230 }
2231 
2232 
2233 void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
2234  Register input = ToRegister(instr->unclamped());
2235  Register result = ToRegister32(instr->result());
2236  Register scratch = ToRegister(instr->temp1());
2237  Label done;
2238 
2239  // Both smi and heap number cases are handled.
2240  Label is_not_smi;
2241  __ JumpIfNotSmi(input, &is_not_smi);
2242  __ SmiUntag(result.X(), input);
2243  __ ClampInt32ToUint8(result);
2244  __ B(&done);
2245 
2246  __ Bind(&is_not_smi);
2247 
2248  // Check for heap number.
2249  Label is_heap_number;
2250  __ Ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
2251  __ JumpIfRoot(scratch, Heap::kHeapNumberMapRootIndex, &is_heap_number);
2252 
2253  // Check for undefined. Undefined is converted to zero for the clamping conversion.
2254  DeoptimizeIfNotRoot(input, Heap::kUndefinedValueRootIndex,
2255  instr->environment());
2256  __ Mov(result, 0);
2257  __ B(&done);
2258 
2259  // Heap number case.
2260  __ Bind(&is_heap_number);
2261  DoubleRegister dbl_scratch = double_scratch();
2262  DoubleRegister dbl_scratch2 = ToDoubleRegister(instr->temp2());
2263  __ Ldr(dbl_scratch, FieldMemOperand(input, HeapNumber::kValueOffset));
2264  __ ClampDoubleToUint8(result, dbl_scratch, dbl_scratch2);
2265 
2266  __ Bind(&done);
2267 }
2268 
2269 
2270 void LCodeGen::DoDoubleBits(LDoubleBits* instr) {
2271  DoubleRegister value_reg = ToDoubleRegister(instr->value());
2272  Register result_reg = ToRegister(instr->result());
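 // This extracts one 32-bit half of the raw IEEE-754 encoding: the high word
 // via a 64-bit move followed by a right shift of 32, the low word via a move
 // from the S (low 32-bit) view of the FP register.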
2273  if (instr->hydrogen()->bits() == HDoubleBits::HIGH) {
2274  __ Fmov(result_reg, value_reg);
2275  __ Mov(result_reg, Operand(result_reg, LSR, 32));
2276  } else {
2277  __ Fmov(result_reg.W(), value_reg.S());
2278  }
2279 }
2280 
2281 
2282 void LCodeGen::DoConstructDouble(LConstructDouble* instr) {
2283  Register hi_reg = ToRegister(instr->hi());
2284  Register lo_reg = ToRegister(instr->lo());
2285  Register temp = ToRegister(instr->temp());
2286  DoubleRegister result_reg = ToDoubleRegister(instr->result());
2287 
2288  __ And(temp, lo_reg, Operand(0xffffffff));
2289  __ Orr(temp, temp, Operand(hi_reg, LSL, 32));
2290  __ Fmov(result_reg, temp);
2291 }
2292 
2293 
2294 void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
2295  Handle<String> class_name = instr->hydrogen()->class_name();
2296  Label* true_label = instr->TrueLabel(chunk_);
2297  Label* false_label = instr->FalseLabel(chunk_);
2298  Register input = ToRegister(instr->value());
2299  Register scratch1 = ToRegister(instr->temp1());
2300  Register scratch2 = ToRegister(instr->temp2());
2301 
2302  __ JumpIfSmi(input, false_label);
2303 
2304  Register map = scratch2;
2305  if (class_name->IsUtf8EqualTo(CStrVector("Function"))) {
2306  // Assuming the following assertions, we can use the same compares to test
2307  // for both being a function type and being in the object type range.
2308     STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
2309     STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
2310                   FIRST_SPEC_OBJECT_TYPE + 1);
2311     STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
2312                   LAST_SPEC_OBJECT_TYPE - 1);
2313     STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
2314 
2315  // We expect CompareObjectType to load the object instance type in scratch1.
2316  __ CompareObjectType(input, map, scratch1, FIRST_SPEC_OBJECT_TYPE);
2317  __ B(lt, false_label);
2318  __ B(eq, true_label);
2319  __ Cmp(scratch1, LAST_SPEC_OBJECT_TYPE);
2320  __ B(eq, true_label);
2321  } else {
2322  __ IsObjectJSObjectType(input, map, scratch1, false_label);
2323  }
2324 
2325  // Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range.
2326  // Check if the constructor in the map is a function.
2327  __ Ldr(scratch1, FieldMemOperand(map, Map::kConstructorOffset));
2328 
2329  // Objects with a non-function constructor have class 'Object'.
2330  if (class_name->IsUtf8EqualTo(CStrVector("Object"))) {
2331  __ JumpIfNotObjectType(
2332  scratch1, scratch2, scratch2, JS_FUNCTION_TYPE, true_label);
2333  } else {
2334  __ JumpIfNotObjectType(
2335  scratch1, scratch2, scratch2, JS_FUNCTION_TYPE, false_label);
2336  }
2337 
2338  // The constructor function is in scratch1. Get its instance class name.
2339  __ Ldr(scratch1,
2340         FieldMemOperand(scratch1, JSFunction::kSharedFunctionInfoOffset));
2341  __ Ldr(scratch1,
2342         FieldMemOperand(scratch1,
2343                         SharedFunctionInfo::kInstanceClassNameOffset));
2344 
2345  // The class name we are testing against is internalized since it's a literal.
2346  // The name in the constructor is internalized because of the way the context
2347  // is booted. This routine isn't expected to work for random API-created
2348  // classes and it doesn't have to because you can't access it with natives
2349  // syntax. Since both sides are internalized it is sufficient to use an
2350  // identity comparison.
2351  EmitCompareAndBranch(instr, eq, scratch1, Operand(class_name));
2352 }
2353 
2354 
2355 void LCodeGen::DoCmpHoleAndBranchD(LCmpHoleAndBranchD* instr) {
2356  ASSERT(instr->hydrogen()->representation().IsDouble());
2357  FPRegister object = ToDoubleRegister(instr->object());
2358  Register temp = ToRegister(instr->temp());
2359 
2360  // If we don't have a NaN, we don't have the hole, so branch now to avoid the
2361  // (relatively expensive) hole-NaN check.
2362  __ Fcmp(object, object);
2363  __ B(vc, instr->FalseLabel(chunk_));
2364 
2365  // We have a NaN, but is it the hole?
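 // The hole is one specific quiet-NaN bit pattern (kHoleNanInt64), and an FP
 // compare cannot distinguish one NaN from another, so move the raw bits to an
 // integer register and compare them directly.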
2366  __ Fmov(temp, object);
2367  EmitCompareAndBranch(instr, eq, temp, kHoleNanInt64);
2368 }
2369 
2370 
2371 void LCodeGen::DoCmpHoleAndBranchT(LCmpHoleAndBranchT* instr) {
2372  ASSERT(instr->hydrogen()->representation().IsTagged());
2373  Register object = ToRegister(instr->object());
2374 
2375  EmitBranchIfRoot(instr, object, Heap::kTheHoleValueRootIndex);
2376 }
2377 
2378 
2379 void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
2380  Register value = ToRegister(instr->value());
2381  Register map = ToRegister(instr->temp());
2382 
2383  __ Ldr(map, FieldMemOperand(value, HeapObject::kMapOffset));
2384  EmitCompareAndBranch(instr, eq, map, Operand(instr->map()));
2385 }
2386 
2387 
2388 void LCodeGen::DoCompareMinusZeroAndBranch(LCompareMinusZeroAndBranch* instr) {
2389  Representation rep = instr->hydrogen()->value()->representation();
2390  ASSERT(!rep.IsInteger32());
2391  Register scratch = ToRegister(instr->temp());
2392 
2393  if (rep.IsDouble()) {
2394  __ JumpIfMinusZero(ToDoubleRegister(instr->value()),
2395  instr->TrueLabel(chunk()));
2396  } else {
2397  Register value = ToRegister(instr->value());
2398  __ CheckMap(value, scratch, Heap::kHeapNumberMapRootIndex,
2399  instr->FalseLabel(chunk()), DO_SMI_CHECK);
2400  __ Ldr(double_scratch(), FieldMemOperand(value, HeapNumber::kValueOffset));
2401  __ JumpIfMinusZero(double_scratch(), instr->TrueLabel(chunk()));
2402  }
2403  EmitGoto(instr->FalseDestination(chunk()));
2404 }
2405 
2406 
2407 void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) {
2408  LOperand* left = instr->left();
2409  LOperand* right = instr->right();
2410  Condition cond = TokenToCondition(instr->op(), false);
2411 
2412  if (left->IsConstantOperand() && right->IsConstantOperand()) {
2413  // We can statically evaluate the comparison.
2414  double left_val = ToDouble(LConstantOperand::cast(left));
2415  double right_val = ToDouble(LConstantOperand::cast(right));
2416  int next_block = EvalComparison(instr->op(), left_val, right_val) ?
2417  instr->TrueDestination(chunk_) : instr->FalseDestination(chunk_);
2418  EmitGoto(next_block);
2419  } else {
2420  if (instr->is_double()) {
2421  if (right->IsConstantOperand()) {
2422  __ Fcmp(ToDoubleRegister(left),
2423  ToDouble(LConstantOperand::cast(right)));
2424  } else if (left->IsConstantOperand()) {
2425  // Transpose the operands and reverse the condition.
2426  __ Fcmp(ToDoubleRegister(right),
2427  ToDouble(LConstantOperand::cast(left)));
2428  cond = ReverseConditionForCmp(cond);
2429  } else {
2430  __ Fcmp(ToDoubleRegister(left), ToDoubleRegister(right));
2431  }
2432 
2433  // If a NaN is involved, i.e. the result is unordered (V set),
2434  // jump to false block label.
2435  __ B(vs, instr->FalseLabel(chunk_));
2436  EmitBranch(instr, cond);
2437  } else {
2438  if (instr->hydrogen_value()->representation().IsInteger32()) {
2439  if (right->IsConstantOperand()) {
2440  EmitCompareAndBranch(instr,
2441  cond,
2442  ToRegister32(left),
2443  ToOperand32I(right));
2444  } else {
2445  // Transpose the operands and reverse the condition.
2446  EmitCompareAndBranch(instr,
2447  ReverseConditionForCmp(cond),
2448  ToRegister32(right),
2449  ToOperand32I(left));
2450  }
2451  } else {
2452  ASSERT(instr->hydrogen_value()->representation().IsSmi());
2453  if (right->IsConstantOperand()) {
2454  int32_t value = ToInteger32(LConstantOperand::cast(right));
2455  EmitCompareAndBranch(instr,
2456  cond,
2457  ToRegister(left),
2458  Operand(Smi::FromInt(value)));
2459  } else if (left->IsConstantOperand()) {
2460  // Transpose the operands and reverse the condition.
2461  int32_t value = ToInteger32(LConstantOperand::cast(left));
2462  EmitCompareAndBranch(instr,
2463  ReverseConditionForCmp(cond),
2464  ToRegister(right),
2465  Operand(Smi::FromInt(value)));
2466  } else {
2467  EmitCompareAndBranch(instr,
2468  cond,
2469  ToRegister(left),
2470  ToRegister(right));
2471  }
2472  }
2473  }
2474  }
2475 }
2476 
2477 
2478 void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
2479  Register left = ToRegister(instr->left());
2480  Register right = ToRegister(instr->right());
2481  EmitCompareAndBranch(instr, eq, left, right);
2482 }
2483 
2484 
2485 void LCodeGen::DoCmpT(LCmpT* instr) {
2486  ASSERT(ToRegister(instr->context()).is(cp));
2487  Token::Value op = instr->op();
2488  Condition cond = TokenToCondition(op, false);
2489 
2490  ASSERT(ToRegister(instr->left()).Is(x1));
2491  ASSERT(ToRegister(instr->right()).Is(x0));
2492  Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
2493  CallCode(ic, RelocInfo::CODE_TARGET, instr);
2494  // Signal that we don't inline smi code before this stub.
2495  InlineSmiCheckInfo::EmitNotInlined(masm());
2496 
2497  // Return true or false depending on CompareIC result.
2498  // This instruction is marked as call. We can clobber any register.
2499  ASSERT(instr->IsMarkedAsCall());
2500  __ LoadTrueFalseRoots(x1, x2);
2501  __ Cmp(x0, 0);
2502  __ Csel(ToRegister(instr->result()), x1, x2, cond);
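 // The CompareIC leaves a value in x0 whose comparison against zero mirrors
 // the original relation, so a single Csel between the true and false roots
 // materializes the boolean result.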
2503 }
2504 
2505 
2506 void LCodeGen::DoConstantD(LConstantD* instr) {
2507  ASSERT(instr->result()->IsDoubleRegister());
2508  DoubleRegister result = ToDoubleRegister(instr->result());
2509  __ Fmov(result, instr->value());
2510 }
2511 
2512 
2513 void LCodeGen::DoConstantE(LConstantE* instr) {
2514  __ Mov(ToRegister(instr->result()), Operand(instr->value()));
2515 }
2516 
2517 
2518 void LCodeGen::DoConstantI(LConstantI* instr) {
2519  ASSERT(is_int32(instr->value()));
2520  // Cast the value here to ensure that the value isn't sign extended by the
2521  // implicit Operand constructor.
2522  __ Mov(ToRegister32(instr->result()), static_cast<uint32_t>(instr->value()));
2523 }
2524 
2525 
2526 void LCodeGen::DoConstantS(LConstantS* instr) {
2527  __ Mov(ToRegister(instr->result()), Operand(instr->value()));
2528 }
2529 
2530 
2531 void LCodeGen::DoConstantT(LConstantT* instr) {
2532  Handle<Object> value = instr->value(isolate());
2533  AllowDeferredHandleDereference smi_check;
2534  __ LoadObject(ToRegister(instr->result()), value);
2535 }
2536 
2537 
2538 void LCodeGen::DoContext(LContext* instr) {
2539  // If there is a non-return use, the context must be moved to a register.
2540  Register result = ToRegister(instr->result());
2541  if (info()->IsOptimizing()) {
2542     __ Ldr(result, MemOperand(fp, StandardFrameConstants::kContextOffset));
2543  } else {
2544  // If there is no frame, the context must be in cp.
2545  ASSERT(result.is(cp));
2546  }
2547 }
2548 
2549 
2550 void LCodeGen::DoCheckValue(LCheckValue* instr) {
2551  Register reg = ToRegister(instr->value());
2552  Handle<HeapObject> object = instr->hydrogen()->object().handle();
2553  AllowDeferredHandleDereference smi_check;
2554  if (isolate()->heap()->InNewSpace(*object)) {
2555  UseScratchRegisterScope temps(masm());
2556  Register temp = temps.AcquireX();
2557  Handle<Cell> cell = isolate()->factory()->NewCell(object);
2558  __ Mov(temp, Operand(Handle<Object>(cell)));
2559  __ Ldr(temp, FieldMemOperand(temp, Cell::kValueOffset));
2560  __ Cmp(reg, temp);
2561  } else {
2562  __ Cmp(reg, Operand(object));
2563  }
2564  DeoptimizeIf(ne, instr->environment());
2565 }
2566 
2567 
2568 void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
2569  last_lazy_deopt_pc_ = masm()->pc_offset();
2570  ASSERT(instr->HasEnvironment());
2571  LEnvironment* env = instr->environment();
2572  RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
2573  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
2574 }
2575 
2576 
2577 void LCodeGen::DoDateField(LDateField* instr) {
2578  Register object = ToRegister(instr->date());
2579  Register result = ToRegister(instr->result());
2580  Register temp1 = x10;
2581  Register temp2 = x11;
2582  Smi* index = instr->index();
2583  Label runtime, done, deopt, obj_ok;
2584 
2585  ASSERT(object.is(result) && object.Is(x0));
2586  ASSERT(instr->IsMarkedAsCall());
2587 
2588  __ JumpIfSmi(object, &deopt);
2589  __ CompareObjectType(object, temp1, temp1, JS_DATE_TYPE);
2590  __ B(eq, &obj_ok);
2591 
2592  __ Bind(&deopt);
2593  Deoptimize(instr->environment());
2594 
2595  __ Bind(&obj_ok);
2596  if (index->value() == 0) {
2597  __ Ldr(result, FieldMemOperand(object, JSDate::kValueOffset));
2598  } else {
2599  if (index->value() < JSDate::kFirstUncachedField) {
2600  ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
2601  __ Mov(temp1, Operand(stamp));
2602  __ Ldr(temp1, MemOperand(temp1));
2603  __ Ldr(temp2, FieldMemOperand(object, JSDate::kCacheStampOffset));
2604  __ Cmp(temp1, temp2);
2605  __ B(ne, &runtime);
2606  __ Ldr(result, FieldMemOperand(object, JSDate::kValueOffset +
2607  kPointerSize * index->value()));
2608  __ B(&done);
2609  }
2610 
2611  __ Bind(&runtime);
2612  __ Mov(x1, Operand(index));
2613  __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
2614  }
2615 
2616  __ Bind(&done);
2617 }
2618 
2619 
2620 void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
2621  Deoptimizer::BailoutType type = instr->hydrogen()->type();
2622  // TODO(danno): Stubs expect all deopts to be lazy for historical reasons (the
2623  // needed return address), even though the implementation of LAZY and EAGER is
2624  // now identical. When LAZY is eventually completely folded into EAGER, remove
2625  // the special case below.
2626  if (info()->IsStub() && (type == Deoptimizer::EAGER)) {
2627  type = Deoptimizer::LAZY;
2628  }
2629 
2630  Comment(";;; deoptimize: %s", instr->hydrogen()->reason());
2631  Deoptimize(instr->environment(), &type);
2632 }
2633 
2634 
2635 void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
2636  Register dividend = ToRegister32(instr->dividend());
2637  int32_t divisor = instr->divisor();
2638  Register result = ToRegister32(instr->result());
2639  ASSERT(divisor == kMinInt || (divisor != 0 && IsPowerOf2(Abs(divisor))));
2640  ASSERT(!result.is(dividend));
2641 
2642  // Check for (0 / -x) that will produce negative zero.
2643  HDiv* hdiv = instr->hydrogen();
2644  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
2645  __ Cmp(dividend, 0);
2646  DeoptimizeIf(eq, instr->environment());
2647  }
2648  // Check for (kMinInt / -1).
2649  if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
2650  __ Cmp(dividend, kMinInt);
2651  DeoptimizeIf(eq, instr->environment());
2652  }
2653  // Deoptimize if remainder will not be 0.
2654  if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
2655  divisor != 1 && divisor != -1) {
2656  int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
2657  __ Tst(dividend, mask);
2658  DeoptimizeIf(ne, instr->environment());
2659  }
2660 
2661  if (divisor == -1) { // Nice shortcut, not needed for correctness.
2662  __ Neg(result, dividend);
2663  return;
2664  }
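 // For a negative dividend, an arithmetic shift alone would round towards
 // -infinity. The bias added below (2^shift - 1, extracted from the sign bits
 // with ASR/LSR) makes the final ASR round towards zero, as required for
 // integer division.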
2665  int32_t shift = WhichPowerOf2Abs(divisor);
2666  if (shift == 0) {
2667  __ Mov(result, dividend);
2668  } else if (shift == 1) {
2669  __ Add(result, dividend, Operand(dividend, LSR, 31));
2670  } else {
2671  __ Mov(result, Operand(dividend, ASR, 31));
2672  __ Add(result, dividend, Operand(result, LSR, 32 - shift));
2673  }
2674  if (shift > 0) __ Mov(result, Operand(result, ASR, shift));
2675  if (divisor < 0) __ Neg(result, result);
2676 }
2677 
2678 
2679 void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
2680  Register dividend = ToRegister32(instr->dividend());
2681  int32_t divisor = instr->divisor();
2682  Register result = ToRegister32(instr->result());
2683  ASSERT(!AreAliased(dividend, result));
2684 
2685  if (divisor == 0) {
2686  Deoptimize(instr->environment());
2687  return;
2688  }
2689 
2690  // Check for (0 / -x) that will produce negative zero.
2691  HDiv* hdiv = instr->hydrogen();
2692  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
2693  DeoptimizeIfZero(dividend, instr->environment());
2694  }
2695 
2696  __ TruncatingDiv(result, dividend, Abs(divisor));
2697  if (divisor < 0) __ Neg(result, result);
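 // TruncatingDiv divides by |divisor| using a precomputed multiplier rather
 // than an Sdiv. If not all uses truncate, the code below recomputes
 // dividend - result * divisor and deoptimizes on a nonzero remainder.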
2698 
2699  if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
2700  Register temp = ToRegister32(instr->temp());
2701  ASSERT(!AreAliased(dividend, result, temp));
2702  __ Sxtw(dividend.X(), dividend);
2703  __ Mov(temp, divisor);
2704  __ Smsubl(temp.X(), result, temp, dividend.X());
2705  DeoptimizeIfNotZero(temp, instr->environment());
2706  }
2707 }
2708 
2709 
2710 void LCodeGen::DoDivI(LDivI* instr) {
2711  HBinaryOperation* hdiv = instr->hydrogen();
2712  Register dividend = ToRegister32(instr->left());
2713  Register divisor = ToRegister32(instr->right());
2714  Register result = ToRegister32(instr->result());
2715 
2716  // Issue the division first, and then check for any deopt cases whilst the
2717  // result is computed.
2718  __ Sdiv(result, dividend, divisor);
2719 
2720  if (hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
2721  ASSERT_EQ(NULL, instr->temp());
2722  return;
2723  }
2724 
2725  Label deopt;
2726  // Check for x / 0.
2727  if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
2728  __ Cbz(divisor, &deopt);
2729  }
2730 
2731  // Check for (0 / -x) as that will produce negative zero.
2732  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
2733  __ Cmp(divisor, 0);
2734 
2735     // If the divisor < 0 (mi), compare the dividend, and deopt if it is
2736     // zero, i.e. a zero dividend with a negative divisor deopts.
2737     // If the divisor >= 0 (pl, the opposite of mi), set the flags to
2738     // condition ne so we don't deopt, i.e. a positive divisor doesn't deopt.
2739  __ Ccmp(dividend, 0, NoFlag, mi);
2740  __ B(eq, &deopt);
2741  }
2742 
2743  // Check for (kMinInt / -1).
2744  if (hdiv->CheckFlag(HValue::kCanOverflow)) {
2745  // Test dividend for kMinInt by subtracting one (cmp) and checking for
2746  // overflow.
2747  __ Cmp(dividend, 1);
2748     // If overflow is set, i.e. dividend = kMinInt, compare the divisor with
2749     // -1. If overflow is clear, set the flags for condition ne, as the
2750     // dividend isn't kMinInt, and thus we shouldn't deopt.
2751  __ Ccmp(divisor, -1, NoFlag, vs);
2752  __ B(eq, &deopt);
2753  }
2754 
2755  // Compute remainder and deopt if it's not zero.
2756  Register remainder = ToRegister32(instr->temp());
2757  __ Msub(remainder, result, divisor, dividend);
2758  __ Cbnz(remainder, &deopt);
2759 
2760  Label div_ok;
2761  __ B(&div_ok);
2762  __ Bind(&deopt);
2763  Deoptimize(instr->environment());
2764  __ Bind(&div_ok);
2765 }
2766 
2767 
2768 void LCodeGen::DoDoubleToIntOrSmi(LDoubleToIntOrSmi* instr) {
2769  DoubleRegister input = ToDoubleRegister(instr->value());
2770  Register result = ToRegister32(instr->result());
2771 
2772  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
2773  DeoptimizeIfMinusZero(input, instr->environment());
2774  }
2775 
2776  __ TryConvertDoubleToInt32(result, input, double_scratch());
2777  DeoptimizeIf(ne, instr->environment());
2778 
2779  if (instr->tag_result()) {
2780  __ SmiTag(result.X());
2781  }
2782 }
2783 
2784 
2785 void LCodeGen::DoDrop(LDrop* instr) {
2786  __ Drop(instr->count());
2787 }
2788 
2789 
2790 void LCodeGen::DoDummy(LDummy* instr) {
2791  // Nothing to see here, move on!
2792 }
2793 
2794 
2795 void LCodeGen::DoDummyUse(LDummyUse* instr) {
2796  // Nothing to see here, move on!
2797 }
2798 
2799 
2800 void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
2801  ASSERT(ToRegister(instr->context()).is(cp));
2802  // The FunctionLiteral instruction is marked as a call, so we can trash any register.
2803  ASSERT(instr->IsMarkedAsCall());
2804 
2805  // Use the fast case closure allocation code that allocates in new
2806  // space for nested functions that don't need literals cloning.
2807  bool pretenure = instr->hydrogen()->pretenure();
2808  if (!pretenure && instr->hydrogen()->has_no_literals()) {
2809  FastNewClosureStub stub(instr->hydrogen()->strict_mode(),
2810  instr->hydrogen()->is_generator());
2811  __ Mov(x2, Operand(instr->hydrogen()->shared_info()));
2812  CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
2813  } else {
2814  __ Mov(x2, Operand(instr->hydrogen()->shared_info()));
2815  __ Mov(x1, Operand(pretenure ? factory()->true_value()
2816  : factory()->false_value()));
2817  __ Push(cp, x2, x1);
2818  CallRuntime(Runtime::kHiddenNewClosure, 3, instr);
2819  }
2820 }
2821 
2822 
2823 void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
2824  Register map = ToRegister(instr->map());
2825  Register result = ToRegister(instr->result());
2826  Label load_cache, done;
2827 
2828  __ EnumLengthUntagged(result, map);
2829  __ Cbnz(result, &load_cache);
2830 
2831  __ Mov(result, Operand(isolate()->factory()->empty_fixed_array()));
2832  __ B(&done);
2833 
2834  __ Bind(&load_cache);
2835  __ LoadInstanceDescriptors(map, result);
2836  __ Ldr(result, FieldMemOperand(result, DescriptorArray::kEnumCacheOffset));
2837  __ Ldr(result, FieldMemOperand(result, FixedArray::SizeFor(instr->idx())));
2838  DeoptimizeIfZero(result, instr->environment());
2839 
2840  __ Bind(&done);
2841 }
2842 
2843 
2844 void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
2845  Register object = ToRegister(instr->object());
2846  Register null_value = x5;
2847 
2848  ASSERT(instr->IsMarkedAsCall());
2849  ASSERT(object.Is(x0));
2850 
2851  Label deopt;
2852 
2853  __ JumpIfRoot(object, Heap::kUndefinedValueRootIndex, &deopt);
2854 
2855  __ LoadRoot(null_value, Heap::kNullValueRootIndex);
2856  __ Cmp(object, null_value);
2857  __ B(eq, &deopt);
2858 
2859  __ JumpIfSmi(object, &deopt);
2860 
2861  STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
2862  __ CompareObjectType(object, x1, x1, LAST_JS_PROXY_TYPE);
2863  __ B(le, &deopt);
2864 
2865  Label use_cache, call_runtime;
2866  __ CheckEnumCache(object, null_value, x1, x2, x3, x4, &call_runtime);
2867 
2868  __ Ldr(object, FieldMemOperand(object, HeapObject::kMapOffset));
2869  __ B(&use_cache);
2870 
2871  __ Bind(&deopt);
2872  Deoptimize(instr->environment());
2873 
2874  // Get the set of properties to enumerate.
2875  __ Bind(&call_runtime);
2876  __ Push(object);
2877  CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr);
2878 
2879  __ Ldr(x1, FieldMemOperand(object, HeapObject::kMapOffset));
2880  __ JumpIfNotRoot(x1, Heap::kMetaMapRootIndex, &deopt);
2881 
2882  __ Bind(&use_cache);
2883 }
2884 
2885 
2886 void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
2887  Register input = ToRegister(instr->value());
2888  Register result = ToRegister(instr->result());
2889 
2890  __ AssertString(input);
2891 
2892  // Assert that we can use a W register load to get the hash.
2894  __ Ldr(result.W(), FieldMemOperand(input, String::kHashFieldOffset));
2895  __ IndexFromHash(result, result);
2896 }
2897 
2898 
2899 void LCodeGen::EmitGoto(int block) {
2900  // Do not emit jump if we are emitting a goto to the next block.
2901  if (!IsNextEmittedBlock(block)) {
2902  __ B(chunk_->GetAssemblyLabel(LookupDestination(block)));
2903  }
2904 }
2905 
2906 
2907 void LCodeGen::DoGoto(LGoto* instr) {
2908  EmitGoto(instr->block_id());
2909 }
2910 
2911 
2912 void LCodeGen::DoHasCachedArrayIndexAndBranch(
2913  LHasCachedArrayIndexAndBranch* instr) {
2914  Register input = ToRegister(instr->value());
2915  Register temp = ToRegister32(instr->temp());
2916 
2917  // Assert that the cache status bits fit in a W register.
2919  __ Ldr(temp, FieldMemOperand(input, String::kHashFieldOffset));
2920  __ Tst(temp, String::kContainsCachedArrayIndexMask);
2921  EmitBranch(instr, eq);
2922 }
2923 
2924 
2925 // The HHasInstanceTypeAndBranch instruction is built with an interval of
2926 // types to test, but is only used in very restricted ways. The only possible
2927 // kinds of intervals are:
2928 // - [ FIRST_TYPE, instr->to() ]
2929 // - [ instr->from(), LAST_TYPE ]
2930 // - instr->from() == instr->to()
2931 //
2932 // These kinds of intervals can be checked with only one compare instruction
2933 // provided the correct value and test condition are used.
2934 //
2935 // TestType() will return the value to use in the compare instruction and
2936 // BranchCondition() will return the condition to use depending on the kind
2937 // of interval actually specified in the instruction.
2938 static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
2939  InstanceType from = instr->from();
2940  InstanceType to = instr->to();
2941  if (from == FIRST_TYPE) return to;
2942  ASSERT((from == to) || (to == LAST_TYPE));
2943  return from;
2944 }
2945 
2946 
2947 // See comment above TestType function for what this function does.
2948 static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
2949  InstanceType from = instr->from();
2950  InstanceType to = instr->to();
2951  if (from == to) return eq;
2952  if (to == LAST_TYPE) return hs;
2953  if (from == FIRST_TYPE) return ls;
2954  UNREACHABLE();
2955  return eq;
2956 }
2957 
2958 
2959 void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
2960  Register input = ToRegister(instr->value());
2961  Register scratch = ToRegister(instr->temp());
2962 
2963  if (!instr->hydrogen()->value()->IsHeapObject()) {
2964  __ JumpIfSmi(input, instr->FalseLabel(chunk_));
2965  }
2966  __ CompareObjectType(input, scratch, scratch, TestType(instr->hydrogen()));
2967  EmitBranch(instr, BranchCondition(instr->hydrogen()));
2968 }
2969 
2970 
2971 void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) {
2972  Register result = ToRegister(instr->result());
2973  Register base = ToRegister(instr->base_object());
2974  if (instr->offset()->IsConstantOperand()) {
2975  __ Add(result, base, ToOperand32I(instr->offset()));
2976  } else {
2977  __ Add(result, base, Operand(ToRegister32(instr->offset()), SXTW));
2978  }
2979 }
2980 
2981 
2982 void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
2983  ASSERT(ToRegister(instr->context()).is(cp));
2984  // Assert that the arguments are in the registers expected by InstanceofStub.
2985  ASSERT(ToRegister(instr->left()).Is(InstanceofStub::left()));
2986  ASSERT(ToRegister(instr->right()).Is(InstanceofStub::right()));
2987 
2988  InstanceofStub stub(InstanceofStub::kArgsInRegisters);
2989  CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
2990 
2991  // InstanceofStub returns a result in x0:
2992  // 0 => not an instance
2993  // smi 1 => instance.
2994  __ Cmp(x0, 0);
2995  __ LoadTrueFalseRoots(x0, x1);
2996  __ Csel(x0, x0, x1, eq);
2997 }
2998 
2999 
3000 void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
3001  class DeferredInstanceOfKnownGlobal: public LDeferredCode {
3002  public:
3003  DeferredInstanceOfKnownGlobal(LCodeGen* codegen,
3004  LInstanceOfKnownGlobal* instr)
3005  : LDeferredCode(codegen), instr_(instr) { }
3006  virtual void Generate() {
3007  codegen()->DoDeferredInstanceOfKnownGlobal(instr_);
3008  }
3009  virtual LInstruction* instr() { return instr_; }
3010  private:
3011  LInstanceOfKnownGlobal* instr_;
3012  };
3013 
3014  DeferredInstanceOfKnownGlobal* deferred =
3015  new(zone()) DeferredInstanceOfKnownGlobal(this, instr);
3016 
3017  Label map_check, return_false, cache_miss, done;
3018  Register object = ToRegister(instr->value());
3019  Register result = ToRegister(instr->result());
3020  // x4 is expected in the associated deferred code and stub.
3021  Register map_check_site = x4;
3022  Register map = x5;
3023 
3024  // This instruction is marked as call. We can clobber any register.
3025  ASSERT(instr->IsMarkedAsCall());
3026 
3027  // We must take into account that object is in x11.
3028  ASSERT(object.Is(x11));
3029  Register scratch = x10;
3030 
3031  // A Smi is not instance of anything.
3032  __ JumpIfSmi(object, &return_false);
3033 
3034  // This is the inlined call site instanceof cache. The two occurrences of the
3035  // hole value will be patched to the last map/result pair generated by the
3036  // instanceof stub.
3037  __ Ldr(map, FieldMemOperand(object, HeapObject::kMapOffset));
3038  {
3039  // Below we use Factory::the_hole_value() on purpose instead of loading from
3040  // the root array to force relocation and later be able to patch with a
3041  // custom value.
3042  InstructionAccurateScope scope(masm(), 5);
3043  __ bind(&map_check);
3044  // Will be patched with the cached map.
3045  Handle<Cell> cell = factory()->NewCell(factory()->the_hole_value());
3046  __ LoadRelocated(scratch, Operand(Handle<Object>(cell)));
3047  __ ldr(scratch, FieldMemOperand(scratch, PropertyCell::kValueOffset));
3048  __ cmp(map, scratch);
3049  __ b(&cache_miss, ne);
3050  // The address of this instruction is computed relative to the map check
3051  // above, so check the size of the code generated.
3052  ASSERT(masm()->InstructionsGeneratedSince(&map_check) == 4);
3053  // Will be patched with the cached result.
3054  __ LoadRelocated(result, Operand(factory()->the_hole_value()));
3055  }
3056  __ B(&done);
3057 
3058  // The inlined call site cache did not match.
3059  // Check null and string before calling the deferred code.
3060  __ Bind(&cache_miss);
3061  // Compute the address of the map check. It must not be clobbered until the
3062  // InstanceOfStub has used it.
3063  __ Adr(map_check_site, &map_check);
3064  // Null is not instance of anything.
3065  __ JumpIfRoot(object, Heap::kNullValueRootIndex, &return_false);
3066 
3067  // String values are not instances of anything.
3068  // Return false if the object is a string. Otherwise, jump to the deferred
3069  // code.
3070  // Note that we can't jump directly to deferred code from
3071  // IsObjectJSStringType, because it uses tbz for the jump and the deferred
3072  // code can be out of range.
3073  __ IsObjectJSStringType(object, scratch, NULL, &return_false);
3074  __ B(deferred->entry());
3075 
3076  __ Bind(&return_false);
3077  __ LoadRoot(result, Heap::kFalseValueRootIndex);
3078 
3079  // Here result is either true or false.
3080  __ Bind(deferred->exit());
3081  __ Bind(&done);
3082 }
3083 
3084 
3085 void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
3086  Register result = ToRegister(instr->result());
3087  ASSERT(result.Is(x0)); // InstanceofStub returns its result in x0.
3088  InstanceofStub::Flags flags = InstanceofStub::kNoFlags;
3089  flags = static_cast<InstanceofStub::Flags>(
3090      flags | InstanceofStub::kArgsInRegisters);
3091  flags = static_cast<InstanceofStub::Flags>(
3092      flags | InstanceofStub::kReturnTrueFalseObject);
3093  flags = static_cast<InstanceofStub::Flags>(
3094      flags | InstanceofStub::kCallSiteInlineCheck);
3095 
3096  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
3097  LoadContextFromDeferred(instr->context());
3098 
3099  // Prepare InstanceofStub arguments.
3100  ASSERT(ToRegister(instr->value()).Is(InstanceofStub::left()));
3101  __ LoadObject(InstanceofStub::right(), instr->function());
3102 
3103  InstanceofStub stub(flags);
3104  CallCodeGeneric(stub.GetCode(isolate()),
3105  RelocInfo::CODE_TARGET,
3106  instr,
3107  RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
3108  LEnvironment* env = instr->GetDeferredLazyDeoptimizationEnvironment();
3109  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
3110 
3111  // Put the result value into the result register slot.
3112  __ StoreToSafepointRegisterSlot(result, result);
3113 }
3114 
3115 
3116 void LCodeGen::DoInstructionGap(LInstructionGap* instr) {
3117  DoGap(instr);
3118 }
3119 
3120 
3121 void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
3122  Register value = ToRegister32(instr->value());
3123  DoubleRegister result = ToDoubleRegister(instr->result());
3124  __ Scvtf(result, value);
3125 }
3126 
3127 
3128 void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
3129  ASSERT(ToRegister(instr->context()).is(cp));
3130  // The function is required to be in x1.
3131  ASSERT(ToRegister(instr->function()).is(x1));
3132  ASSERT(instr->HasPointerMap());
3133 
3134  Handle<JSFunction> known_function = instr->hydrogen()->known_function();
3135  if (known_function.is_null()) {
3136  LPointerMap* pointers = instr->pointer_map();
3137  SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
3138  ParameterCount count(instr->arity());
3139  __ InvokeFunction(x1, count, CALL_FUNCTION, generator);
3140  } else {
3141  CallKnownFunction(known_function,
3142  instr->hydrogen()->formal_parameter_count(),
3143  instr->arity(),
3144  instr,
3145  x1);
3146  }
3147 }
3148 
3149 
3150 void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
3151  Register temp1 = ToRegister(instr->temp1());
3152  Register temp2 = ToRegister(instr->temp2());
3153 
3154  // Get the frame pointer for the calling frame.
3155  __ Ldr(temp1, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
3156 
3157  // Skip the arguments adaptor frame if it exists.
3158  Label check_frame_marker;
3159  __ Ldr(temp2, MemOperand(temp1, StandardFrameConstants::kContextOffset));
3160  __ Cmp(temp2, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
3161  __ B(ne, &check_frame_marker);
3162  __ Ldr(temp1, MemOperand(temp1, StandardFrameConstants::kCallerFPOffset));
3163 
3164  // Check the marker in the calling frame.
3165  __ Bind(&check_frame_marker);
3166  __ Ldr(temp1, MemOperand(temp1, StandardFrameConstants::kMarkerOffset));
3167 
3168  EmitCompareAndBranch(
3169  instr, eq, temp1, Operand(Smi::FromInt(StackFrame::CONSTRUCT)));
3170 }
3171 
3172 
3173 void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
3174  Label* is_object = instr->TrueLabel(chunk_);
3175  Label* is_not_object = instr->FalseLabel(chunk_);
3176  Register value = ToRegister(instr->value());
3177  Register map = ToRegister(instr->temp1());
3178  Register scratch = ToRegister(instr->temp2());
3179 
3180  __ JumpIfSmi(value, is_not_object);
3181  __ JumpIfRoot(value, Heap::kNullValueRootIndex, is_object);
3182 
3183  __ Ldr(map, FieldMemOperand(value, HeapObject::kMapOffset));
3184 
3185  // Check for undetectable objects.
3186  __ Ldrb(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
3187  __ TestAndBranchIfAnySet(scratch, 1 << Map::kIsUndetectable, is_not_object);
3188 
3189  // Check that instance type is in object type range.
3190  __ IsInstanceJSObjectType(map, scratch, NULL);
3191  // Flags have been updated by IsInstanceJSObjectType. We can now test the
3192  // flags for "le" condition to check if the object's type is a valid
3193  // JS object type.
3194  EmitBranch(instr, le);
3195 }
3196 
3197 
3198 Condition LCodeGen::EmitIsString(Register input,
3199  Register temp1,
3200  Label* is_not_string,
3201  SmiCheck check_needed = INLINE_SMI_CHECK) {
3202  if (check_needed == INLINE_SMI_CHECK) {
3203  __ JumpIfSmi(input, is_not_string);
3204  }
3205  __ CompareObjectType(input, temp1, temp1, FIRST_NONSTRING_TYPE);
3206 
3207  return lt;
3208 }
3209 
3210 
3211 void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
3212  Register val = ToRegister(instr->value());
3213  Register scratch = ToRegister(instr->temp());
3214 
3215  SmiCheck check_needed =
3216  instr->hydrogen()->value()->IsHeapObject()
3217          ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
3218  Condition true_cond =
3219  EmitIsString(val, scratch, instr->FalseLabel(chunk_), check_needed);
3220 
3221  EmitBranch(instr, true_cond);
3222 }
3223 
3224 
3225 void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
3226  Register value = ToRegister(instr->value());
3227  STATIC_ASSERT(kSmiTag == 0);
3228  EmitTestAndBranch(instr, eq, value, kSmiTagMask);
3229 }
3230 
3231 
3232 void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
3233  Register input = ToRegister(instr->value());
3234  Register temp = ToRegister(instr->temp());
3235 
3236  if (!instr->hydrogen()->value()->IsHeapObject()) {
3237  __ JumpIfSmi(input, instr->FalseLabel(chunk_));
3238  }
3239  __ Ldr(temp, FieldMemOperand(input, HeapObject::kMapOffset));
3240  __ Ldrb(temp, FieldMemOperand(temp, Map::kBitFieldOffset));
3241 
3242  EmitTestAndBranch(instr, ne, temp, 1 << Map::kIsUndetectable);
3243 }
3244 
3245 
3246 static const char* LabelType(LLabel* label) {
3247  if (label->is_loop_header()) return " (loop header)";
3248  if (label->is_osr_entry()) return " (OSR entry)";
3249  return "";
3250 }
3251 
3252 
3253 void LCodeGen::DoLabel(LLabel* label) {
3254  Comment(";;; <@%d,#%d> -------------------- B%d%s --------------------",
3255  current_instruction_,
3256  label->hydrogen_value()->id(),
3257  label->block_id(),
3258  LabelType(label));
3259 
3260  __ Bind(label->label());
3261  current_block_ = label->block_id();
3262  DoGap(label);
3263 }
3264 
3265 
3266 void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
3267  Register context = ToRegister(instr->context());
3268  Register result = ToRegister(instr->result());
3269  __ Ldr(result, ContextMemOperand(context, instr->slot_index()));
3270  if (instr->hydrogen()->RequiresHoleCheck()) {
3271  if (instr->hydrogen()->DeoptimizesOnHole()) {
3272  DeoptimizeIfRoot(result, Heap::kTheHoleValueRootIndex,
3273  instr->environment());
3274  } else {
3275  Label not_the_hole;
3276  __ JumpIfNotRoot(result, Heap::kTheHoleValueRootIndex, &not_the_hole);
3277  __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
3278  __ Bind(&not_the_hole);
3279  }
3280  }
3281 }
3282 
3283 
3284 void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
3285  Register function = ToRegister(instr->function());
3286  Register result = ToRegister(instr->result());
3287  Register temp = ToRegister(instr->temp());
3288  Label deopt;
3289 
3290  // Check that the function really is a function. Leaves map in the result
3291  // register.
3292  __ JumpIfNotObjectType(function, result, temp, JS_FUNCTION_TYPE, &deopt);
3293 
3294  // Make sure that the function has an instance prototype.
3295  Label non_instance;
3296  __ Ldrb(temp, FieldMemOperand(result, Map::kBitFieldOffset));
3297  __ Tbnz(temp, Map::kHasNonInstancePrototype, &non_instance);
3298 
3299  // Get the prototype or initial map from the function.
3300  __ Ldr(result, FieldMemOperand(function,
3301                                 JSFunction::kPrototypeOrInitialMapOffset));
3302 
3303  // Check that the function has a prototype or an initial map.
3304  __ JumpIfRoot(result, Heap::kTheHoleValueRootIndex, &deopt);
3305 
3306  // If the function does not have an initial map, we're done.
3307  Label done;
3308  __ CompareObjectType(result, temp, temp, MAP_TYPE);
3309  __ B(ne, &done);
3310 
3311  // Get the prototype from the initial map.
3312  __ Ldr(result, FieldMemOperand(result, Map::kPrototypeOffset));
3313  __ B(&done);
3314 
3315  // Non-instance prototype: fetch prototype from constructor field in initial
3316  // map.
3317  __ Bind(&non_instance);
3318  __ Ldr(result, FieldMemOperand(result, Map::kConstructorOffset));
3319  __ B(&done);
3320 
3321  // Deoptimize case.
3322  __ Bind(&deopt);
3323  Deoptimize(instr->environment());
3324 
3325  // All done.
3326  __ Bind(&done);
3327 }
3328 
3329 
3330 void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
3331  Register result = ToRegister(instr->result());
3332  __ Mov(result, Operand(Handle<Object>(instr->hydrogen()->cell().handle())));
3333  __ Ldr(result, FieldMemOperand(result, Cell::kValueOffset));
3334  if (instr->hydrogen()->RequiresHoleCheck()) {
3335  DeoptimizeIfRoot(
3336  result, Heap::kTheHoleValueRootIndex, instr->environment());
3337  }
3338 }
3339 
3340 
3341 void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
3342  ASSERT(ToRegister(instr->context()).is(cp));
3343  ASSERT(ToRegister(instr->global_object()).Is(x0));
3344  ASSERT(ToRegister(instr->result()).Is(x0));
3345  __ Mov(x2, Operand(instr->name()));
3346  ContextualMode mode = instr->for_typeof() ? NOT_CONTEXTUAL : CONTEXTUAL;
3347  Handle<Code> ic = LoadIC::initialize_stub(isolate(), mode);
3348  CallCode(ic, RelocInfo::CODE_TARGET, instr);
3349 }
3350 
3351 
3352 MemOperand LCodeGen::PrepareKeyedExternalArrayOperand(
3353  Register key,
3354  Register base,
3355  Register scratch,
3356  bool key_is_smi,
3357  bool key_is_constant,
3358  int constant_key,
3359  ElementsKind elements_kind,
3360  int additional_index) {
3361  int element_size_shift = ElementsKindToShiftSize(elements_kind);
3362  int additional_offset = IsFixedTypedArrayElementsKind(elements_kind)
3363  ? FixedTypedArrayBase::kDataOffset - kHeapObjectTag
3364  : 0;
3365 
3366  if (key_is_constant) {
3367  int base_offset = ((constant_key + additional_index) << element_size_shift);
3368  return MemOperand(base, base_offset + additional_offset);
3369  }
3370 
3371  if (additional_index == 0) {
3372  if (key_is_smi) {
3373  // Key is smi: untag, and scale by element size.
3374  __ Add(scratch, base, Operand::UntagSmiAndScale(key, element_size_shift));
3375  return MemOperand(scratch, additional_offset);
3376  } else {
3377  // Key is not smi, and element size is not byte: scale by element size.
3378  if (additional_offset == 0) {
3379  return MemOperand(base, key, SXTW, element_size_shift);
3380  } else {
3381  __ Add(scratch, base, Operand(key, SXTW, element_size_shift));
3382  return MemOperand(scratch, additional_offset);
3383  }
3384  }
3385  } else {
3386  // TODO(all): Try to combine these cases a bit more intelligently.
3387  if (additional_offset == 0) {
3388  if (key_is_smi) {
3389  __ SmiUntag(scratch, key);
3390  __ Add(scratch.W(), scratch.W(), additional_index);
3391  } else {
3392  __ Add(scratch.W(), key.W(), additional_index);
3393  }
3394  return MemOperand(base, scratch, LSL, element_size_shift);
3395  } else {
3396  if (key_is_smi) {
3397  __ Add(scratch, base,
3398  Operand::UntagSmiAndScale(key, element_size_shift));
3399  } else {
3400  __ Add(scratch, base, Operand(key, SXTW, element_size_shift));
3401  }
3402  return MemOperand(
3403  scratch,
3404  (additional_index << element_size_shift) + additional_offset);
3405  }
3406  }
3407 }
3408 
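// Worked example of the address computation above (values chosen purely for
// illustration): for an EXTERNAL_INT32_ELEMENTS access with constant_key = 3
// and additional_index = 1, element_size_shift is 2, so the constant-key path
// produces base_offset = (3 + 1) << 2 = 16 and returns
// MemOperand(base, 16 + additional_offset); additional_offset is only non-zero
// for the in-heap fixed typed array kinds. For a non-constant smi key the same
// address is formed by untagging the key and scaling it by
// 1 << element_size_shift.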
3409 
3410 void LCodeGen::DoLoadKeyedExternal(LLoadKeyedExternal* instr) {
3411  Register ext_ptr = ToRegister(instr->elements());
3412  Register scratch;
3413  ElementsKind elements_kind = instr->elements_kind();
3414 
3415  bool key_is_smi = instr->hydrogen()->key()->representation().IsSmi();
3416  bool key_is_constant = instr->key()->IsConstantOperand();
3417  Register key = no_reg;
3418  int constant_key = 0;
3419  if (key_is_constant) {
3420  ASSERT(instr->temp() == NULL);
3421  constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
3422  if (constant_key & 0xf0000000) {
3423  Abort(kArrayIndexConstantValueTooBig);
3424  }
3425  } else {
3426  scratch = ToRegister(instr->temp());
3427  key = ToRegister(instr->key());
3428  }
3429 
3430  MemOperand mem_op =
3431  PrepareKeyedExternalArrayOperand(key, ext_ptr, scratch, key_is_smi,
3432  key_is_constant, constant_key,
3433  elements_kind,
3434  instr->additional_index());
3435 
3436  if ((elements_kind == EXTERNAL_FLOAT32_ELEMENTS) ||
3437  (elements_kind == FLOAT32_ELEMENTS)) {
3438  DoubleRegister result = ToDoubleRegister(instr->result());
3439  __ Ldr(result.S(), mem_op);
3440  __ Fcvt(result, result.S());
3441  } else if ((elements_kind == EXTERNAL_FLOAT64_ELEMENTS) ||
3442  (elements_kind == FLOAT64_ELEMENTS)) {
3443  DoubleRegister result = ToDoubleRegister(instr->result());
3444  __ Ldr(result, mem_op);
3445  } else {
3446  Register result = ToRegister(instr->result());
3447 
3448  switch (elements_kind) {
3449  case EXTERNAL_INT8_ELEMENTS:
3450  case INT8_ELEMENTS:
3451  __ Ldrsb(result, mem_op);
3452  break;
3453  case EXTERNAL_UINT8_CLAMPED_ELEMENTS:
3454  case EXTERNAL_UINT8_ELEMENTS:
3455  case UINT8_ELEMENTS:
3456  case UINT8_CLAMPED_ELEMENTS:
3457  __ Ldrb(result, mem_op);
3458  break;
3459  case EXTERNAL_INT16_ELEMENTS:
3460  case INT16_ELEMENTS:
3461  __ Ldrsh(result, mem_op);
3462  break;
3463  case EXTERNAL_UINT16_ELEMENTS:
3464  case UINT16_ELEMENTS:
3465  __ Ldrh(result, mem_op);
3466  break;
3467  case EXTERNAL_INT32_ELEMENTS:
3468  case INT32_ELEMENTS:
3469  __ Ldrsw(result, mem_op);
3470  break;
3471  case EXTERNAL_UINT32_ELEMENTS:
3472  case UINT32_ELEMENTS:
3473  __ Ldr(result.W(), mem_op);
3474  if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
3475  // Deopt if value >= 0x80000000.
3476  __ Tst(result, 0xFFFFFFFF80000000);
3477  DeoptimizeIf(ne, instr->environment());
3478  }
3479  break;
3480  case FLOAT32_ELEMENTS:
3481  case FLOAT64_ELEMENTS:
3482  case EXTERNAL_FLOAT32_ELEMENTS:
3483  case EXTERNAL_FLOAT64_ELEMENTS:
3484  case FAST_HOLEY_DOUBLE_ELEMENTS:
3485  case FAST_HOLEY_ELEMENTS:
3486  case FAST_HOLEY_SMI_ELEMENTS:
3487  case FAST_DOUBLE_ELEMENTS:
3488  case FAST_ELEMENTS:
3489  case FAST_SMI_ELEMENTS:
3490  case DICTIONARY_ELEMENTS:
3491  case SLOPPY_ARGUMENTS_ELEMENTS:
3492  UNREACHABLE();
3493  break;
3494  }
3495  }
3496 }
3497 
3498 
3499 void LCodeGen::CalcKeyedArrayBaseRegister(Register base,
3500  Register elements,
3501  Register key,
3502  bool key_is_tagged,
3503  ElementsKind elements_kind) {
3504  int element_size_shift = ElementsKindToShiftSize(elements_kind);
3505 
3506  // Even though the HLoad/StoreKeyed instructions force the input
3507  // representation for the key to be an integer, the input gets replaced during
3508  // bounds check elimination with the index argument to the bounds check, which
3509  // can be tagged, so that case must be handled here, too.
3510  if (key_is_tagged) {
3511  __ Add(base, elements, Operand::UntagSmiAndScale(key, element_size_shift));
3512  } else {
3513  // Sign extend key because it could be a 32-bit negative value or contain
3514  // garbage in the top 32-bits. The address computation happens in 64-bit.
3515  ASSERT((element_size_shift >= 0) && (element_size_shift <= 4));
3516  __ Add(base, elements, Operand(key, SXTW, element_size_shift));
3517  }
3518 }
3519 
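// Illustrative sketch of the two paths above, assuming a FAST_ELEMENTS array
// so that element_size_shift == kPointerSizeLog2 == 3 on this architecture:
//   tagged key 4 (raw bits 4 << kSmiShift): base = elements + ((4) << 3)
//   untagged key 4 (possibly with garbage in the upper 32 bits):
//     base = elements + (sxtw(4) << 3)
// Both paths end up addressing element 4; only the untagging differs.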
3520 
3521 void LCodeGen::DoLoadKeyedFixedDouble(LLoadKeyedFixedDouble* instr) {
3522  Register elements = ToRegister(instr->elements());
3523  DoubleRegister result = ToDoubleRegister(instr->result());
3524  Register load_base;
3525  int offset = 0;
3526 
3527  if (instr->key()->IsConstantOperand()) {
3528  ASSERT(instr->hydrogen()->RequiresHoleCheck() ||
3529  (instr->temp() == NULL));
3530 
3531  int constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
3532  if (constant_key & 0xf0000000) {
3533  Abort(kArrayIndexConstantValueTooBig);
3534  }
3535  offset = FixedDoubleArray::OffsetOfElementAt(constant_key +
3536  instr->additional_index());
3537  load_base = elements;
3538  } else {
3539  load_base = ToRegister(instr->temp());
3540  Register key = ToRegister(instr->key());
3541  bool key_is_tagged = instr->hydrogen()->key()->representation().IsSmi();
3542  CalcKeyedArrayBaseRegister(load_base, elements, key, key_is_tagged,
3543  instr->hydrogen()->elements_kind());
3544  offset = FixedDoubleArray::OffsetOfElementAt(instr->additional_index());
3545  }
3546  __ Ldr(result, FieldMemOperand(load_base, offset));
3547 
3548  if (instr->hydrogen()->RequiresHoleCheck()) {
3549  Register scratch = ToRegister(instr->temp());
3550 
3551  // TODO(all): Is it faster to reload this value to an integer register, or
3552  // move from fp to integer?
3553  __ Fmov(scratch, result);
3554  __ Cmp(scratch, kHoleNanInt64);
3555  DeoptimizeIf(eq, instr->environment());
3556  }
3557 }
3558 
3559 
3560 void LCodeGen::DoLoadKeyedFixed(LLoadKeyedFixed* instr) {
3561  Register elements = ToRegister(instr->elements());
3562  Register result = ToRegister(instr->result());
3563  Register load_base;
3564  int offset = 0;
3565 
3566  if (instr->key()->IsConstantOperand()) {
3567  ASSERT(instr->temp() == NULL);
3568  LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
3569  offset = FixedArray::OffsetOfElementAt(ToInteger32(const_operand) +
3570  instr->additional_index());
3571  load_base = elements;
3572  } else {
3573  load_base = ToRegister(instr->temp());
3574  Register key = ToRegister(instr->key());
3575  bool key_is_tagged = instr->hydrogen()->key()->representation().IsSmi();
3576  CalcKeyedArrayBaseRegister(load_base, elements, key, key_is_tagged,
3577  instr->hydrogen()->elements_kind());
3578  offset = FixedArray::OffsetOfElementAt(instr->additional_index());
3579  }
3580  Representation representation = instr->hydrogen()->representation();
3581 
3582  if (representation.IsInteger32() &&
3583  instr->hydrogen()->elements_kind() == FAST_SMI_ELEMENTS) {
3584  STATIC_ASSERT(kSmiValueSize == 32 && kSmiShift == 32 && kSmiTag == 0);
3585  __ Load(result, UntagSmiFieldMemOperand(load_base, offset),
3586  Representation::Integer32());
3587  } else {
3588  __ Load(result, FieldMemOperand(load_base, offset),
3589  representation);
3590  }
3591 
3592  if (instr->hydrogen()->RequiresHoleCheck()) {
3593  if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
3594  DeoptimizeIfNotSmi(result, instr->environment());
3595  } else {
3596  DeoptimizeIfRoot(result, Heap::kTheHoleValueRootIndex,
3597  instr->environment());
3598  }
3599  }
3600 }
3601 
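// Note on the UntagSmiFieldMemOperand fast path above (example value chosen
// for illustration): with kSmiShift == 32 a smi keeps its 32-bit payload in
// the upper word, so the smi 7 is the 64-bit tagged value 0x0000000700000000.
// Loading only the upper 32 bits of that field therefore yields the untagged
// integer 7 directly, with no separate SmiUntag step.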
3602 
3603 void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
3604  ASSERT(ToRegister(instr->context()).is(cp));
3605  ASSERT(ToRegister(instr->object()).Is(x1));
3606  ASSERT(ToRegister(instr->key()).Is(x0));
3607 
3608  Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
3609  CallCode(ic, RelocInfo::CODE_TARGET, instr);
3610 
3611  ASSERT(ToRegister(instr->result()).Is(x0));
3612 }
3613 
3614 
3615 void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
3616  HObjectAccess access = instr->hydrogen()->access();
3617  int offset = access.offset();
3618  Register object = ToRegister(instr->object());
3619 
3620  if (access.IsExternalMemory()) {
3621  Register result = ToRegister(instr->result());
3622  __ Load(result, MemOperand(object, offset), access.representation());
3623  return;
3624  }
3625 
3626  if (instr->hydrogen()->representation().IsDouble()) {
3627  FPRegister result = ToDoubleRegister(instr->result());
3628  __ Ldr(result, FieldMemOperand(object, offset));
3629  return;
3630  }
3631 
3632  Register result = ToRegister(instr->result());
3633  Register source;
3634  if (access.IsInobject()) {
3635  source = object;
3636  } else {
3637  // Load the properties array, using result as a scratch register.
3638  __ Ldr(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
3639  source = result;
3640  }
3641 
3642  if (access.representation().IsSmi() &&
3643  instr->hydrogen()->representation().IsInteger32()) {
3644  // Read int value directly from upper half of the smi.
3645  STATIC_ASSERT(kSmiValueSize == 32 && kSmiShift == 32 && kSmiTag == 0);
3646  __ Load(result, UntagSmiFieldMemOperand(source, offset),
3647  Representation::Integer32());
3648  } else {
3649  __ Load(result, FieldMemOperand(source, offset), access.representation());
3650  }
3651 }
3652 
3653 
3654 void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
3655  ASSERT(ToRegister(instr->context()).is(cp));
3656  // LoadIC expects x2 to hold the name, and x0 to hold the receiver.
3657  ASSERT(ToRegister(instr->object()).is(x0));
3658  __ Mov(x2, Operand(instr->name()));
3659 
3660  Handle<Code> ic = LoadIC::initialize_stub(isolate(), NOT_CONTEXTUAL);
3661  CallCode(ic, RelocInfo::CODE_TARGET, instr);
3662 
3663  ASSERT(ToRegister(instr->result()).is(x0));
3664 }
3665 
3666 
3667 void LCodeGen::DoLoadRoot(LLoadRoot* instr) {
3668  Register result = ToRegister(instr->result());
3669  __ LoadRoot(result, instr->index());
3670 }
3671 
3672 
3673 void LCodeGen::DoMapEnumLength(LMapEnumLength* instr) {
3674  Register result = ToRegister(instr->result());
3675  Register map = ToRegister(instr->value());
3676  __ EnumLengthSmi(result, map);
3677 }
3678 
3679 
3680 void LCodeGen::DoMathAbs(LMathAbs* instr) {
3681  Representation r = instr->hydrogen()->value()->representation();
3682  if (r.IsDouble()) {
3683  DoubleRegister input = ToDoubleRegister(instr->value());
3684  DoubleRegister result = ToDoubleRegister(instr->result());
3685  __ Fabs(result, input);
3686  } else if (r.IsSmi() || r.IsInteger32()) {
3687  Register input = r.IsSmi() ? ToRegister(instr->value())
3688  : ToRegister32(instr->value());
3689  Register result = r.IsSmi() ? ToRegister(instr->result())
3690  : ToRegister32(instr->result());
3691  Label done;
3692  __ Abs(result, input, NULL, &done);
3693  Deoptimize(instr->environment());
3694  __ Bind(&done);
3695  }
3696 }
3697 
3698 
3699 void LCodeGen::DoDeferredMathAbsTagged(LMathAbsTagged* instr,
3700  Label* exit,
3701  Label* allocation_entry) {
3702  // Handle the tricky cases of MathAbsTagged:
3703  // - HeapNumber inputs.
3704  // - Negative inputs produce a positive result, so a new HeapNumber is
3705  // allocated to hold it.
3706  // - Positive inputs are returned as-is, since there is no need to allocate
3707  // a new HeapNumber for the result.
3708  // - The (smi) input -0x80000000, produces +0x80000000, which does not fit
3709  // a smi. In this case, the inline code sets the result and jumps directly
3710  // to the allocation_entry label.
3711  ASSERT(instr->context() != NULL);
3712  ASSERT(ToRegister(instr->context()).is(cp));
3713  Register input = ToRegister(instr->value());
3714  Register temp1 = ToRegister(instr->temp1());
3715  Register temp2 = ToRegister(instr->temp2());
3716  Register result_bits = ToRegister(instr->temp3());
3717  Register result = ToRegister(instr->result());
3718 
3719  Label runtime_allocation;
3720 
3721  // Deoptimize if the input is not a HeapNumber.
3722  __ Ldr(temp1, FieldMemOperand(input, HeapObject::kMapOffset));
3723  DeoptimizeIfNotRoot(temp1, Heap::kHeapNumberMapRootIndex,
3724  instr->environment());
3725 
3726  // If the argument is positive, we can return it as-is, without any need to
3727  // allocate a new HeapNumber for the result. We have to do this in integer
3728  // registers (rather than with fabs) because we need to be able to distinguish
3729  // the two zeroes.
3730  __ Ldr(result_bits, FieldMemOperand(input, HeapNumber::kValueOffset));
3731  __ Mov(result, input);
3732  __ Tbz(result_bits, kXSignBit, exit);
3733 
3734  // Calculate abs(input) by clearing the sign bit.
3735  __ Bic(result_bits, result_bits, kXSignMask);
3736 
3737  // Allocate a new HeapNumber to hold the result.
3738  // result_bits The bit representation of the (double) result.
3739  __ Bind(allocation_entry);
3740  __ AllocateHeapNumber(result, &runtime_allocation, temp1, temp2);
3741  // The inline (non-deferred) code will store result_bits into result.
3742  __ B(exit);
3743 
3744  __ Bind(&runtime_allocation);
3745  if (FLAG_debug_code) {
3746  // Because result is in the pointer map, we need to make sure it has a valid
3747  // tagged value before we call the runtime. We speculatively set it to the
3748  // input (for abs(+x)) or to a smi (for abs(-SMI_MIN)), so it should already
3749  // be valid.
3750  Label result_ok;
3751  Register input = ToRegister(instr->value());
3752  __ JumpIfSmi(result, &result_ok);
3753  __ Cmp(input, result);
3754  __ Assert(eq, kUnexpectedValue);
3755  __ Bind(&result_ok);
3756  }
3757 
3758  { PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
3759  CallRuntimeFromDeferred(Runtime::kHiddenAllocateHeapNumber, 0, instr,
3760  instr->context());
3761  __ StoreToSafepointRegisterSlot(x0, result);
3762  }
3763  // The inline (non-deferred) code will store result_bits into result.
3764 }
3765 
3766 
3767 void LCodeGen::DoMathAbsTagged(LMathAbsTagged* instr) {
3768  // Class for deferred case.
3769  class DeferredMathAbsTagged: public LDeferredCode {
3770  public:
3771  DeferredMathAbsTagged(LCodeGen* codegen, LMathAbsTagged* instr)
3772  : LDeferredCode(codegen), instr_(instr) { }
3773  virtual void Generate() {
3774  codegen()->DoDeferredMathAbsTagged(instr_, exit(),
3775  allocation_entry());
3776  }
3777  virtual LInstruction* instr() { return instr_; }
3778  Label* allocation_entry() { return &allocation; }
3779  private:
3780  LMathAbsTagged* instr_;
3781  Label allocation;
3782  };
3783 
3784  // TODO(jbramley): The early-exit mechanism would skip the new frame handling
3785  // in GenerateDeferredCode. Tidy this up.
3786  ASSERT(!NeedsDeferredFrame());
3787 
3788  DeferredMathAbsTagged* deferred =
3789  new(zone()) DeferredMathAbsTagged(this, instr);
3790 
3791  ASSERT(instr->hydrogen()->value()->representation().IsTagged() ||
3792  instr->hydrogen()->value()->representation().IsSmi());
3793  Register input = ToRegister(instr->value());
3794  Register result_bits = ToRegister(instr->temp3());
3795  Register result = ToRegister(instr->result());
3796  Label done;
3797 
3798  // Handle smis inline.
3799  // We can treat smis as 64-bit integers, since the (low-order) tag bits will
3800  // never get set by the negation. This is therefore the same as the Integer32
3801  // case in DoMathAbs, except that it operates on 64-bit values.
3802  STATIC_ASSERT((kSmiValueSize == 32) && (kSmiShift == 32) && (kSmiTag == 0));
3803 
3804  __ JumpIfNotSmi(input, deferred->entry());
3805 
3806  __ Abs(result, input, NULL, &done);
3807 
3808  // The result is the magnitude (abs) of the smallest value a smi can
3809  // represent, encoded as a double.
3810  __ Mov(result_bits, double_to_rawbits(0x80000000));
3811  __ B(deferred->allocation_entry());
3812 
3813  __ Bind(deferred->exit());
3814  __ Str(result_bits, FieldMemOperand(result, HeapNumber::kValueOffset));
3815 
3816  __ Bind(&done);
3817 }
3818 
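// Worked example for the overflow case handled above: the most negative smi
// payload is -0x80000000 (kSmiMin for a 32-bit payload). Its magnitude,
// +0x80000000, is one larger than the largest smi (0x7fffffff), so the inline
// code loads the bit pattern of the double 2147483648.0 into result_bits and
// branches to the deferred allocation path to box it in a HeapNumber.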
3819 
3820 void LCodeGen::DoMathExp(LMathExp* instr) {
3821  DoubleRegister input = ToDoubleRegister(instr->value());
3822  DoubleRegister result = ToDoubleRegister(instr->result());
3823  DoubleRegister double_temp1 = ToDoubleRegister(instr->double_temp1());
3824  DoubleRegister double_temp2 = double_scratch();
3825  Register temp1 = ToRegister(instr->temp1());
3826  Register temp2 = ToRegister(instr->temp2());
3827  Register temp3 = ToRegister(instr->temp3());
3828 
3829  MathExpGenerator::EmitMathExp(masm(), input, result,
3830  double_temp1, double_temp2,
3831  temp1, temp2, temp3);
3832 }
3833 
3834 
3835 void LCodeGen::DoMathFloor(LMathFloor* instr) {
3836  // TODO(jbramley): If we could provide a double result, we could use frintm
3837  // and produce a valid double result in a single instruction.
3838  DoubleRegister input = ToDoubleRegister(instr->value());
3839  Register result = ToRegister(instr->result());
3840 
3841  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3842  DeoptimizeIfMinusZero(input, instr->environment());
3843  }
3844 
3845  __ Fcvtms(result, input);
3846 
3847  // Check that the result fits into a 32-bit integer.
3848  // - The result did not overflow.
3849  __ Cmp(result, Operand(result, SXTW));
3850  // - The input was not NaN.
3851  __ Fccmp(input, input, NoFlag, eq);
3852  DeoptimizeIf(ne, instr->environment());
3853 }
3854 
3855 
3856 void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
3857  Register dividend = ToRegister32(instr->dividend());
3858  Register result = ToRegister32(instr->result());
3859  int32_t divisor = instr->divisor();
3860 
3861  // If the divisor is positive, things are easy: There can be no deopts and we
3862  // can simply do an arithmetic right shift.
3863  if (divisor == 1) return;
3864  int32_t shift = WhichPowerOf2Abs(divisor);
3865  if (divisor > 1) {
3866  __ Mov(result, Operand(dividend, ASR, shift));
3867  return;
3868  }
3869 
3870  // If the divisor is negative, we have to negate and handle edge cases.
3871  Label not_kmin_int, done;
3872  __ Negs(result, dividend);
3873  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3874  DeoptimizeIf(eq, instr->environment());
3875  }
3876  if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
3877  // Note that we could emit branch-free code, but that would need one more
3878  // register.
3879  if (divisor == -1) {
3880  DeoptimizeIf(vs, instr->environment());
3881  } else {
3882  __ B(vc, &not_kmin_int);
3883  __ Mov(result, kMinInt / divisor);
3884  __ B(&done);
3885  }
3886  }
3887  __ bind(&not_kmin_int);
3888  __ Mov(result, Operand(dividend, ASR, shift));
3889  __ bind(&done);
3890 }
3891 
3892 
3893 void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
3894  Register dividend = ToRegister32(instr->dividend());
3895  int32_t divisor = instr->divisor();
3896  Register result = ToRegister32(instr->result());
3897  ASSERT(!AreAliased(dividend, result));
3898 
3899  if (divisor == 0) {
3900  Deoptimize(instr->environment());
3901  return;
3902  }
3903 
3904  // Check for (0 / -x) that will produce negative zero.
3905  HMathFloorOfDiv* hdiv = instr->hydrogen();
3906  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
3907  __ Cmp(dividend, 0);
3908  DeoptimizeIf(eq, instr->environment());
3909  }
3910 
3911  // Easy case: We need no dynamic check for the dividend and the flooring
3912  // division is the same as the truncating division.
3913  if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) ||
3914  (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) {
3915  __ TruncatingDiv(result, dividend, Abs(divisor));
3916  if (divisor < 0) __ Neg(result, result);
3917  return;
3918  }
3919 
3920  // In the general case we may need to adjust before and after the truncating
3921  // division to get a flooring division.
3922  Register temp = ToRegister32(instr->temp());
3923  ASSERT(!AreAliased(temp, dividend, result));
3924  Label needs_adjustment, done;
3925  __ Cmp(dividend, 0);
3926  __ B(divisor > 0 ? lt : gt, &needs_adjustment);
3927  __ TruncatingDiv(result, dividend, Abs(divisor));
3928  if (divisor < 0) __ Neg(result, result);
3929  __ B(&done);
3930  __ bind(&needs_adjustment);
3931  __ Add(temp, dividend, Operand(divisor > 0 ? 1 : -1));
3932  __ TruncatingDiv(result, temp, Abs(divisor));
3933  if (divisor < 0) __ Neg(result, result);
3934  __ Sub(result, result, Operand(1));
3935  __ bind(&done);
3936 }
3937 
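// Worked example of the adjustment above (illustrative numbers): for
// dividend = -7 and divisor = 2, truncating division gives -3 but flooring
// division must give -4. The operands have opposite signs, so the
// needs_adjustment path is taken:
//   temp   = -7 + 1 = -6
//   result = TruncatingDiv(-6, 2) = -3
//   result = -3 - 1 = -4
// When the signs agree (e.g. 7 / 2), truncation and flooring coincide and the
// fast path is used unchanged.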
3938 
3939 void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
3940  Register dividend = ToRegister32(instr->dividend());
3941  Register divisor = ToRegister32(instr->divisor());
3942  Register remainder = ToRegister32(instr->temp());
3943  Register result = ToRegister32(instr->result());
3944 
3945  // Sdiv cannot cause an exception on ARM, so we can execute it
3946  // speculatively, before the divide-by-zero check below.
3947  __ Sdiv(result, dividend, divisor);
3948 
3949  // Check for x / 0.
3950  DeoptimizeIfZero(divisor, instr->environment());
3951 
3952  // Check for (kMinInt / -1).
3953  if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
3954  // The V flag will be set iff dividend == kMinInt.
3955  __ Cmp(dividend, 1);
3956  __ Ccmp(divisor, -1, NoFlag, vs);
3957  DeoptimizeIf(eq, instr->environment());
3958  }
3959 
3960  // Check for (0 / -x) that will produce negative zero.
3961  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3962  __ Cmp(divisor, 0);
3963  __ Ccmp(dividend, 0, ZFlag, mi);
3964  // "divisor" can't be zero here because the code would already have
3965  // deoptimized. The Z flag is set only if (divisor < 0) and (dividend == 0).
3966  // In this case we need to deoptimize to produce a -0.
3967  DeoptimizeIf(eq, instr->environment());
3968  }
3969 
3970  Label done;
3971  // If both operands have the same sign then we are done.
3972  __ Eor(remainder, dividend, divisor);
3973  __ Tbz(remainder, kWSignBit, &done);
3974 
3975  // Check if the result needs to be corrected.
3976  __ Msub(remainder, result, divisor, dividend);
3977  __ Cbz(remainder, &done);
3978  __ Sub(result, result, 1);
3979 
3980  __ Bind(&done);
3981 }
3982 
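// Worked example of the sign correction above (illustrative numbers): for
// dividend = -7 and divisor = 2, Sdiv truncates towards zero and produces -3.
// The operands have different signs, so the remainder is checked:
//   remainder = -7 - (-3 * 2) = -1, which is non-zero,
// so the result is decremented to -4, the flooring quotient. For -8 / 2 the
// remainder is zero and the truncated quotient -4 is already correct.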
3983 
3984 void LCodeGen::DoMathLog(LMathLog* instr) {
3985  ASSERT(instr->IsMarkedAsCall());
3986  ASSERT(ToDoubleRegister(instr->value()).is(d0));
3987  __ CallCFunction(ExternalReference::math_log_double_function(isolate()),
3988  0, 1);
3989  ASSERT(ToDoubleRegister(instr->result()).Is(d0));
3990 }
3991 
3992 
3993 void LCodeGen::DoMathClz32(LMathClz32* instr) {
3994  Register input = ToRegister32(instr->value());
3995  Register result = ToRegister32(instr->result());
3996  __ Clz(result, input);
3997 }
3998 
3999 
4000 void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
4001  DoubleRegister input = ToDoubleRegister(instr->value());
4002  DoubleRegister result = ToDoubleRegister(instr->result());
4003  Label done;
4004 
4005  // Math.pow(x, 0.5) differs from fsqrt(x) in the following cases:
4006  // Math.pow(-Infinity, 0.5) == +Infinity
4007  // Math.pow(-0.0, 0.5) == +0.0
4008 
4009  // Catch -infinity inputs first.
4010  // TODO(jbramley): A constant infinity register would be helpful here.
4011  __ Fmov(double_scratch(), kFP64NegativeInfinity);
4012  __ Fcmp(double_scratch(), input);
4013  __ Fabs(result, input);
4014  __ B(&done, eq);
4015 
4016  // Add +0.0 to convert -0.0 to +0.0.
4017  __ Fadd(double_scratch(), input, fp_zero);
4018  __ Fsqrt(result, double_scratch());
4019 
4020  __ Bind(&done);
4021 }
4022 
4023 
4024 void LCodeGen::DoPower(LPower* instr) {
4025  Representation exponent_type = instr->hydrogen()->right()->representation();
4026  // Having marked this as a call, we can use any registers.
4027  // Just make sure that the input/output registers are the expected ones.
4028  ASSERT(!instr->right()->IsDoubleRegister() ||
4029  ToDoubleRegister(instr->right()).is(d1));
4030  ASSERT(exponent_type.IsInteger32() || !instr->right()->IsRegister() ||
4031  ToRegister(instr->right()).is(x11));
4032  ASSERT(!exponent_type.IsInteger32() || ToRegister(instr->right()).is(x12));
4033  ASSERT(ToDoubleRegister(instr->left()).is(d0));
4034  ASSERT(ToDoubleRegister(instr->result()).is(d0));
4035 
4036  if (exponent_type.IsSmi()) {
4037  MathPowStub stub(MathPowStub::TAGGED);
4038  __ CallStub(&stub);
4039  } else if (exponent_type.IsTagged()) {
4040  Label no_deopt;
4041  __ JumpIfSmi(x11, &no_deopt);
4042  __ Ldr(x0, FieldMemOperand(x11, HeapObject::kMapOffset));
4043  DeoptimizeIfNotRoot(x0, Heap::kHeapNumberMapRootIndex,
4044  instr->environment());
4045  __ Bind(&no_deopt);
4046  MathPowStub stub(MathPowStub::TAGGED);
4047  __ CallStub(&stub);
4048  } else if (exponent_type.IsInteger32()) {
4049  // Ensure integer exponent has no garbage in top 32-bits, as MathPowStub
4050  // supports large integer exponents.
4051  Register exponent = ToRegister(instr->right());
4052  __ Sxtw(exponent, exponent);
4053  MathPowStub stub(MathPowStub::INTEGER);
4054  __ CallStub(&stub);
4055  } else {
4056  ASSERT(exponent_type.IsDouble());
4057  MathPowStub stub(MathPowStub::DOUBLE);
4058  __ CallStub(&stub);
4059  }
4060 }
4061 
4062 
4063 void LCodeGen::DoMathRound(LMathRound* instr) {
4064  // TODO(jbramley): We could provide a double result here using frint.
4065  DoubleRegister input = ToDoubleRegister(instr->value());
4066  DoubleRegister temp1 = ToDoubleRegister(instr->temp1());
4067  Register result = ToRegister(instr->result());
4068  Label try_rounding;
4069  Label done;
4070 
4071  // Math.round() rounds to the nearest integer, with ties going towards
4072  // +infinity. This does not match any IEEE-754 rounding mode.
4073  // - Infinities and NaNs are propagated unchanged, but cause deopts because
4074  // they can't be represented as integers.
4075  // - The sign of the result is the same as the sign of the input. This means
4076  // that -0.0 rounds to itself, and values -0.5 <= input < 0 also produce a
4077  // result of -0.0.
4078 
4079  DoubleRegister dot_five = double_scratch();
4080  __ Fmov(dot_five, 0.5);
4081  __ Fabs(temp1, input);
4082  __ Fcmp(temp1, dot_five);
4083  // If input is in [-0.5, -0], the result is -0.
4084  // If input is in [+0, +0.5[, the result is +0.
4085  // If the input is +0.5, the result is 1.
4086  __ B(hi, &try_rounding); // hi so NaN will also branch.
4087 
4088  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
4089  __ Fmov(result, input);
4090  DeoptimizeIfNegative(result, instr->environment()); // [-0.5, -0.0].
4091  }
4092  __ Fcmp(input, dot_five);
4093  __ Mov(result, 1); // +0.5.
4094  // The remaining cases ([+0, +0.5[, or [-0.5, +0.5[ when kBailoutOnMinusZero
4095  // is not set) return 0 (xzr).
4096  __ Csel(result, result, xzr, eq);
4097  __ B(&done);
4098 
4099  __ Bind(&try_rounding);
4100  // Since we're providing a 32-bit result, we can implement ties-to-infinity by
4101  // adding 0.5 to the input, then taking the floor of the result. This does not
4102  // work for very large positive doubles because adding 0.5 would cause an
4103  // intermediate rounding stage, so a different approach will be necessary if a
4104  // double result is needed.
4105  __ Fadd(temp1, input, dot_five);
4106  __ Fcvtms(result, temp1);
4107 
4108  // Deopt if
4109  // * the input was NaN
4110  // * the result is not representable using a 32-bit integer.
4111  __ Fcmp(input, 0.0);
4112  __ Ccmp(result, Operand(result.W(), SXTW), NoFlag, vc);
4113  DeoptimizeIf(ne, instr->environment());
4114 
4115  __ Bind(&done);
4116 }
4117 
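// Worked examples of the rounding scheme above (illustrative values):
//   input =  2.5  ->  2.5 + 0.5 = 3.0, Fcvtms gives 3   (ties go up)
//   input = -2.5  -> -2.5 + 0.5 = -2.0, Fcvtms gives -2 (ties go towards +inf)
//   input = -0.3  -> handled by the early-exit path: result is 0, or a deopt
//                    if kBailoutOnMinusZero is set, since the answer is -0.0.
// Very large inputs (around 2^52 and above) cannot use the add-then-floor
// trick because the 0.5 addition itself rounds, but such values do not fit a
// 32-bit result and deopt anyway.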
4118 
4119 void LCodeGen::DoMathSqrt(LMathSqrt* instr) {
4120  DoubleRegister input = ToDoubleRegister(instr->value());
4121  DoubleRegister result = ToDoubleRegister(instr->result());
4122  __ Fsqrt(result, input);
4123 }
4124 
4125 
4126 void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
4127  HMathMinMax::Operation op = instr->hydrogen()->operation();
4128  if (instr->hydrogen()->representation().IsInteger32()) {
4129  Register result = ToRegister32(instr->result());
4130  Register left = ToRegister32(instr->left());
4131  Operand right = ToOperand32I(instr->right());
4132 
4133  __ Cmp(left, right);
4134  __ Csel(result, left, right, (op == HMathMinMax::kMathMax) ? ge : le);
4135  } else if (instr->hydrogen()->representation().IsSmi()) {
4136  Register result = ToRegister(instr->result());
4137  Register left = ToRegister(instr->left());
4138  Operand right = ToOperand(instr->right());
4139 
4140  __ Cmp(left, right);
4141  __ Csel(result, left, right, (op == HMathMinMax::kMathMax) ? ge : le);
4142  } else {
4143  ASSERT(instr->hydrogen()->representation().IsDouble());
4144  DoubleRegister result = ToDoubleRegister(instr->result());
4145  DoubleRegister left = ToDoubleRegister(instr->left());
4146  DoubleRegister right = ToDoubleRegister(instr->right());
4147 
4148  if (op == HMathMinMax::kMathMax) {
4149  __ Fmax(result, left, right);
4150  } else {
4151  ASSERT(op == HMathMinMax::kMathMin);
4152  __ Fmin(result, left, right);
4153  }
4154  }
4155 }
4156 
4157 
4158 void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) {
4159  Register dividend = ToRegister32(instr->dividend());
4160  int32_t divisor = instr->divisor();
4161  ASSERT(dividend.is(ToRegister32(instr->result())));
4162 
4163  // Theoretically, a variation of the branch-free code for integer division by
4164  // a power of 2 (calculating the remainder via an additional multiplication
4165  // (which gets simplified to an 'and') and subtraction) should be faster, and
4166  // this is exactly what GCC and clang emit. Nevertheless, benchmarks seem to
4167  // indicate that positive dividends are heavily favored, so the branching
4168  // version performs better.
4169  HMod* hmod = instr->hydrogen();
4170  int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
4171  Label dividend_is_not_negative, done;
4172  if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) {
4173  __ Cmp(dividend, 0);
4174  __ B(pl, &dividend_is_not_negative);
4175  // Note that this is correct even for kMinInt operands.
4176  __ Neg(dividend, dividend);
4177  __ And(dividend, dividend, mask);
4178  __ Negs(dividend, dividend);
4179  if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
4180  DeoptimizeIf(eq, instr->environment());
4181  }
4182  __ B(&done);
4183  }
4184 
4185  __ bind(&dividend_is_not_negative);
4186  __ And(dividend, dividend, mask);
4187  __ bind(&done);
4188 }
4189 
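// Worked example of the branching modulus above (illustrative numbers): for
// divisor = 4 the mask is 3.
//   dividend =  7: the negative branch is skipped and 7 & 3 = 3.
//   dividend = -7: Neg gives 7, 7 & 3 = 3, Negs gives -3, which matches
//                  ECMAScript's -7 % 4 == -3. A zero result from a negative
//                  dividend (e.g. -4 % 4 == -0) is what the kBailoutOnMinusZero
//                  deopt above catches.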
4190 
4191 void LCodeGen::DoModByConstI(LModByConstI* instr) {
4192  Register dividend = ToRegister32(instr->dividend());
4193  int32_t divisor = instr->divisor();
4194  Register result = ToRegister32(instr->result());
4195  Register temp = ToRegister32(instr->temp());
4196  ASSERT(!AreAliased(dividend, result, temp));
4197 
4198  if (divisor == 0) {
4199  Deoptimize(instr->environment());
4200  return;
4201  }
4202 
4203  __ TruncatingDiv(result, dividend, Abs(divisor));
4204  __ Sxtw(dividend.X(), dividend);
4205  __ Mov(temp, Abs(divisor));
4206  __ Smsubl(result.X(), result, temp, dividend.X());
4207 
4208  // Check for negative zero.
4209  HMod* hmod = instr->hydrogen();
4210  if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
4211  Label remainder_not_zero;
4212  __ Cbnz(result, &remainder_not_zero);
4213  DeoptimizeIfNegative(dividend, instr->environment());
4214  __ bind(&remainder_not_zero);
4215  }
4216 }
4217 
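// Sketch of the arithmetic above, assuming divisor = 5 and dividend = -7
// (illustrative values): TruncatingDiv(-7, 5) = -1, and Smsubl computes
//   result = dividend - quotient * |divisor| = -7 - (-1 * 5) = -2,
// matching -7 % 5 == -2 in JavaScript. A zero remainder with a negative
// dividend (e.g. -10 % 5) is the -0 case that the final check deoptimizes on.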
4218 
4219 void LCodeGen::DoModI(LModI* instr) {
4220  Register dividend = ToRegister32(instr->left());
4221  Register divisor = ToRegister32(instr->right());
4222  Register result = ToRegister32(instr->result());
4223 
4224  Label deopt, done;
4225  // modulo = dividend - quotient * divisor
4226  __ Sdiv(result, dividend, divisor);
4227  if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
4228  // Combine the deoptimization sites.
4229  Label ok;
4230  __ Cbnz(divisor, &ok);
4231  __ Bind(&deopt);
4232  Deoptimize(instr->environment());
4233  __ Bind(&ok);
4234  }
4235  __ Msub(result, result, divisor, dividend);
4236  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
4237  __ Cbnz(result, &done);
4238  if (deopt.is_bound()) { // TODO(all) This is a hack, remove this...
4239  __ Tbnz(dividend, kWSignBit, &deopt);
4240  } else {
4241  DeoptimizeIfNegative(dividend, instr->environment());
4242  }
4243  }
4244  __ Bind(&done);
4245 }
4246 
4247 
4248 void LCodeGen::DoMulConstIS(LMulConstIS* instr) {
4249  ASSERT(instr->hydrogen()->representation().IsSmiOrInteger32());
4250  bool is_smi = instr->hydrogen()->representation().IsSmi();
4251  Register result =
4252  is_smi ? ToRegister(instr->result()) : ToRegister32(instr->result());
4253  Register left =
4254  is_smi ? ToRegister(instr->left()) : ToRegister32(instr->left()) ;
4255  int32_t right = ToInteger32(instr->right());
4256  ASSERT((right > -kMaxInt) && (right < kMaxInt));
4257 
4258  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
4259  bool bailout_on_minus_zero =
4260  instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero);
4261 
4262  if (bailout_on_minus_zero) {
4263  if (right < 0) {
4264  // The result is -0 if right is negative and left is zero.
4265  DeoptimizeIfZero(left, instr->environment());
4266  } else if (right == 0) {
4267  // The result is -0 if the right is zero and the left is negative.
4268  DeoptimizeIfNegative(left, instr->environment());
4269  }
4270  }
4271 
4272  switch (right) {
4273  // Cases which can detect overflow.
4274  case -1:
4275  if (can_overflow) {
4276  // Only 0x80000000 can overflow here.
4277  __ Negs(result, left);
4278  DeoptimizeIf(vs, instr->environment());
4279  } else {
4280  __ Neg(result, left);
4281  }
4282  break;
4283  case 0:
4284  // This case can never overflow.
4285  __ Mov(result, 0);
4286  break;
4287  case 1:
4288  // This case can never overflow.
4289  __ Mov(result, left, kDiscardForSameWReg);
4290  break;
4291  case 2:
4292  if (can_overflow) {
4293  __ Adds(result, left, left);
4294  DeoptimizeIf(vs, instr->environment());
4295  } else {
4296  __ Add(result, left, left);
4297  }
4298  break;
4299 
4300  default:
4301  // Multiplication by constant powers of two (and some related values)
4302  // can be done efficiently with shifted operands.
4303  int32_t right_abs = Abs(right);
4304 
4305  if (IsPowerOf2(right_abs)) {
4306  int right_log2 = WhichPowerOf2(right_abs);
4307 
4308  if (can_overflow) {
4309  Register scratch = result;
4310  ASSERT(!AreAliased(scratch, left));
4311  __ Cls(scratch, left);
4312  __ Cmp(scratch, right_log2);
4313  DeoptimizeIf(lt, instr->environment());
4314  }
4315 
4316  if (right >= 0) {
4317  // result = left << log2(right)
4318  __ Lsl(result, left, right_log2);
4319  } else {
4320  // result = -left << log2(-right)
4321  if (can_overflow) {
4322  __ Negs(result, Operand(left, LSL, right_log2));
4323  DeoptimizeIf(vs, instr->environment());
4324  } else {
4325  __ Neg(result, Operand(left, LSL, right_log2));
4326  }
4327  }
4328  return;
4329  }
4330 
4331 
4332  // For the following cases, we could perform a conservative overflow check
4333  // with CLS as above. However the few cycles saved are likely not worth
4334  // the risk of deoptimizing more often than required.
4335  ASSERT(!can_overflow);
4336 
4337  if (right >= 0) {
4338  if (IsPowerOf2(right - 1)) {
4339  // result = left + left << log2(right - 1)
4340  __ Add(result, left, Operand(left, LSL, WhichPowerOf2(right - 1)));
4341  } else if (IsPowerOf2(right + 1)) {
4342  // result = -left + left << log2(right + 1)
4343  __ Sub(result, left, Operand(left, LSL, WhichPowerOf2(right + 1)));
4344  __ Neg(result, result);
4345  } else {
4346  UNREACHABLE();
4347  }
4348  } else {
4349  if (IsPowerOf2(-right + 1)) {
4350  // result = left - left << log2(-right + 1)
4351  __ Sub(result, left, Operand(left, LSL, WhichPowerOf2(-right + 1)));
4352  } else if (IsPowerOf2(-right - 1)) {
4353  // result = -left - left << log2(-right - 1)
4354  __ Add(result, left, Operand(left, LSL, WhichPowerOf2(-right - 1)));
4355  __ Neg(result, result);
4356  } else {
4357  UNREACHABLE();
4358  }
4359  }
4360  }
4361 }
4362 
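// Examples of the constant-multiply strategies above (illustrative values):
//   right =  8: a power of two, so result = left << 3 (preceded by the
//               CLS-based overflow check when kCanOverflow is set).
//   right =  5: 2^2 + 1, so result = left + (left << 2).
//   right =  7: 2^3 - 1, so result = left - (left << 3) followed by Neg,
//               i.e. (left << 3) - left.
//   right = -3: -right + 1 == 4 is a power of two, so
//               result = left - (left << 2) = -3 * left directly.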
4363 
4364 void LCodeGen::DoMulI(LMulI* instr) {
4365  Register result = ToRegister32(instr->result());
4366  Register left = ToRegister32(instr->left());
4367  Register right = ToRegister32(instr->right());
4368 
4369  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
4370  bool bailout_on_minus_zero =
4371  instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero);
4372 
4373  if (bailout_on_minus_zero && !left.Is(right)) {
4374  // If one operand is zero and the other is negative, the result is -0.
4375  // - Set Z (eq) if either left or right, or both, are 0.
4376  __ Cmp(left, 0);
4377  __ Ccmp(right, 0, ZFlag, ne);
4378  // - If so (eq), set N (mi) if left + right is negative.
4379  // - Otherwise, clear N.
4380  __ Ccmn(left, right, NoFlag, eq);
4381  DeoptimizeIf(mi, instr->environment());
4382  }
4383 
4384  if (can_overflow) {
4385  __ Smull(result.X(), left, right);
4386  __ Cmp(result.X(), Operand(result, SXTW));
4387  DeoptimizeIf(ne, instr->environment());
4388  } else {
4389  __ Mul(result, left, right);
4390  }
4391 }
4392 
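// Note on the overflow check above: Smull produces the full 64-bit product of
// the two 32-bit operands, and comparing it with its own sign-extended low
// word detects any result that does not fit in 32 bits. For example (values
// for illustration), 0x40000000 * 4 = 0x100000000; its low 32 bits are 0, so
// the SXTW comparison fails and the instruction deoptimizes.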
4393 
4394 void LCodeGen::DoMulS(LMulS* instr) {
4395  Register result = ToRegister(instr->result());
4396  Register left = ToRegister(instr->left());
4397  Register right = ToRegister(instr->right());
4398 
4399  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
4400  bool bailout_on_minus_zero =
4401  instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero);
4402 
4403  if (bailout_on_minus_zero && !left.Is(right)) {
4404  // If one operand is zero and the other is negative, the result is -0.
4405  // - Set Z (eq) if either left or right, or both, are 0.
4406  __ Cmp(left, 0);
4407  __ Ccmp(right, 0, ZFlag, ne);
4408  // - If so (eq), set N (mi) if left + right is negative.
4409  // - Otherwise, clear N.
4410  __ Ccmn(left, right, NoFlag, eq);
4411  DeoptimizeIf(mi, instr->environment());
4412  }
4413 
4414  STATIC_ASSERT((kSmiShift == 32) && (kSmiTag == 0));
4415  if (can_overflow) {
4416  __ Smulh(result, left, right);
4417  __ Cmp(result, Operand(result.W(), SXTW));
4418  __ SmiTag(result);
4419  DeoptimizeIf(ne, instr->environment());
4420  } else {
4421  if (AreAliased(result, left, right)) {
4422  // All three registers are the same: half untag the input and then
4423  // multiply, giving a tagged result.
4424  STATIC_ASSERT((kSmiShift % 2) == 0);
4425  __ Asr(result, left, kSmiShift / 2);
4426  __ Mul(result, result, result);
4427  } else if (result.Is(left) && !left.Is(right)) {
4428  // Registers result and left alias, right is distinct: untag left into
4429  // result, and then multiply by right, giving a tagged result.
4430  __ SmiUntag(result, left);
4431  __ Mul(result, result, right);
4432  } else {
4433  ASSERT(!left.Is(result));
4434  // Registers result and right alias, left is distinct, or all registers
4435  // are distinct: untag right into result, and then multiply by left,
4436  // giving a tagged result.
4437  __ SmiUntag(result, right);
4438  __ Mul(result, left, result);
4439  }
4440  }
4441 }
4442 
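// Sketch of the aliased-register squaring trick above (values chosen for
// illustration): a smi holds its payload shifted left by kSmiShift == 32.
// Shifting the tagged value right by 16 leaves the payload shifted left by 16,
// and squaring that gives payload^2 shifted left by 32, which is exactly the
// tagged representation of payload^2. E.g. for the smi 3 (raw 3 << 32):
//   Asr by 16  -> 3 << 16
//   Mul        -> 9 << 32, i.e. the smi 9.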
4443 
4444 void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
4445  // TODO(3095996): Get rid of this. For now, we need to make the
4446  // result register contain a valid pointer because it is already
4447  // contained in the register pointer map.
4448  Register result = ToRegister(instr->result());
4449  __ Mov(result, 0);
4450 
4451  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
4452  // NumberTagU and NumberTagD use the context from the frame, rather than
4453  // the environment's HContext or HInlinedContext value.
4454  // They only call Runtime::kHiddenAllocateHeapNumber.
4455  // The corresponding HChange instructions are added in a phase that does
4456  // not have easy access to the local context.
4457  __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
4458  __ CallRuntimeSaveDoubles(Runtime::kHiddenAllocateHeapNumber);
4459  RecordSafepointWithRegisters(
4460  instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
4461  __ StoreToSafepointRegisterSlot(x0, result);
4462 }
4463 
4464 
4465 void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
4466  class DeferredNumberTagD: public LDeferredCode {
4467  public:
4468  DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
4469  : LDeferredCode(codegen), instr_(instr) { }
4470  virtual void Generate() { codegen()->DoDeferredNumberTagD(instr_); }
4471  virtual LInstruction* instr() { return instr_; }
4472  private:
4473  LNumberTagD* instr_;
4474  };
4475 
4476  DoubleRegister input = ToDoubleRegister(instr->value());
4477  Register result = ToRegister(instr->result());
4478  Register temp1 = ToRegister(instr->temp1());
4479  Register temp2 = ToRegister(instr->temp2());
4480 
4481  DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr);
4482  if (FLAG_inline_new) {
4483  __ AllocateHeapNumber(result, deferred->entry(), temp1, temp2);
4484  } else {
4485  __ B(deferred->entry());
4486  }
4487 
4488  __ Bind(deferred->exit());
4489  __ Str(input, FieldMemOperand(result, HeapNumber::kValueOffset));
4490 }
4491 
4492 
4493 void LCodeGen::DoDeferredNumberTagU(LInstruction* instr,
4494  LOperand* value,
4495  LOperand* temp1,
4496  LOperand* temp2) {
4497  Label slow, convert_and_store;
4498  Register src = ToRegister32(value);
4499  Register dst = ToRegister(instr->result());
4500  Register scratch1 = ToRegister(temp1);
4501 
4502  if (FLAG_inline_new) {
4503  Register scratch2 = ToRegister(temp2);
4504  __ AllocateHeapNumber(dst, &slow, scratch1, scratch2);
4505  __ B(&convert_and_store);
4506  }
4507 
4508  // Slow case: call the runtime system to do the number allocation.
4509  __ Bind(&slow);
4510  // TODO(3095996): Put a valid pointer value in the stack slot where the result
4511  // register is stored, as this register is in the pointer map, but contains an
4512  // integer value.
4513  __ Mov(dst, 0);
4514  {
4515  // Preserve the value of all registers.
4516  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
4517 
4518  // NumberTagU and NumberTagD use the context from the frame, rather than
4519  // the environment's HContext or HInlinedContext value.
4520  // They only call Runtime::kHiddenAllocateHeapNumber.
4521  // The corresponding HChange instructions are added in a phase that does
4522  // not have easy access to the local context.
4523  __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
4524  __ CallRuntimeSaveDoubles(Runtime::kHiddenAllocateHeapNumber);
4525  RecordSafepointWithRegisters(
4526  instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
4527  __ StoreToSafepointRegisterSlot(x0, dst);
4528  }
4529 
4530  // Convert number to floating point and store in the newly allocated heap
4531  // number.
4532  __ Bind(&convert_and_store);
4533  DoubleRegister dbl_scratch = double_scratch();
4534  __ Ucvtf(dbl_scratch, src);
4535  __ Str(dbl_scratch, FieldMemOperand(dst, HeapNumber::kValueOffset));
4536 }
4537 
4538 
4539 void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
4540  class DeferredNumberTagU: public LDeferredCode {
4541  public:
4542  DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr)
4543  : LDeferredCode(codegen), instr_(instr) { }
4544  virtual void Generate() {
4545  codegen()->DoDeferredNumberTagU(instr_,
4546  instr_->value(),
4547  instr_->temp1(),
4548  instr_->temp2());
4549  }
4550  virtual LInstruction* instr() { return instr_; }
4551  private:
4552  LNumberTagU* instr_;
4553  };
4554 
4555  Register value = ToRegister32(instr->value());
4556  Register result = ToRegister(instr->result());
4557 
4558  DeferredNumberTagU* deferred = new(zone()) DeferredNumberTagU(this, instr);
4559  __ Cmp(value, Smi::kMaxValue);
4560  __ B(hi, deferred->entry());
4561  __ SmiTag(result, value.X());
4562  __ Bind(deferred->exit());
4563 }
4564 
4565 
4566 void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
4567  Register input = ToRegister(instr->value());
4568  Register scratch = ToRegister(instr->temp());
4569  DoubleRegister result = ToDoubleRegister(instr->result());
4570  bool can_convert_undefined_to_nan =
4571  instr->hydrogen()->can_convert_undefined_to_nan();
4572 
4573  Label done, load_smi;
4574 
4575  // Work out what untag mode we're working with.
4576  HValue* value = instr->hydrogen()->value();
4577  NumberUntagDMode mode = value->representation().IsSmi()
4578  ? NUMBER_CANDIDATE_IS_SMI : NUMBER_CANDIDATE_IS_ANY_TAGGED;
4579 
4580  if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
4581  __ JumpIfSmi(input, &load_smi);
4582 
4583  Label convert_undefined;
4584 
4585  // Heap number map check.
4586  __ Ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
4587  if (can_convert_undefined_to_nan) {
4588  __ JumpIfNotRoot(scratch, Heap::kHeapNumberMapRootIndex,
4589  &convert_undefined);
4590  } else {
4591  DeoptimizeIfNotRoot(scratch, Heap::kHeapNumberMapRootIndex,
4592  instr->environment());
4593  }
4594 
4595  // Load heap number.
4596  __ Ldr(result, FieldMemOperand(input, HeapNumber::kValueOffset));
4597  if (instr->hydrogen()->deoptimize_on_minus_zero()) {
4598  DeoptimizeIfMinusZero(result, instr->environment());
4599  }
4600  __ B(&done);
4601 
4602  if (can_convert_undefined_to_nan) {
4603  __ Bind(&convert_undefined);
4604  DeoptimizeIfNotRoot(input, Heap::kUndefinedValueRootIndex,
4605  instr->environment());
4606 
4607  __ LoadRoot(scratch, Heap::kNanValueRootIndex);
4608  __ Ldr(result, FieldMemOperand(scratch, HeapNumber::kValueOffset));
4609  __ B(&done);
4610  }
4611 
4612  } else {
4613  ASSERT(mode == NUMBER_CANDIDATE_IS_SMI);
4614  // Fall through to load_smi.
4615  }
4616 
4617  // Smi to double register conversion.
4618  __ Bind(&load_smi);
4619  __ SmiUntagToDouble(result, input);
4620 
4621  __ Bind(&done);
4622 }
4623 
4624 
4625 void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
4626  // This is a pseudo-instruction that ensures that the environment here is
4627  // properly registered for deoptimization and records the assembler's PC
4628  // offset.
4629  LEnvironment* environment = instr->environment();
4630 
4631  // If the environment were already registered, we would have no way of
4632  // backpatching it with the spill slot operands.
4633  ASSERT(!environment->HasBeenRegistered());
4634  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
4635 
4636  GenerateOsrPrologue();
4637 }
4638 
4639 
4640 void LCodeGen::DoParameter(LParameter* instr) {
4641  // Nothing to do.
4642 }
4643 
4644 
4645 void LCodeGen::DoPushArgument(LPushArgument* instr) {
4646  LOperand* argument = instr->value();
4647  if (argument->IsDoubleRegister() || argument->IsDoubleStackSlot()) {
4648  Abort(kDoPushArgumentNotImplementedForDoubleType);
4649  } else {
4650  __ Push(ToRegister(argument));
4651  }
4652 }
4653 
4654 
4655 void LCodeGen::DoReturn(LReturn* instr) {
4656  if (FLAG_trace && info()->IsOptimizing()) {
4657  // Push the return value on the stack as the parameter.
4658  // Runtime::TraceExit returns its parameter in x0. Since we're leaving the
4659  // code managed by the register allocator and tearing down the frame, it's
4660  // safe to write to the context register.
4661  __ Push(x0);
4662  __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
4663  __ CallRuntime(Runtime::kTraceExit, 1);
4664  }
4665 
4666  if (info()->saves_caller_doubles()) {
4667  RestoreCallerDoubles();
4668  }
4669 
4670  int no_frame_start = -1;
4671  if (NeedsEagerFrame()) {
4672  Register stack_pointer = masm()->StackPointer();
4673  __ Mov(stack_pointer, fp);
4674  no_frame_start = masm_->pc_offset();
4675  __ Pop(fp, lr);
4676  }
4677 
4678  if (instr->has_constant_parameter_count()) {
4679  int parameter_count = ToInteger32(instr->constant_parameter_count());
4680  __ Drop(parameter_count + 1);
4681  } else {
4682  Register parameter_count = ToRegister(instr->parameter_count());
4683  __ DropBySMI(parameter_count);
4684  }
4685  __ Ret();
4686 
4687  if (no_frame_start != -1) {
4688  info_->AddNoFrameRange(no_frame_start, masm_->pc_offset());
4689  }
4690 }
4691 
4692 
4693 MemOperand LCodeGen::BuildSeqStringOperand(Register string,
4694  Register temp,
4695  LOperand* index,
4696  String::Encoding encoding) {
4697  if (index->IsConstantOperand()) {
4698  int offset = ToInteger32(LConstantOperand::cast(index));
4699  if (encoding == String::TWO_BYTE_ENCODING) {
4700  offset *= kUC16Size;
4701  }
4702  STATIC_ASSERT(kCharSize == 1);
4703  return FieldMemOperand(string, SeqString::kHeaderSize + offset);
4704  }
4705 
4706  if (encoding == String::ONE_BYTE_ENCODING) {
4707  __ Add(temp, string, Operand(ToRegister32(index), SXTW));
4708  } else {
4709  STATIC_ASSERT(kUC16Size == 2);
4710  __ Add(temp, string, Operand(ToRegister32(index), SXTW, 1));
4711  }
4712  return FieldMemOperand(temp, SeqString::kHeaderSize);
4713 }
4714 
4715 
4716 void LCodeGen::DoSeqStringGetChar(LSeqStringGetChar* instr) {
4717  String::Encoding encoding = instr->hydrogen()->encoding();
4718  Register string = ToRegister(instr->string());
4719  Register result = ToRegister(instr->result());
4720  Register temp = ToRegister(instr->temp());
4721 
4722  if (FLAG_debug_code) {
4723  // Even though this lithium instruction comes with a temp register, we
4724  // can't use it here because we want to use "AtStart" constraints on the
4725  // inputs and the debug code here needs a scratch register.
4726  UseScratchRegisterScope temps(masm());
4727  Register dbg_temp = temps.AcquireX();
4728 
4729  __ Ldr(dbg_temp, FieldMemOperand(string, HeapObject::kMapOffset));
4730  __ Ldrb(dbg_temp, FieldMemOperand(dbg_temp, Map::kInstanceTypeOffset));
4731 
4732  __ And(dbg_temp, dbg_temp,
4733  Operand(kStringRepresentationMask | kStringEncodingMask));
4734  static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
4735  static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
4736  __ Cmp(dbg_temp, Operand(encoding == String::ONE_BYTE_ENCODING
4737  ? one_byte_seq_type : two_byte_seq_type));
4738  __ Check(eq, kUnexpectedStringType);
4739  }
4740 
4741  MemOperand operand =
4742  BuildSeqStringOperand(string, temp, instr->index(), encoding);
4743  if (encoding == String::ONE_BYTE_ENCODING) {
4744  __ Ldrb(result, operand);
4745  } else {
4746  __ Ldrh(result, operand);
4747  }
4748 }
4749 
4750 
4751 void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
4752  String::Encoding encoding = instr->hydrogen()->encoding();
4753  Register string = ToRegister(instr->string());
4754  Register value = ToRegister(instr->value());
4755  Register temp = ToRegister(instr->temp());
4756 
4757  if (FLAG_debug_code) {
4758  ASSERT(ToRegister(instr->context()).is(cp));
4759  Register index = ToRegister(instr->index());
4760  static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
4761  static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
4762  int encoding_mask =
4763  instr->hydrogen()->encoding() == String::ONE_BYTE_ENCODING
4764  ? one_byte_seq_type : two_byte_seq_type;
4765  __ EmitSeqStringSetCharCheck(string, index, kIndexIsInteger32, temp,
4766  encoding_mask);
4767  }
4768  MemOperand operand =
4769  BuildSeqStringOperand(string, temp, instr->index(), encoding);
4770  if (encoding == String::ONE_BYTE_ENCODING) {
4771  __ Strb(value, operand);
4772  } else {
4773  __ Strh(value, operand);
4774  }
4775 }
4776 
4777 
4778 void LCodeGen::DoSmiTag(LSmiTag* instr) {
4779  HChange* hchange = instr->hydrogen();
4780  Register input = ToRegister(instr->value());
4781  Register output = ToRegister(instr->result());
4782  if (hchange->CheckFlag(HValue::kCanOverflow) &&
4783  hchange->value()->CheckFlag(HValue::kUint32)) {
4784  DeoptimizeIfNegative(input.W(), instr->environment());
4785  }
4786  __ SmiTag(output, input);
4787 }
4788 
4789 
4790 void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
4791  Register input = ToRegister(instr->value());
4792  Register result = ToRegister(instr->result());
4793  Label done, untag;
4794 
4795  if (instr->needs_check()) {
4796  DeoptimizeIfNotSmi(input, instr->environment());
4797  }
4798 
4799  __ Bind(&untag);
4800  __ SmiUntag(result, input);
4801  __ Bind(&done);
4802 }
4803 
4804 
4805 void LCodeGen::DoShiftI(LShiftI* instr) {
4806  LOperand* right_op = instr->right();
4807  Register left = ToRegister32(instr->left());
4808  Register result = ToRegister32(instr->result());
4809 
4810  if (right_op->IsRegister()) {
4811  Register right = ToRegister32(instr->right());
4812  switch (instr->op()) {
4813  case Token::ROR: __ Ror(result, left, right); break;
4814  case Token::SAR: __ Asr(result, left, right); break;
4815  case Token::SHL: __ Lsl(result, left, right); break;
4816  case Token::SHR:
4817  if (instr->can_deopt()) {
4818  Label right_not_zero;
4819  __ Cbnz(right, &right_not_zero);
4820  DeoptimizeIfNegative(left, instr->environment());
4821  __ Bind(&right_not_zero);
4822  }
4823  __ Lsr(result, left, right);
4824  break;
4825  default: UNREACHABLE();
4826  }
4827  } else {
4828  ASSERT(right_op->IsConstantOperand());
4829  int shift_count = ToInteger32(LConstantOperand::cast(right_op)) & 0x1f;
4830  if (shift_count == 0) {
4831  if ((instr->op() == Token::SHR) && instr->can_deopt()) {
4832  DeoptimizeIfNegative(left, instr->environment());
4833  }
4834  __ Mov(result, left, kDiscardForSameWReg);
4835  } else {
4836  switch (instr->op()) {
4837  case Token::ROR: __ Ror(result, left, shift_count); break;
4838  case Token::SAR: __ Asr(result, left, shift_count); break;
4839  case Token::SHL: __ Lsl(result, left, shift_count); break;
4840  case Token::SHR: __ Lsr(result, left, shift_count); break;
4841  default: UNREACHABLE();
4842  }
4843  }
4844  }
4845 }
4846 
4847 
4848 void LCodeGen::DoShiftS(LShiftS* instr) {
4849  LOperand* right_op = instr->right();
4850  Register left = ToRegister(instr->left());
4851  Register result = ToRegister(instr->result());
4852 
4853  // Only ROR by register needs a temp.
4854  ASSERT(((instr->op() == Token::ROR) && right_op->IsRegister()) ||
4855  (instr->temp() == NULL));
4856 
4857  if (right_op->IsRegister()) {
4858  Register right = ToRegister(instr->right());
4859  switch (instr->op()) {
4860  case Token::ROR: {
4861  Register temp = ToRegister(instr->temp());
4862  __ Ubfx(temp, right, kSmiShift, 5);
4863  __ SmiUntag(result, left);
4864  __ Ror(result.W(), result.W(), temp.W());
4865  __ SmiTag(result);
4866  break;
4867  }
4868  case Token::SAR:
4869  __ Ubfx(result, right, kSmiShift, 5);
4870  __ Asr(result, left, result);
4871  __ Bic(result, result, kSmiShiftMask);
4872  break;
4873  case Token::SHL:
4874  __ Ubfx(result, right, kSmiShift, 5);
4875  __ Lsl(result, left, result);
4876  break;
4877  case Token::SHR:
4878  if (instr->can_deopt()) {
4879  Label right_not_zero;
4880  __ Cbnz(right, &right_not_zero);
4881  DeoptimizeIfNegative(left, instr->environment());
4882  __ Bind(&right_not_zero);
4883  }
4884  __ Ubfx(result, right, kSmiShift, 5);
4885  __ Lsr(result, left, result);
4886  __ Bic(result, result, kSmiShiftMask);
4887  break;
4888  default: UNREACHABLE();
4889  }
4890  } else {
4891  ASSERT(right_op->IsConstantOperand());
4892  int shift_count = ToInteger32(LConstantOperand::cast(right_op)) & 0x1f;
4893  if (shift_count == 0) {
4894  if ((instr->op() == Token::SHR) && instr->can_deopt()) {
4895  DeoptimizeIfNegative(left, instr->environment());
4896  }
4897  __ Mov(result, left);
4898  } else {
4899  switch (instr->op()) {
4900  case Token::ROR:
4901  __ SmiUntag(result, left);
4902  __ Ror(result.W(), result.W(), shift_count);
4903  __ SmiTag(result);
4904  break;
4905  case Token::SAR:
4906  __ Asr(result, left, shift_count);
4907  __ Bic(result, result, kSmiShiftMask);
4908  break;
4909  case Token::SHL:
4910  __ Lsl(result, left, shift_count);
4911  break;
4912  case Token::SHR:
4913  __ Lsr(result, left, shift_count);
4914  __ Bic(result, result, kSmiShiftMask);
4915  break;
4916  default: UNREACHABLE();
4917  }
4918  }
4919  }
4920 }
4921 
4922 
4923 void LCodeGen::DoDebugBreak(LDebugBreak* instr) {
4924  __ Debug("LDebugBreak", 0, BREAK);
4925 }
4926 
4927 
4928 void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
4929  ASSERT(ToRegister(instr->context()).is(cp));
4930  Register scratch1 = x5;
4931  Register scratch2 = x6;
4932  ASSERT(instr->IsMarkedAsCall());
4933 
4934  ASM_UNIMPLEMENTED_BREAK("DoDeclareGlobals");
4935  // TODO(all): if Mov could handle objects in new space then it could be
4936  // used here.
4937  __ LoadHeapObject(scratch1, instr->hydrogen()->pairs());
4938  __ Mov(scratch2, Smi::FromInt(instr->hydrogen()->flags()));
4939  __ Push(cp, scratch1, scratch2); // The context is the first argument.
4940  CallRuntime(Runtime::kHiddenDeclareGlobals, 3, instr);
4941 }
4942 
4943 
4944 void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
4945  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
4946  LoadContextFromDeferred(instr->context());
4947  __ CallRuntimeSaveDoubles(Runtime::kHiddenStackGuard);
4948  RecordSafepointWithLazyDeopt(
4949  instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
4950  ASSERT(instr->HasEnvironment());
4951  LEnvironment* env = instr->environment();
4952  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
4953 }
4954 
4955 
4956 void LCodeGen::DoStackCheck(LStackCheck* instr) {
4957  class DeferredStackCheck: public LDeferredCode {
4958  public:
4959  DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
4960  : LDeferredCode(codegen), instr_(instr) { }
4961  virtual void Generate() { codegen()->DoDeferredStackCheck(instr_); }
4962  virtual LInstruction* instr() { return instr_; }
4963  private:
4964  LStackCheck* instr_;
4965  };
4966 
4967  ASSERT(instr->HasEnvironment());
4968  LEnvironment* env = instr->environment();
4969  // There is no LLazyBailout instruction for stack-checks. We have to
4970  // prepare for lazy deoptimization explicitly here.
4971  if (instr->hydrogen()->is_function_entry()) {
4972  // Perform stack overflow check.
4973  Label done;
4974  __ CompareRoot(masm()->StackPointer(), Heap::kStackLimitRootIndex);
4975  __ B(hs, &done);
4976 
4977  PredictableCodeSizeScope predictable(masm_,
4978  Assembler::kCallSizeWithRelocation);
4979  ASSERT(instr->context()->IsRegister());
4980  ASSERT(ToRegister(instr->context()).is(cp));
4981  CallCode(isolate()->builtins()->StackCheck(),
4982  RelocInfo::CODE_TARGET,
4983  instr);
4984  __ Bind(&done);
4985  } else {
4986  ASSERT(instr->hydrogen()->is_backwards_branch());
4987  // Perform stack overflow check if this goto needs it before jumping.
4988  DeferredStackCheck* deferred_stack_check =
4989  new(zone()) DeferredStackCheck(this, instr);
4990  __ CompareRoot(masm()->StackPointer(), Heap::kStackLimitRootIndex);
4991  __ B(lo, deferred_stack_check->entry());
4992 
4993  EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
4994  __ Bind(instr->done_label());
4995  deferred_stack_check->SetExit(instr->done_label());
4996  RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
4997  // Don't record a deoptimization index for the safepoint here.
4998  // This will be done explicitly when emitting call and the safepoint in
4999  // the deferred code.
5000  }
5001 }
5002 
5003 
5004 void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) {
5005  Register function = ToRegister(instr->function());
5006  Register code_object = ToRegister(instr->code_object());
5007  Register temp = ToRegister(instr->temp());
5008  __ Add(temp, code_object, Code::kHeaderSize - kHeapObjectTag);
5009  __ Str(temp, FieldMemOperand(function, JSFunction::kCodeEntryOffset));
5010 }
5011 
5012 
5013 void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
5014  Register context = ToRegister(instr->context());
5015  Register value = ToRegister(instr->value());
5016  Register scratch = ToRegister(instr->temp());
5017  MemOperand target = ContextMemOperand(context, instr->slot_index());
5018 
5019  Label skip_assignment;
5020 
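 // If the slot may contain the hole, load the current value first: depending
 // on the hydrogen instruction, a hole either triggers a deoptimization or
 // turns the store into a no-op (branch to skip_assignment).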
5021  if (instr->hydrogen()->RequiresHoleCheck()) {
5022  __ Ldr(scratch, target);
5023  if (instr->hydrogen()->DeoptimizesOnHole()) {
5024  DeoptimizeIfRoot(scratch, Heap::kTheHoleValueRootIndex,
5025  instr->environment());
5026  } else {
5027  __ JumpIfNotRoot(scratch, Heap::kTheHoleValueRootIndex, &skip_assignment);
5028  }
5029  }
5030 
5031  __ Str(value, target);
5032  if (instr->hydrogen()->NeedsWriteBarrier()) {
5033  SmiCheck check_needed =
5034  instr->hydrogen()->value()->IsHeapObject()
5035  ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
5036  __ RecordWriteContextSlot(context,
5037  target.offset(),
5038  value,
5039  scratch,
5040  GetLinkRegisterState(),
5041  kSaveFPRegs,
5042  EMIT_REMEMBERED_SET,
5043  check_needed);
5044  }
5045  __ Bind(&skip_assignment);
5046 }
5047 
5048 
5049 void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
5050  Register value = ToRegister(instr->value());
5051  Register cell = ToRegister(instr->temp1());
5052 
5053  // Load the cell.
5054  __ Mov(cell, Operand(instr->hydrogen()->cell().handle()));
5055 
5056  // If the cell we are storing to contains the hole it could have
5057  // been deleted from the property dictionary. In that case, we need
5058  // to update the property details in the property dictionary to mark
5059  // it as no longer deleted. We deoptimize in that case.
5060  if (instr->hydrogen()->RequiresHoleCheck()) {
5061  Register payload = ToRegister(instr->temp2());
5062  __ Ldr(payload, FieldMemOperand(cell, Cell::kValueOffset));
5063  DeoptimizeIfRoot(
5064  payload, Heap::kTheHoleValueRootIndex, instr->environment());
5065  }
5066 
5067  // Store the value.
5068  __ Str(value, FieldMemOperand(cell, Cell::kValueOffset));
5069  // Cells are always rescanned, so no write barrier here.
5070 }
5071 
5072 
5073 void LCodeGen::DoStoreKeyedExternal(LStoreKeyedExternal* instr) {
5074  Register ext_ptr = ToRegister(instr->elements());
5075  Register key = no_reg;
5076  Register scratch;
5077  ElementsKind elements_kind = instr->elements_kind();
5078 
5079  bool key_is_smi = instr->hydrogen()->key()->representation().IsSmi();
5080  bool key_is_constant = instr->key()->IsConstantOperand();
5081  int constant_key = 0;
5082  if (key_is_constant) {
5083  ASSERT(instr->temp() == NULL);
5084  constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
5085  if (constant_key & 0xf0000000) {
5086  Abort(kArrayIndexConstantValueTooBig);
5087  }
5088  } else {
5089  key = ToRegister(instr->key());
5090  scratch = ToRegister(instr->temp());
5091  }
5092 
5093  MemOperand dst =
5094  PrepareKeyedExternalArrayOperand(key, ext_ptr, scratch, key_is_smi,
5095  key_is_constant, constant_key,
5096  elements_kind,
5097  instr->additional_index());
5098 
5099  if ((elements_kind == EXTERNAL_FLOAT32_ELEMENTS) ||
5100  (elements_kind == FLOAT32_ELEMENTS)) {
5101  DoubleRegister value = ToDoubleRegister(instr->value());
5102  DoubleRegister dbl_scratch = double_scratch();
5103  __ Fcvt(dbl_scratch.S(), value);
5104  __ Str(dbl_scratch.S(), dst);
5105  } else if ((elements_kind == EXTERNAL_FLOAT64_ELEMENTS) ||
5106  (elements_kind == FLOAT64_ELEMENTS)) {
5107  DoubleRegister value = ToDoubleRegister(instr->value());
5108  __ Str(value, dst);
5109  } else {
5110  Register value = ToRegister(instr->value());
5111 
5112  switch (elements_kind) {
5113  case EXTERNAL_UINT8_CLAMPED_ELEMENTS:
5114  case EXTERNAL_INT8_ELEMENTS:
5115  case EXTERNAL_UINT8_ELEMENTS:
5116  case UINT8_ELEMENTS:
5117  case UINT8_CLAMPED_ELEMENTS:
5118  case INT8_ELEMENTS:
5119  __ Strb(value, dst);
5120  break;
5121  case EXTERNAL_INT16_ELEMENTS:
5122  case EXTERNAL_UINT16_ELEMENTS:
5123  case INT16_ELEMENTS:
5124  case UINT16_ELEMENTS:
5125  __ Strh(value, dst);
5126  break;
5127  case EXTERNAL_INT32_ELEMENTS:
5128  case EXTERNAL_UINT32_ELEMENTS:
5129  case INT32_ELEMENTS:
5130  case UINT32_ELEMENTS:
5131  __ Str(value.W(), dst);
5132  break;
5133  case FLOAT32_ELEMENTS:
5134  case FLOAT64_ELEMENTS:
5135  case EXTERNAL_FLOAT32_ELEMENTS:
5136  case EXTERNAL_FLOAT64_ELEMENTS:
5137  case FAST_DOUBLE_ELEMENTS:
5138  case FAST_ELEMENTS:
5139  case FAST_SMI_ELEMENTS:
5140  case FAST_HOLEY_DOUBLE_ELEMENTS:
5141  case FAST_HOLEY_ELEMENTS:
5142  case FAST_HOLEY_SMI_ELEMENTS:
5143  case DICTIONARY_ELEMENTS:
5144  case SLOPPY_ARGUMENTS_ELEMENTS:
5145  UNREACHABLE();
5146  break;
5147  }
5148  }
5149 }
5150 
5151 
5152 void LCodeGen::DoStoreKeyedFixedDouble(LStoreKeyedFixedDouble* instr) {
5153  Register elements = ToRegister(instr->elements());
5154  DoubleRegister value = ToDoubleRegister(instr->value());
5155  Register store_base = no_reg;
5156  int offset = 0;
5157 
5158  if (instr->key()->IsConstantOperand()) {
5159  int constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
5160  if (constant_key & 0xf0000000) {
5161  Abort(kArrayIndexConstantValueTooBig);
5162  }
5163  offset = FixedDoubleArray::OffsetOfElementAt(constant_key +
5164  instr->additional_index());
5165  store_base = elements;
5166  } else {
5167  store_base = ToRegister(instr->temp());
5168  Register key = ToRegister(instr->key());
5169  bool key_is_tagged = instr->hydrogen()->key()->representation().IsSmi();
5170  CalcKeyedArrayBaseRegister(store_base, elements, key, key_is_tagged,
5171  instr->hydrogen()->elements_kind());
5172  offset = FixedDoubleArray::OffsetOfElementAt(instr->additional_index());
5173  }
5174 
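 // Canonicalization makes sure the hole NaN bit pattern is never written to a
 // FixedDoubleArray: Fmaxnm with the canonical NaN leaves ordinary numbers
 // unchanged and replaces any NaN input with a canonical quiet NaN.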
5175  if (instr->NeedsCanonicalization()) {
5176  DoubleRegister dbl_scratch = double_scratch();
5177  __ Fmov(dbl_scratch,
5178  FixedDoubleArray::canonical_not_the_hole_nan_as_double());
5179  __ Fmaxnm(dbl_scratch, dbl_scratch, value);
5180  __ Str(dbl_scratch, FieldMemOperand(store_base, offset));
5181  } else {
5182  __ Str(value, FieldMemOperand(store_base, offset));
5183  }
5184 }
5185 
5186 
5187 void LCodeGen::DoStoreKeyedFixed(LStoreKeyedFixed* instr) {
5188  Register value = ToRegister(instr->value());
5189  Register elements = ToRegister(instr->elements());
5190  Register scratch = no_reg;
5191  Register store_base = no_reg;
5192  Register key = no_reg;
5193  int offset = 0;
5194 
5195  if (!instr->key()->IsConstantOperand() ||
5196  instr->hydrogen()->NeedsWriteBarrier()) {
5197  scratch = ToRegister(instr->temp());
5198  }
5199 
5200  if (instr->key()->IsConstantOperand()) {
5201  LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
5202  offset = FixedArray::OffsetOfElementAt(ToInteger32(const_operand) +
5203  instr->additional_index());
5204  store_base = elements;
5205  } else {
5206  store_base = scratch;
5207  key = ToRegister(instr->key());
5208  bool key_is_tagged = instr->hydrogen()->key()->representation().IsSmi();
5209  CalcKeyedArrayBaseRegister(store_base, elements, key, key_is_tagged,
5210  instr->hydrogen()->elements_kind());
5211  offset = FixedArray::OffsetOfElementAt(instr->additional_index());
5212  }
5213  Representation representation = instr->hydrogen()->value()->representation();
5214  if (representation.IsInteger32()) {
5215  ASSERT(instr->hydrogen()->store_mode() == STORE_TO_INITIALIZED_ENTRY);
5216  ASSERT(instr->hydrogen()->elements_kind() == FAST_SMI_ELEMENTS);
5217  STATIC_ASSERT(kSmiValueSize == 32 && kSmiShift == 32 && kSmiTag == 0);
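 // The slot already holds a smi (STORE_TO_INITIALIZED_ENTRY), so its low word
 // is zero; writing the untagged int32 value into the upper word via
 // UntagSmiFieldMemOperand tags it without an explicit shift.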
5218  __ Store(value, UntagSmiFieldMemOperand(store_base, offset),
5219  Representation::Integer32());
5220  } else {
5221  __ Store(value, FieldMemOperand(store_base, offset), representation);
5222  }
5223 
5224  if (instr->hydrogen()->NeedsWriteBarrier()) {
5225  ASSERT(representation.IsTagged());
5226  // This assignment may cause element_addr to alias store_base.
5227  Register element_addr = scratch;
5228  SmiCheck check_needed =
5229  instr->hydrogen()->value()->IsHeapObject()
5230  ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
5231  // Compute the address of the modified element into element_addr (scratch).
5232  __ Add(element_addr, store_base, offset - kHeapObjectTag);
5233  __ RecordWrite(elements, element_addr, value, GetLinkRegisterState(),
5234  kSaveFPRegs, EMIT_REMEMBERED_SET, check_needed);
5235  }
5236 }
5237 
5238 
5239 void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
5240  ASSERT(ToRegister(instr->context()).is(cp));
5241  ASSERT(ToRegister(instr->object()).Is(x2));
5242  ASSERT(ToRegister(instr->key()).Is(x1));
5243  ASSERT(ToRegister(instr->value()).Is(x0));
5244 
5245  Handle<Code> ic = instr->strict_mode() == STRICT
5246  ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
5247  : isolate()->builtins()->KeyedStoreIC_Initialize();
5248  CallCode(ic, RelocInfo::CODE_TARGET, instr);
5249 }
5250 
5251 
5252 void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
5253  Representation representation = instr->representation();
5254 
5255  Register object = ToRegister(instr->object());
5256  HObjectAccess access = instr->hydrogen()->access();
5257  Handle<Map> transition = instr->transition();
5258  int offset = access.offset();
5259 
5260  if (access.IsExternalMemory()) {
5261  ASSERT(transition.is_null());
5262  ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
5263  Register value = ToRegister(instr->value());
5264  __ Store(value, MemOperand(object, offset), representation);
5265  return;
5266  } else if (representation.IsDouble()) {
5267  ASSERT(transition.is_null());
5268  ASSERT(access.IsInobject());
5269  ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
5270  FPRegister value = ToDoubleRegister(instr->value());
5271  __ Str(value, FieldMemOperand(object, offset));
5272  return;
5273  }
5274 
5275  Register value = ToRegister(instr->value());
5276 
5277  SmiCheck check_needed = instr->hydrogen()->value()->IsHeapObject()
5278  ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
5279 
5280  ASSERT(!(representation.IsSmi() &&
5281  instr->value()->IsConstantOperand() &&
5282  !IsInteger32Constant(LConstantOperand::cast(instr->value()))));
5283  if (representation.IsHeapObject() &&
5284  !instr->hydrogen()->value()->type().IsHeapObject()) {
5285  DeoptimizeIfSmi(value, instr->environment());
5286 
5287  // We know now that value is not a smi, so we can omit the check below.
5288  check_needed = OMIT_SMI_CHECK;
5289  }
5290 
5291  if (!transition.is_null()) {
5292  // Store the new map value.
5293  Register new_map_value = ToRegister(instr->temp0());
5294  __ Mov(new_map_value, Operand(transition));
5295  __ Str(new_map_value, FieldMemOperand(object, HeapObject::kMapOffset));
5296  if (instr->hydrogen()->NeedsWriteBarrierForMap()) {
5297  // Update the write barrier for the map field.
5298  __ RecordWriteField(object,
5299  HeapObject::kMapOffset,
5300  new_map_value,
5301  ToRegister(instr->temp1()),
5302  GetLinkRegisterState(),
5303  kSaveFPRegs,
5304  OMIT_REMEMBERED_SET,
5305  OMIT_SMI_CHECK);
5306  }
5307  }
5308 
5309  // Do the store.
5310  Register destination;
5311  if (access.IsInobject()) {
5312  destination = object;
5313  } else {
5314  Register temp0 = ToRegister(instr->temp0());
5315  __ Ldr(temp0, FieldMemOperand(object, JSObject::kPropertiesOffset));
5316  destination = temp0;
5317  }
5318 
5319  if (representation.IsSmi() &&
5320  instr->hydrogen()->value()->representation().IsInteger32()) {
5321  ASSERT(instr->hydrogen()->store_mode() == STORE_TO_INITIALIZED_ENTRY);
5322 #ifdef DEBUG
5323  Register temp0 = ToRegister(instr->temp0());
5324  __ Ldr(temp0, FieldMemOperand(destination, offset));
5325  __ AssertSmi(temp0);
5326  // If destination aliased temp0, restore it to the address calculated
5327  // earlier.
5328  if (destination.Is(temp0)) {
5329  ASSERT(!access.IsInobject());
5330  __ Ldr(destination, FieldMemOperand(object, JSObject::kPropertiesOffset));
5331  }
5332 #endif
5333  STATIC_ASSERT(kSmiValueSize == 32 && kSmiShift == 32 && kSmiTag == 0);
5334  __ Store(value, UntagSmiFieldMemOperand(destination, offset),
5335  Representation::Integer32());
5336  } else {
5337  __ Store(value, FieldMemOperand(destination, offset), representation);
5338  }
5339  if (instr->hydrogen()->NeedsWriteBarrier()) {
5340  __ RecordWriteField(destination,
5341  offset,
5342  value, // Clobbered.
5343  ToRegister(instr->temp1()), // Clobbered.
5344  GetLinkRegisterState(),
5345  kSaveFPRegs,
5346  EMIT_REMEMBERED_SET,
5347  check_needed);
5348  }
5349 }
5350 
5351 
5352 void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
5353  ASSERT(ToRegister(instr->context()).is(cp));
5354  ASSERT(ToRegister(instr->value()).is(x0));
5355  ASSERT(ToRegister(instr->object()).is(x1));
5356 
5357  // Name must be in x2.
5358  __ Mov(x2, Operand(instr->name()));
5359  Handle<Code> ic = StoreIC::initialize_stub(isolate(), instr->strict_mode());
5360  CallCode(ic, RelocInfo::CODE_TARGET, instr);
5361 }
5362 
5363 
5364 void LCodeGen::DoStringAdd(LStringAdd* instr) {
5365  ASSERT(ToRegister(instr->context()).is(cp));
5366  ASSERT(ToRegister(instr->left()).Is(x1));
5367  ASSERT(ToRegister(instr->right()).Is(x0));
5368  StringAddStub stub(instr->hydrogen()->flags(),
5369  instr->hydrogen()->pretenure_flag());
5370  CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
5371 }
5372 
5373 
5374 void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
5375  class DeferredStringCharCodeAt: public LDeferredCode {
5376  public:
5377  DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
5378  : LDeferredCode(codegen), instr_(instr) { }
5379  virtual void Generate() { codegen()->DoDeferredStringCharCodeAt(instr_); }
5380  virtual LInstruction* instr() { return instr_; }
5381  private:
5382  LStringCharCodeAt* instr_;
5383  };
5384 
5385  DeferredStringCharCodeAt* deferred =
5386  new(zone()) DeferredStringCharCodeAt(this, instr);
5387 
5388  StringCharLoadGenerator::Generate(masm(),
5389  ToRegister(instr->string()),
5390  ToRegister32(instr->index()),
5391  ToRegister(instr->result()),
5392  deferred->entry());
5393  __ Bind(deferred->exit());
5394 }
5395 
5396 
5397 void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
5398  Register string = ToRegister(instr->string());
5399  Register result = ToRegister(instr->result());
5400 
5401  // TODO(3095996): Get rid of this. For now, we need to make the
5402  // result register contain a valid pointer because it is already
5403  // contained in the register pointer map.
5404  __ Mov(result, 0);
5405 
5406  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
5407  __ Push(string);
5408  // Push the index as a smi. This is safe because of the checks in
5409  // DoStringCharCodeAt above.
5410  Register index = ToRegister(instr->index());
5411  __ SmiTag(index);
5412  __ Push(index);
5413 
5414  CallRuntimeFromDeferred(Runtime::kHiddenStringCharCodeAt, 2, instr,
5415  instr->context());
5416  __ AssertSmi(x0);
5417  __ SmiUntag(x0);
5418  __ StoreToSafepointRegisterSlot(x0, result);
5419 }
5420 
5421 
5422 void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
5423  class DeferredStringCharFromCode: public LDeferredCode {
5424  public:
5425  DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
5426  : LDeferredCode(codegen), instr_(instr) { }
5427  virtual void Generate() { codegen()->DoDeferredStringCharFromCode(instr_); }
5428  virtual LInstruction* instr() { return instr_; }
5429  private:
5430  LStringCharFromCode* instr_;
5431  };
5432 
5433  DeferredStringCharFromCode* deferred =
5434  new(zone()) DeferredStringCharFromCode(this, instr);
5435 
5436  ASSERT(instr->hydrogen()->value()->representation().IsInteger32());
5437  Register char_code = ToRegister32(instr->char_code());
5438  Register result = ToRegister(instr->result());
5439 
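 // Fast path: look the character up in the single character string cache.
 // Char codes above kMaxOneByteCharCode, or cache misses (undefined in the
 // cache), fall through to the deferred runtime call.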
5440  __ Cmp(char_code, String::kMaxOneByteCharCode);
5441  __ B(hi, deferred->entry());
5442  __ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex);
5443  __ Add(result, result, Operand(char_code, SXTW, kPointerSizeLog2));
5444  __ Ldr(result, FieldMemOperand(result, FixedArray::kHeaderSize));
5445  __ CompareRoot(result, Heap::kUndefinedValueRootIndex);
5446  __ B(eq, deferred->entry());
5447  __ Bind(deferred->exit());
5448 }
5449 
5450 
5451 void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
5452  Register char_code = ToRegister(instr->char_code());
5453  Register result = ToRegister(instr->result());
5454 
5455  // TODO(3095996): Get rid of this. For now, we need to make the
5456  // result register contain a valid pointer because it is already
5457  // contained in the register pointer map.
5458  __ Mov(result, 0);
5459 
5460  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
5461  __ SmiTag(char_code);
5462  __ Push(char_code);
5463  CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr, instr->context());
5464  __ StoreToSafepointRegisterSlot(x0, result);
5465 }
5466 
5467 
5468 void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
5469  ASSERT(ToRegister(instr->context()).is(cp));
5470  Token::Value op = instr->op();
5471 
5472  Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
5473  CallCode(ic, RelocInfo::CODE_TARGET, instr);
5475 
5476  Condition condition = TokenToCondition(op, false);
5477 
5478  EmitCompareAndBranch(instr, condition, x0, 0);
5479 }
5480 
5481 
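// Integer subtraction (32-bit untagged in DoSubI, smi-tagged in DoSubS below):
// when overflow is possible the flag-setting Subs is used and the code
// deoptimizes on the overflow (vs) condition.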
5482 void LCodeGen::DoSubI(LSubI* instr) {
5483  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
5484  Register result = ToRegister32(instr->result());
5485  Register left = ToRegister32(instr->left());
5486  Operand right = ToOperand32I(instr->right());
5487  if (can_overflow) {
5488  __ Subs(result, left, right);
5489  DeoptimizeIf(vs, instr->environment());
5490  } else {
5491  __ Sub(result, left, right);
5492  }
5493 }
5494 
5495 
5496 void LCodeGen::DoSubS(LSubS* instr) {
5497  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
5498  Register result = ToRegister(instr->result());
5499  Register left = ToRegister(instr->left());
5500  Operand right = ToOperand(instr->right());
5501  if (can_overflow) {
5502  __ Subs(result, left, right);
5503  DeoptimizeIf(vs, instr->environment());
5504  } else {
5505  __ Sub(result, left, right);
5506  }
5507 }
5508 
5509 
5510 void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr,
5511  LOperand* value,
5512  LOperand* temp1,
5513  LOperand* temp2) {
5514  Register input = ToRegister(value);
5515  Register scratch1 = ToRegister(temp1);
5516  DoubleRegister dbl_scratch1 = double_scratch();
5517 
5518  Label done;
5519 
5520  // Load heap object map.
5521  __ Ldr(scratch1, FieldMemOperand(input, HeapObject::kMapOffset));
5522 
5523  if (instr->truncating()) {
5524  Register output = ToRegister(instr->result());
5525  Label check_bools;
5526 
5527  // If it's not a heap number, jump to undefined check.
5528  __ JumpIfNotRoot(scratch1, Heap::kHeapNumberMapRootIndex, &check_bools);
5529 
5530  // A heap number: load value and convert to int32 using truncating function.
5531  __ TruncateHeapNumberToI(output, input);
5532  __ B(&done);
5533 
5534  __ Bind(&check_bools);
5535 
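 // Not a heap number: handle true/false inline. Cset makes output 1 if the
 // input was true; the conditional compare against false (only taken when the
 // input was not true) leaves the Z flag set when the input was either
 // boolean, so the branch below exits with output holding 1 or 0. Anything
 // else must be undefined (which truncates to 0) or we deoptimize.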
5536  Register true_root = output;
5537  Register false_root = scratch1;
5538  __ LoadTrueFalseRoots(true_root, false_root);
5539  __ Cmp(input, true_root);
5540  __ Cset(output, eq);
5541  __ Ccmp(input, false_root, ZFlag, ne);
5542  __ B(eq, &done);
5543 
5544  // Output already contains zero; undefined is converted to zero for
5545  // truncating conversions.
5546  DeoptimizeIfNotRoot(input, Heap::kUndefinedValueRootIndex,
5547  instr->environment());
5548  } else {
5549  Register output = ToRegister32(instr->result());
5550 
5551  DoubleRegister dbl_scratch2 = ToDoubleRegister(temp2);
5552 
5553  // Deoptimize if it's not a heap number.
5554  DeoptimizeIfNotRoot(scratch1, Heap::kHeapNumberMapRootIndex,
5555  instr->environment());
5556 
5557  // A heap number: load value and convert to int32 using non-truncating
5558  // function. If the result is out of range, branch to deoptimize.
5559  __ Ldr(dbl_scratch1, FieldMemOperand(input, HeapNumber::kValueOffset));
5560  __ TryConvertDoubleToInt32(output, dbl_scratch1, dbl_scratch2);
5561  DeoptimizeIf(ne, instr->environment());
5562 
5563  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
5564  __ Cmp(output, 0);
5565  __ B(ne, &done);
5566  __ Fmov(scratch1, dbl_scratch1);
5567  DeoptimizeIfNegative(scratch1, instr->environment());
5568  }
5569  }
5570  __ Bind(&done);
5571 }
5572 
5573 
5574 void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
5575  class DeferredTaggedToI: public LDeferredCode {
5576  public:
5577  DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
5578  : LDeferredCode(codegen), instr_(instr) { }
5579  virtual void Generate() {
5580  codegen()->DoDeferredTaggedToI(instr_, instr_->value(), instr_->temp1(),
5581  instr_->temp2());
5582  }
5583 
5584  virtual LInstruction* instr() { return instr_; }
5585  private:
5586  LTaggedToI* instr_;
5587  };
5588 
5589  Register input = ToRegister(instr->value());
5590  Register output = ToRegister(instr->result());
5591 
5592  if (instr->hydrogen()->value()->representation().IsSmi()) {
5593  __ SmiUntag(output, input);
5594  } else {
5595  DeferredTaggedToI* deferred = new(zone()) DeferredTaggedToI(this, instr);
5596 
5597  __ JumpIfNotSmi(input, deferred->entry());
5598  __ SmiUntag(output, input);
5599  __ Bind(deferred->exit());
5600  }
5601 }
5602 
5603 
5604 void LCodeGen::DoThisFunction(LThisFunction* instr) {
5605  Register result = ToRegister(instr->result());
5606  __ Ldr(result, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
5607 }
5608 
5609 
5610 void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
5611  ASSERT(ToRegister(instr->value()).Is(x0));
5612  ASSERT(ToRegister(instr->result()).Is(x0));
5613  __ Push(x0);
5614  CallRuntime(Runtime::kToFastProperties, 1, instr);
5615 }
5616 
5617 
5618 void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
5619  ASSERT(ToRegister(instr->context()).is(cp));
5620  Label materialized;
5621  // Registers will be used as follows:
5622  // x7 = literals array.
5623  // x1 = regexp literal.
5624  // x0 = regexp literal clone.
5625  // x10-x12 are used as temporaries.
5626  int literal_offset =
5627  FixedArray::OffsetOfElementAt(instr->hydrogen()->literal_index());
5628  __ LoadObject(x7, instr->hydrogen()->literals());
5629  __ Ldr(x1, FieldMemOperand(x7, literal_offset));
5630  __ JumpIfNotRoot(x1, Heap::kUndefinedValueRootIndex, &materialized);
5631 
5632  // Create regexp literal using runtime function
5633  // Result will be in x0.
5634  __ Mov(x12, Operand(Smi::FromInt(instr->hydrogen()->literal_index())));
5635  __ Mov(x11, Operand(instr->hydrogen()->pattern()));
5636  __ Mov(x10, Operand(instr->hydrogen()->flags()));
5637  __ Push(x7, x12, x11, x10);
5638  CallRuntime(Runtime::kHiddenMaterializeRegExpLiteral, 4, instr);
5639  __ Mov(x1, x0);
5640 
5641  __ Bind(&materialized);
5642  int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
5643  Label allocated, runtime_allocate;
5644 
5645  __ Allocate(size, x0, x10, x11, &runtime_allocate, TAG_OBJECT);
5646  __ B(&allocated);
5647 
5648  __ Bind(&runtime_allocate);
5649  __ Mov(x0, Smi::FromInt(size));
5650  __ Push(x1, x0);
5651  CallRuntime(Runtime::kHiddenAllocateInNewSpace, 1, instr);
5652  __ Pop(x1);
5653 
5654  __ Bind(&allocated);
5655  // Copy the content into the newly allocated memory.
5656  __ CopyFields(x0, x1, CPURegList(x10, x11, x12), size / kPointerSize);
5657 }
5658 
5659 
5660 void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
5661  Register object = ToRegister(instr->object());
5662  Register temp1 = ToRegister(instr->temp1());
5663 
5664  Handle<Map> from_map = instr->original_map();
5665  Handle<Map> to_map = instr->transitioned_map();
5666  ElementsKind from_kind = instr->from_kind();
5667  ElementsKind to_kind = instr->to_kind();
5668 
5669  Label not_applicable;
5670  __ CheckMap(object, temp1, from_map, &not_applicable, DONT_DO_SMI_CHECK);
5671 
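 // A simple transition (one that does not need the elements converted, e.g.
 // packed smi -> packed object) only rewrites the map pointer, plus a write
 // barrier for the map field. Any other transition is handled by
 // TransitionElementsKindStub, which may have to copy the backing store.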
5672  if (IsSimpleMapChangeTransition(from_kind, to_kind)) {
5673  Register new_map = ToRegister(instr->temp2());
5674  __ Mov(new_map, Operand(to_map));
5675  __ Str(new_map, FieldMemOperand(object, HeapObject::kMapOffset));
5676  // Write barrier.
5677  __ RecordWriteField(object, HeapObject::kMapOffset, new_map, temp1,
5678  GetLinkRegisterState(), kDontSaveFPRegs);
5679  } else {
5680  ASSERT(ToRegister(instr->context()).is(cp));
5681  PushSafepointRegistersScope scope(
5682  this, Safepoint::kWithRegistersAndDoubles);
5683  __ Mov(x0, object);
5684  __ Mov(x1, Operand(to_map));
5685  bool is_js_array = from_map->instance_type() == JS_ARRAY_TYPE;
5686  TransitionElementsKindStub stub(from_kind, to_kind, is_js_array);
5687  __ CallStub(&stub);
5688  RecordSafepointWithRegistersAndDoubles(
5689  instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
5690  }
5691  __ Bind(&not_applicable);
5692 }
5693 
5694 
5695 void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
5696  Register object = ToRegister(instr->object());
5697  Register temp1 = ToRegister(instr->temp1());
5698  Register temp2 = ToRegister(instr->temp2());
5699 
5700  Label no_memento_found;
5701  __ JumpIfJSArrayHasAllocationMemento(object, temp1, temp2, &no_memento_found);
5702  Deoptimize(instr->environment());
5703  __ Bind(&no_memento_found);
5704 }
5705 
5706 
5707 void LCodeGen::DoTruncateDoubleToIntOrSmi(LTruncateDoubleToIntOrSmi* instr) {
5708  DoubleRegister input = ToDoubleRegister(instr->value());
5709  Register result = ToRegister(instr->result());
5710  __ TruncateDoubleToI(result, input);
5711  if (instr->tag_result()) {
5712  __ SmiTag(result, result);
5713  }
5714 }
5715 
5716 
5717 void LCodeGen::DoTypeof(LTypeof* instr) {
5718  Register input = ToRegister(instr->value());
5719  __ Push(input);
5720  CallRuntime(Runtime::kTypeof, 1, instr);
5721 }
5722 
5723 
5724 void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
5725  Handle<String> type_name = instr->type_literal();
5726  Label* true_label = instr->TrueLabel(chunk_);
5727  Label* false_label = instr->FalseLabel(chunk_);
5728  Register value = ToRegister(instr->value());
5729 
5730  if (type_name->Equals(heap()->number_string())) {
5731  ASSERT(instr->temp1() != NULL);
5732  Register map = ToRegister(instr->temp1());
5733 
5734  __ JumpIfSmi(value, true_label);
5735  __ Ldr(map, FieldMemOperand(value, HeapObject::kMapOffset));
5736  __ CompareRoot(map, Heap::kHeapNumberMapRootIndex);
5737  EmitBranch(instr, eq);
5738 
5739  } else if (type_name->Equals(heap()->string_string())) {
5740  ASSERT((instr->temp1() != NULL) && (instr->temp2() != NULL));
5741  Register map = ToRegister(instr->temp1());
5742  Register scratch = ToRegister(instr->temp2());
5743 
5744  __ JumpIfSmi(value, false_label);
5745  __ JumpIfObjectType(
5746  value, map, scratch, FIRST_NONSTRING_TYPE, false_label, ge);
5747  __ Ldrb(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
5748  EmitTestAndBranch(instr, eq, scratch, 1 << Map::kIsUndetectable);
5749 
5750  } else if (type_name->Equals(heap()->symbol_string())) {
5751  ASSERT((instr->temp1() != NULL) && (instr->temp2() != NULL));
5752  Register map = ToRegister(instr->temp1());
5753  Register scratch = ToRegister(instr->temp2());
5754 
5755  __ JumpIfSmi(value, false_label);
5756  __ CompareObjectType(value, map, scratch, SYMBOL_TYPE);
5757  EmitBranch(instr, eq);
5758 
5759  } else if (type_name->Equals(heap()->boolean_string())) {
5760  __ JumpIfRoot(value, Heap::kTrueValueRootIndex, true_label);
5761  __ CompareRoot(value, Heap::kFalseValueRootIndex);
5762  EmitBranch(instr, eq);
5763 
5764  } else if (FLAG_harmony_typeof && type_name->Equals(heap()->null_string())) {
5765  __ CompareRoot(value, Heap::kNullValueRootIndex);
5766  EmitBranch(instr, eq);
5767 
5768  } else if (type_name->Equals(heap()->undefined_string())) {
5769  ASSERT(instr->temp1() != NULL);
5770  Register scratch = ToRegister(instr->temp1());
5771 
5772  __ JumpIfRoot(value, Heap::kUndefinedValueRootIndex, true_label);
5773  __ JumpIfSmi(value, false_label);
5774  // Check for undetectable objects and jump to the true branch in this case.
5775  __ Ldr(scratch, FieldMemOperand(value, HeapObject::kMapOffset));
5776  __ Ldrb(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
5777  EmitTestAndBranch(instr, ne, scratch, 1 << Map::kIsUndetectable);
5778 
5779  } else if (type_name->Equals(heap()->function_string())) {
5780  STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
5781  ASSERT(instr->temp1() != NULL);
5782  Register type = ToRegister(instr->temp1());
5783 
5784  __ JumpIfSmi(value, false_label);
5785  __ JumpIfObjectType(value, type, type, JS_FUNCTION_TYPE, true_label);
5786  // HeapObject's type has been loaded into type register by JumpIfObjectType.
5787  EmitCompareAndBranch(instr, eq, type, JS_FUNCTION_PROXY_TYPE);
5788 
5789  } else if (type_name->Equals(heap()->object_string())) {
5790  ASSERT((instr->temp1() != NULL) && (instr->temp2() != NULL));
5791  Register map = ToRegister(instr->temp1());
5792  Register scratch = ToRegister(instr->temp2());
5793 
5794  __ JumpIfSmi(value, false_label);
5795  if (!FLAG_harmony_typeof) {
5796  __ JumpIfRoot(value, Heap::kNullValueRootIndex, true_label);
5797  }
5798  __ JumpIfObjectType(value, map, scratch,
5799  FIRST_NONCALLABLE_SPEC_OBJECT_TYPE, false_label, lt);
5800  __ CompareInstanceType(map, scratch, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
5801  __ B(gt, false_label);
5802  // Check for undetectable objects => false.
5803  __ Ldrb(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
5804  EmitTestAndBranch(instr, eq, scratch, 1 << Map::kIsUndetectable);
5805 
5806  } else {
5807  __ B(false_label);
5808  }
5809 }
5810 
5811 
5812 void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
5813  __ Ucvtf(ToDoubleRegister(instr->result()), ToRegister32(instr->value()));
5814 }
5815 
5816 
5817 void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
5818  Register object = ToRegister(instr->value());
5819  Register map = ToRegister(instr->map());
5820  Register temp = ToRegister(instr->temp());
5821  __ Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset));
5822  __ Cmp(map, temp);
5823  DeoptimizeIf(ne, instr->environment());
5824 }
5825 
5826 
5827 void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
5828  Register receiver = ToRegister(instr->receiver());
5829  Register function = ToRegister(instr->function());
5830  Register result = ToRegister(instr->result());
5831 
5832  // If the receiver is null or undefined, we have to pass the global object as
5833  // a receiver to normal functions. Values have to be passed unchanged to
5834  // builtins and strict-mode functions.
5835  Label global_object, done, deopt;
5836 
5837  if (!instr->hydrogen()->known_function()) {
5838  __ Ldr(result, FieldMemOperand(function,
5839  JSFunction::kSharedFunctionInfoOffset));
5840 
5841  // CompilerHints is an int32 field. See objects.h.
5842  __ Ldr(result.W(),
5843  FieldMemOperand(result, SharedFunctionInfo::kCompilerHintsOffset));
5844 
5845  // Do not transform the receiver to object for strict mode functions.
5846  __ Tbnz(result, SharedFunctionInfo::kStrictModeFunction, &done);
5847 
5848  // Do not transform the receiver to object for builtins.
5849  __ Tbnz(result, SharedFunctionInfo::kNative, &done);
5850  }
5851 
5852  // Normal function. Replace undefined or null with global receiver.
5853  __ JumpIfRoot(receiver, Heap::kNullValueRootIndex, &global_object);
5854  __ JumpIfRoot(receiver, Heap::kUndefinedValueRootIndex, &global_object);
5855 
5856  // Deoptimize if the receiver is not a JS object.
5857  __ JumpIfSmi(receiver, &deopt);
5858  __ CompareObjectType(receiver, result, result, FIRST_SPEC_OBJECT_TYPE);
5859  __ Mov(result, receiver);
5860  __ B(ge, &done);
5861  // Otherwise, fall through to deopt.
5862 
5863  __ Bind(&deopt);
5864  Deoptimize(instr->environment());
5865 
5866  __ Bind(&global_object);
5867  __ Ldr(result, FieldMemOperand(function, JSFunction::kContextOffset));
5868  __ Ldr(result, ContextMemOperand(result, Context::GLOBAL_OBJECT_INDEX));
5869  __ Ldr(result, FieldMemOperand(result, GlobalObject::kGlobalReceiverOffset));
5870 
5871  __ Bind(&done);
5872 }
5873 
5874 
5875 void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
5876  Register object = ToRegister(instr->object());
5877  Register index = ToRegister(instr->index());
5878  Register result = ToRegister(instr->result());
5879 
5880  __ AssertSmi(index);
5881 
5882  Label out_of_object, done;
5883  __ Cmp(index, Smi::FromInt(0));
5884  __ B(lt, &out_of_object);
5885 
5887  __ Add(result, object, Operand::UntagSmiAndScale(index, kPointerSizeLog2));
5888  __ Ldr(result, FieldMemOperand(result, JSObject::kHeaderSize));
5889 
5890  __ B(&done);
5891 
5892  __ Bind(&out_of_object);
5893  __ Ldr(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
5894  // Index is equal to negated out of object property index plus 1.
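 // For example, index == -1 selects element 0 of the properties array and
 // index == -2 selects element 1: subtracting the (negative) scaled index
 // moves forward by |index| pointers, and the -kPointerSize in the load
 // offset below removes the +1 bias.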
5895  __ Sub(result, result, Operand::UntagSmiAndScale(index, kPointerSizeLog2));
5896  __ Ldr(result, FieldMemOperand(result,
5897  FixedArray::kHeaderSize - kPointerSize));
5898  __ Bind(&done);
5899 }
5900 
5901 } } // namespace v8::internal
byte * Address
Definition: globals.h:186
static Handle< Code > initialize_stub(Isolate *isolate, StrictMode strict_mode)
Definition: ic.cc:1267
enable upcoming ES6 features enable harmony block scoping enable harmony enable harmony proxies enable harmony generators enable harmony numeric enable harmony string enable harmony math functions harmony_scoping harmony_symbols harmony_collections harmony_iteration harmony_strings harmony_scoping harmony_maths tracks arrays with only smi values Optimize object Array DOM strings and string pretenure call new trace pretenuring decisions of HAllocate instructions track fields with only smi values track fields with heap values track_fields track_fields Enables optimizations which favor memory size over execution speed use string slices optimization filter maximum number of GVN fix point iterations use function inlining use allocation folding eliminate write barriers targeting allocations in optimized code maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining crankshaft harvests type feedback from stub cache trace check elimination phase hydrogen tracing filter NULL
Definition: flags.cc:269
static DwVfpRegister FromAllocationIndex(int index)
void FinishCode(Handle< Code > code)
static const int kHashFieldOffset
Definition: objects.h:8629
virtual void EmitInverted(Label *label) const
const int kMinInt
Definition: globals.h:249
static const int kBitFieldOffset
Definition: objects.h:6461
void DoDeferredStringCharFromCode(LStringCharFromCode *instr)
void DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal *instr, Label *map_check)
const intptr_t kSmiTagMask
Definition: v8.h:5480
static const int kCodeEntryOffset
Definition: objects.h:7518
static const int kPrototypeOrInitialMapOffset
Definition: objects.h:7519
static const int kValueOffset
Definition: objects.h:9547
enable upcoming ES6 features enable harmony block scoping enable harmony enable harmony proxies enable harmony generators enable harmony numeric enable harmony string enable harmony math functions harmony_scoping harmony_symbols harmony_collections harmony_iteration harmony_strings harmony_scoping harmony_maths tracks arrays with only smi values Optimize object Array DOM strings and string pretenure call new trace pretenuring decisions of HAllocate instructions track fields with only smi values track fields with heap values track_fields track_fields Enables optimizations which favor memory size over execution speed use string slices optimization filter maximum number of GVN fix point iterations use function inlining use allocation folding eliminate write barriers targeting allocations in optimized code maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining crankshaft harvests type feedback from stub cache trace check elimination phase hydrogen tracing filter trace hydrogen to given file name trace inlining decisions trace store elimination trace all use positions trace global value numbering trace hydrogen escape analysis trace the tracking of allocation sites trace map generalization environment for every instruction deoptimize every n garbage collections put a break point before deoptimizing deoptimize uncommon cases use on stack replacement trace array bounds check elimination perform array index dehoisting use load elimination use store elimination use constant folding eliminate unreachable code number of stress runs when picking a function to watch for shared function not JSFunction itself flushes the cache of optimized code for closures on every GC functions with arguments object maximum number of escape analysis fix point iterations allow uint32 values on optimize frames if they are used only in safe operations track concurrent recompilation artificial compilation delay in ms concurrent on stack replacement do not emit check maps for constant values that have a leaf map
Definition: flags.cc:350
static const int kEnumCacheOffset
Definition: objects.h:3499
const uint32_t kTwoByteStringTag
Definition: objects.h:610
const Register cp
static const int kCallSizeWithRelocation
int StackSlotOffset(int index)
Definition: lithium.cc:240
const LowDwVfpRegister d0
virtual void BeforeCall(int call_size) const
RegisterType type() const
static Smi * FromInt(int value)
Definition: objects-inl.h:1209
Smi * ToSmi(LConstantOperand *op) const
const int KB
Definition: globals.h:245
static TypeFeedbackId None()
Definition: utils.h:1149
SafepointGenerator(LCodeGen *codegen, LPointerMap *pointers, Safepoint::DeoptMode mode)
static Handle< Code > GetUninitialized(Isolate *isolate, Token::Value op)
Definition: ic.cc:2489
static const int kDataOffset
Definition: objects.h:4970
BranchIfHeapNumber(LCodeGen *codegen, const Register &value)
bool IsSmi(LConstantOperand *op) const
static Handle< T > cast(Handle< S > that)
Definition: handles.h:75
static const int kGlobalReceiverOffset
Definition: objects.h:7613
static Representation Integer32()
const int kSmiValueSize
Definition: v8.h:5540
virtual void EmitInverted(Label *label) const
BranchIfNonZeroNumber(LCodeGen *codegen, const FPRegister &value, const FPRegister &scratch)
virtual void Emit(Label *label) const
#define __
int int32_t
Definition: unicode.cc:47
static const unsigned int kContainsCachedArrayIndexMask
Definition: objects.h:8673
const int kMaxInt
Definition: globals.h:248
int MaskToBit(uint64_t mask)
AllocationSiteOverrideMode
Definition: code-stubs.h:759
static void EmitNotInlined(MacroAssembler *masm)
static const int kSize
Definition: objects.h:7922
#define ASSERT(condition)
Definition: checks.h:329
virtual void Emit(Label *label) const
void DoDeferredStringCharCodeAt(LStringCharCodeAt *instr)
static const int kContextOffset
Definition: frames.h:185
const int kPointerSizeLog2
Definition: globals.h:281
static const int kInObjectFieldCount
Definition: objects.h:7976
const uint32_t kStringRepresentationMask
Definition: objects.h:615
virtual void EmitInverted(Label *label) const
static const int kCallerFPOffset
Definition: frames.h:188
PerThreadAssertScopeDebugOnly< DEFERRED_HANDLE_DEREFERENCE_ASSERT, true > AllowDeferredHandleDereference
Definition: assert-scope.h:234
static Handle< Code > initialize_stub(Isolate *isolate, ExtraICState extra_state)
Definition: ic.cc:782
static const int kInstanceClassNameOffset
Definition: objects.h:7107
int WhichPowerOf2(uint32_t x)
Definition: utils.h:57
bool IsSimpleMapChangeTransition(ElementsKind from_kind, ElementsKind to_kind)
Variable * parameter(int index) const
Definition: scopes.h:333
static Operand UntagSmiAndScale(Register smi, int scale)
bool IsInteger32Constant(LConstantOperand *op) const
static const int kContextOffset
Definition: objects.h:7523
virtual void Emit(Label *label) const
Condition ReverseConditionForCmp(Condition cond)
const int64_t kXSignBit
void DoDeferredStackCheck(LStackCheck *instr)
const uint64_t kSmiShiftMask
int LookupDestination(int block_id) const
const unsigned kWRegSizeInBits
#define IN
Operand ToOperand(LOperand *op)
MemOperand UntagSmiFieldMemOperand(Register object, int offset)
MemOperand UntagSmiMemOperand(Register object, int offset)
const uint64_t kHoleNanInt64
Definition: v8globals.h:458
#define UNREACHABLE()
Definition: checks.h:52
int32_t WhichPowerOf2Abs(int32_t x)
Definition: utils.h:261
enable upcoming ES6 features enable harmony block scoping enable harmony enable harmony proxies enable harmony generators enable harmony numeric enable harmony string enable harmony math functions harmony_scoping harmony_symbols harmony_collections harmony_iteration harmony_strings harmony_scoping harmony_maths tracks arrays with only smi values Optimize object Array DOM strings and string pretenure call new trace pretenuring decisions of HAllocate instructions track fields with only smi values track fields with heap values track_fields track_fields Enables optimizations which favor memory size over execution speed use string slices optimization filter maximum number of GVN fix point iterations use function inlining use allocation folding eliminate write barriers targeting allocations in optimized code maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining crankshaft harvests type feedback from stub cache trace check elimination phase hydrogen tracing filter trace hydrogen to given file name trace inlining decisions trace store elimination trace all use positions trace global value numbering trace hydrogen escape analysis trace the tracking of allocation sites trace map generalization environment for every instruction deoptimize every n garbage collections put a break point before deoptimizing deoptimize uncommon cases use on stack replacement trace array bounds check elimination perform array index dehoisting use load elimination use store elimination use constant folding eliminate unreachable code number of stress runs when picking a function to watch for shared function not JSFunction itself flushes the cache of optimized code for closures on every GC functions with arguments object maximum number of escape analysis fix point iterations allow uint32 values on optimize frames if they are used only in safe operations track concurrent recompilation artificial compilation delay in ms concurrent on stack replacement do not emit check maps for constant values that have a leaf deoptimize the optimized code if the layout of the maps changes number of stack frames inspected by the profiler percentage of ICs that must have type info to allow optimization extra verbose compilation tracing generate extra emit comments in code disassembly enable use of SSE3 instructions if available enable use of CMOV instruction if available enable use of VFP3 instructions if available enable use of NEON instructions if enable use of SDIV and UDIV instructions if enable loading bit constant by means of movw movt instruction enable unaligned accesses for enable use of d16 d31 registers on ARM this requires VFP3 force all emitted branches to be in long mode(MIPS only)") DEFINE_string(expose_natives_as
DwVfpRegister DoubleRegister
enable upcoming ES6 features enable harmony block scoping enable harmony enable harmony proxies enable harmony generators enable harmony numeric enable harmony string enable harmony math functions harmony_scoping harmony_symbols harmony_collections harmony_iteration harmony_strings harmony_scoping harmony_maths tracks arrays with only smi values Optimize object size
Definition: flags.cc:211
static const int kLengthOffset
Definition: objects.h:8905
STATIC_ASSERT(sizeof(CPURegister)==sizeof(Register))
static const int kValueOffset
Definition: objects.h:1971
enable upcoming ES6 features enable harmony block scoping enable harmony enable harmony proxies enable harmony generators enable harmony numeric enable harmony string enable harmony math functions harmony_scoping harmony_symbols harmony_collections harmony_iteration harmony_strings harmony_scoping harmony_maths tracks arrays with only smi values Optimize object Array DOM strings and string pretenure call new trace pretenuring decisions of HAllocate instructions track fields with only smi values track fields with heap values track_fields track_fields Enables optimizations which favor memory size over execution speed use string slices optimization filter maximum number of GVN fix point iterations use function inlining use allocation folding eliminate write barriers targeting allocations in optimized code maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining crankshaft harvests type feedback from stub cache trace check elimination phase hydrogen tracing filter trace hydrogen to given file name trace inlining decisions trace store elimination trace all use positions trace global value numbering trace hydrogen escape analysis trace the tracking of allocation sites trace map generalization environment for every instruction deoptimize every n garbage collections put a break point before deoptimizing deoptimize uncommon cases use on stack replacement trace array bounds check elimination perform array index dehoisting use load elimination use store elimination use constant folding eliminate unreachable code number of stress runs when picking a function to watch for shared function not JSFunction itself flushes the cache of optimized code for closures on every GC functions with arguments object maximum number of escape analysis fix point iterations allow uint32 values on optimize frames if they are used only in safe operations track concurrent recompilation artificial compilation delay in ms concurrent on stack replacement do not emit check maps for constant values that have a leaf deoptimize the optimized code if the layout of the maps changes number of stack frames inspected by the profiler percentage of ICs that must have type info to allow optimization extra verbose compilation tracing generate extra emit comments in code disassembly enable use of SSE3 instructions if available enable use of CMOV instruction if available enable use of VFP3 instructions if available enable use of NEON instructions if enable use of SDIV and UDIV instructions if enable loading bit constant by means of movw movt instruction enable unaligned accesses for enable use of d16 d31 registers on ARM this requires VFP3 force all emitted branches to be in long expose natives in global object expose freeBuffer extension expose gc extension under the specified name expose externalize string extension number of stack frames to capture disable builtin natives files print name of functions for which code is generated use random jit cookie to mask large constants trace lazy optimization use adaptive optimizations always try to OSR functions trace optimize function deoptimization minimum length for automatic enable preparsing maximum number of optimization attempts before giving up cache prototype transitions trace debugging JSON request response trace out of bounds accesses to external arrays trace_js_array_abuse automatically set the debug break flag when debugger commands are in the queue abort by crashing maximum length of function source code printed in a 
stack trace max size of the new max size of the old max size of executable always perform global GCs print one trace line following each garbage collection do not print trace line after scavenger collection print statistics of the maximum memory committed for the heap in only print modified registers Don t break for ASM_UNIMPLEMENTED_BREAK macros print stack trace when an illegal exception is thrown randomize hashes to avoid predictable hash Fixed seed to use to hash property Print the time it takes to deserialize the snapshot testing_bool_flag testing_int_flag string flag tmp file in which to serialize heap Print the time it takes to lazily compile hydrogen code stubs concurrent_recompilation concurrent_sweeping Print usage including flags
Definition: flags.cc:665
bool IsFastPackedElementsKind(ElementsKind kind)
Condition InvertCondition(Condition cond)
const int kDoubleSize
Definition: globals.h:266
static const int kDontAdaptArgumentsSentinel
Definition: objects.h:7098
void DoDeferredNumberTagD(LNumberTagD *instr)
static uint32_t update(uint32_tprevious, AllocationSpacevalue)
Definition: utils.h:296
static Address GetDeoptimizationEntry(Isolate *isolate, int id, BailoutType type, GetEntryMode mode=ENSURE_ENTRY_CODE)
Definition: deoptimizer.cc:683
const int kPointerSize
Definition: globals.h:268
BranchOnCondition(LCodeGen *codegen, Condition cond)
void check(i::Vector< const uint8_t > string)
const unsigned kInstructionSize
enable upcoming ES6 features enable harmony block scoping enable harmony enable harmony proxies enable harmony generators enable harmony numeric enable harmony string enable harmony math functions harmony_scoping harmony_symbols harmony_collections harmony_iteration harmony_strings harmony_scoping harmony_maths tracks arrays with only smi values Optimize object Array shift
Definition: flags.cc:211
static void MaybeCallEntryHook(MacroAssembler *masm)
DwVfpRegister ToDoubleRegister(LOperand *op) const
const int kHeapObjectTag
Definition: v8.h:5473
static FPRegister FromAllocationIndex(unsigned int index)
void DoDeferredAllocate(LAllocate *instr)
static int GetDeoptimizationId(Isolate *isolate, Address addr, BailoutType type)
Definition: deoptimizer.cc:701
static const int kMaxRegularHeapObjectSize
Definition: spaces.h:820
enable upcoming ES6 features enable harmony block scoping enable harmony enable harmony proxies enable harmony generators enable harmony numeric enable harmony string enable harmony math functions harmony_scoping harmony_symbols harmony_collections harmony_iteration harmony_strings harmony_scoping harmony_maths tracks arrays with only smi values Optimize object Array DOM strings and string pretenure call new trace pretenuring decisions of HAllocate instructions track fields with only smi values track fields with heap values track_fields track_fields Enables optimizations which favor memory size over execution speed use string slices optimization filter maximum number of GVN fix point iterations use function inlining use allocation folding eliminate write barriers targeting allocations in optimized code maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining crankshaft harvests type feedback from stub cache trace check elimination phase hydrogen tracing filter trace hydrogen to given file name trace inlining decisions trace store elimination trace all use positions trace global value numbering trace hydrogen escape analysis trace the tracking of allocation sites trace map generalization environment for every instruction deoptimize every n garbage collections put a break point before deoptimizing deoptimize uncommon cases use on stack replacement trace array bounds check elimination perform array index dehoisting use load elimination use store elimination use constant folding eliminate unreachable code number of stress runs when picking a function to watch for shared function not JSFunction itself flushes the cache of optimized code for closures on every GC functions with arguments object maximum number of escape analysis fix point iterations allow uint32 values on optimize frames if they are used only in safe operations track concurrent recompilation artificial compilation delay in ms concurrent on stack replacement do not emit check maps for constant values that have a leaf deoptimize the optimized code if the layout of the maps changes number of stack frames inspected by the profiler percentage of ICs that must have type info to allow optimization extra verbose compilation tracing generate extra code(assertions) for debugging") DEFINE_bool(code_comments
static Register FromAllocationIndex(int index)
static const int kCallerSPOffset
Definition: frames.h:190
static const int kCacheStampOffset
Definition: objects.h:7787
bool IsFixedTypedArrayElementsKind(ElementsKind kind)
static const int kPropertiesOffset
Definition: objects.h:2755
int32_t ToInteger32(LConstantOperand *op) const
int num_parameters() const
Definition: scopes.h:338
static const int kMarkerOffset
Definition: frames.h:184
bool IsFastSmiElementsKind(ElementsKind kind)
static int OffsetOfElementAt(int index)
Definition: objects.h:3160
static void Generate(MacroAssembler *masm, Register string, Register index, Register result, Label *call_runtime)
Handle< Object > ToHandle(LConstantOperand *op) const
static const int kHeaderSize
Definition: objects.h:9042
const int kSmiShift
bool IsPowerOf2(T x)
Definition: utils.h:51
bool Is(Object *obj)
static const int kArrayIndexValueBits
Definition: objects.h:8654
virtual void Emit(Label *label) const
void DoDeferredInstanceMigration(LCheckMaps *instr, Register object)
const int64_t kWSignBit
double ToDouble(LConstantOperand *op) const
int ElementsKindToShiftSize(ElementsKind elements_kind)
Vector< const char > CStrVector(const char *data)
Definition: utils.h:574
static int OffsetOfElementAt(int index)
Definition: objects.h:3070
static int SizeFor(int length)
Definition: objects.h:3067
Operand ToOperand32U(LOperand *op)
const unsigned kXRegSize
bool NeedsDeferredFrame() const
static const int kHeaderSize
Definition: objects.h:3016
AllocationFlags
const Register lr
void Load(const v8::FunctionCallbackInfo< v8::Value > &args)
Definition: shell.cc:171
static const int kMapOffset
Definition: objects.h:1890
static const int kValueOffset
Definition: objects.h:7779
static const int kFixedFrameSizeFromFp
Definition: frames.h:180
Operand ToOperand32I(LOperand *op)
virtual void EmitInverted(Label *label) const
V8 flag definitions (DEFINE_bool and friends): harmony language feature flags, allocation pretenuring and field-type tracking, inlining and GVN limits, Hydrogen/Crankshaft tracing, deoptimization, on-stack replacement and concurrent recompilation options
Definition: flags.cc:317
MemOperand FieldMemOperand(Register object, int offset)
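FieldMemOperand is used throughout the code generator to address fields of tagged heap objects; a minimal sketch, assuming the usual V8 convention that the heap-object tag is folded into the displacement (the real helper lives in the macro-assembler headers):

// Subtracts kHeapObjectTag from the field offset, so the returned
// MemOperand addresses the raw field of the tagged object pointer.
inline MemOperand FieldMemOperand(Register object, int offset) {
  return MemOperand(object, offset - kHeapObjectTag);
}
// Typical use when loading an object's map:
//   __ Ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset));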
virtual void AfterCall() const
static const int kHasNonInstancePrototype
Definition: objects.h:6468
void WriteTranslation(LEnvironment *environment, Translation *translation)
static const int kNotDeoptimizationEntry
Definition: deoptimizer.h:258
LinkRegisterStatus GetLinkRegisterState() const
const int kSmiTagSize
Definition: v8.h:5479
static const int kHeaderSize
Definition: objects.h:5604
static AllocationSiteMode GetMode(ElementsKind boilerplate_elements_kind)
Definition: objects-inl.h:1477
const int64_t kXSignMask
#define ASSERT_EQ(v1, v2)
Definition: checks.h:330
T Abs(T a)
Definition: utils.h:241
formal_parameter_count (SharedFunctionInfo accessor)
Definition: objects-inl.h:5190
static const int kConstructorOffset
Definition: objects.h:6428
const uint32_t kOneByteStringTag
Definition: objects.h:611
static double canonical_not_the_hole_nan_as_double()
Definition: objects-inl.h:2166
CompareAndBranch(LCodeGen *codegen, Condition cond, const Register &lhs, const Operand &rhs)
const int kSmiTag
Definition: v8.h:5478
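kSmiTag, kSmiTagSize and kSmiShift (listed above) describe the small-integer encoding the code generator relies on; a hedged worked example, assuming the 64-bit layout in which the 32-bit payload lives in the upper word:

// On 64-bit V8 (including arm64): kSmiTag == 0, kSmiTagSize == 1, kSmiShift == 32.
// Tagging:   int64_t smi = static_cast<int64_t>(value) << kSmiShift;  // low bits stay 0, so the tag is 0
// Untagging: int32_t value = static_cast<int32_t>(smi >> kSmiShift);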
static const int kIsUndetectable
Definition: objects.h:6472
static const int kHeaderSize
Definition: objects.h:2757
#define X
Register ToRegister(LOperand *op) const
void Add(const T &element, AllocationPolicy allocator=AllocationPolicy())
Definition: list-inl.h:39
void DoDeferredTaggedToI(LTaggedToI *instr)
static const int kPrototypeOffset
Definition: objects.h:6427
Register ToRegister32(LOperand *op) const
Harmony (ES6) feature flag definitions: harmony_numeric_literals (0o77, 0b11), DEFINE_bool(harmony_strings) and related flags
static const int kHashShift
Definition: objects.h:8642
#define RUNTIME_ENTRY(name, nargs, ressize)
const Register no_reg
const LowDwVfpRegister d1
const Register fp
static void EmitMathExp(MacroAssembler *masm, DwVfpRegister input, DwVfpRegister result, DwVfpRegister double_scratch1, DwVfpRegister double_scratch2, Register temp1, Register temp2, Register temp3)
MemOperand ToMemOperand(LOperand *op) const
bool IsNextEmittedBlock(int block_id) const
MemOperand ContextMemOperand(Register context, int index)
virtual void EmitInverted(Label *label) const
static const int kCompilerHintsOffset
Definition: objects.h:7171
LCodeGen(LChunk *chunk, MacroAssembler *assembler, CompilationInfo *info)
static const int kSharedFunctionInfoOffset
Definition: objects.h:7521
#define ASM_UNIMPLEMENTED_BREAK(message)
Definition: checks.h:68
BranchType InvertBranchType(BranchType type)
static const int kMaxValue
Definition: objects.h:1681
virtual void EmitInverted(Label *label) const
const int kCharSize
Definition: globals.h:261
ElementsKind GetHoleyElementsKind(ElementsKind packed_kind)
static const int32_t kMaxOneByteCharCode
Definition: objects.h:8914
bool EvalComparison(Token::Value op, double op1, double op2)
Definition: assembler.cc:1516
MemOperand GlobalObjectMemOperand()
const int kUC16Size
Definition: globals.h:312
TestAndBranch(LCodeGen *codegen, Condition cond, const Register &value, uint64_t mask)
const uint32_t kStringEncodingMask
Definition: objects.h:609
BranchIfRoot(LCodeGen *codegen, const Register &value, Heap::RootListIndex index)
virtual void Emit(Label *label) const
bool AreAliased(const CPURegister &reg1, const CPURegister &reg2, const CPURegister &reg3=NoReg, const CPURegister &reg4=NoReg, const CPURegister &reg5=NoReg, const CPURegister &reg6=NoReg, const CPURegister &reg7=NoReg, const CPURegister &reg8=NoReg)
virtual void Emit(Label *label) const
static const int kInstanceTypeOffset
Definition: objects.h:6459