v8 3.25.30 (node 0.11.13)
V8 is Google's open source JavaScript engine
lithium-codegen-x64.cc
1 // Copyright 2013 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
4 // met:
5 //
6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided
11 // with the distribution.
12 // * Neither the name of Google Inc. nor the names of its
13 // contributors may be used to endorse or promote products derived
14 // from this software without specific prior written permission.
15 //
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 
28 #include "v8.h"
29 
30 #if V8_TARGET_ARCH_X64
31 
32 #include "x64/lithium-codegen-x64.h"
33 #include "code-stubs.h"
34 #include "stub-cache.h"
35 #include "hydrogen-osr.h"
36 
37 namespace v8 {
38 namespace internal {
39 
40 
41 // When invoking builtins, we need to record the safepoint in the middle of
42 // the invoke instruction sequence generated by the macro assembler.
43 class SafepointGenerator V8_FINAL : public CallWrapper {
44  public:
45  SafepointGenerator(LCodeGen* codegen,
46  LPointerMap* pointers,
47  Safepoint::DeoptMode mode)
48  : codegen_(codegen),
49  pointers_(pointers),
50  deopt_mode_(mode) { }
51  virtual ~SafepointGenerator() {}
52 
53  virtual void BeforeCall(int call_size) const V8_OVERRIDE {}
54 
55  virtual void AfterCall() const V8_OVERRIDE {
56  codegen_->RecordSafepoint(pointers_, deopt_mode_);
57  }
58 
59  private:
60  LCodeGen* codegen_;
61  LPointerMap* pointers_;
62  Safepoint::DeoptMode deopt_mode_;
63 };
64 
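The pattern is simply a pair of callbacks that the assembler invokes around the emitted call, so the safepoint gets recorded at the call's return address. A stripped-down C++ sketch of that hook shape (the type and function names below are illustrative stand-ins, not the real v8 interfaces):

  #include <cstdio>

  // Illustrative stand-in for CallWrapper: the assembler calls BeforeCall just
  // before emitting the call and AfterCall right after it.
  struct CallObserver {
    virtual ~CallObserver() {}
    virtual void BeforeCall(int call_size) const = 0;
    virtual void AfterCall() const = 0;
  };

  struct RecordingObserver : public CallObserver {
    virtual void BeforeCall(int call_size) const {}  // nothing to do up front
    virtual void AfterCall() const {
      // In SafepointGenerator this is where RecordSafepoint() runs; the
      // current pc is the instruction immediately after the call.
      std::printf("record safepoint here\n");
    }
  };

  void EmitCallWithSafepoint(const CallObserver& observer) {
    observer.BeforeCall(5);  // e.g. 5 == size of the call instruction
    // ... emit the call itself ...
    observer.AfterCall();
  }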
65 
66 #define __ masm()->
67 
68 bool LCodeGen::GenerateCode() {
69  LPhase phase("Z_Code generation", chunk());
70  ASSERT(is_unused());
71  status_ = GENERATING;
72 
73  // Open a frame scope to indicate that there is a frame on the stack. The
74  // MANUAL indicates that the scope shouldn't actually generate code to set up
75  // the frame (that is done in GeneratePrologue).
76  FrameScope frame_scope(masm_, StackFrame::MANUAL);
77 
78  return GeneratePrologue() &&
79  GenerateBody() &&
80  GenerateDeferredCode() &&
81  GenerateJumpTable() &&
82  GenerateSafepointTable();
83 }
84 
85 
86 void LCodeGen::FinishCode(Handle<Code> code) {
87  ASSERT(is_done());
88  code->set_stack_slots(GetStackSlotCount());
89  code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
90  if (code->is_optimized_code()) RegisterWeakObjectsInOptimizedCode(code);
91  PopulateDeoptimizationData(code);
92  info()->CommitDependencies(code);
93 }
94 
95 
96 void LChunkBuilder::Abort(BailoutReason reason) {
97  info()->set_bailout_reason(reason);
98  status_ = ABORTED;
99 }
100 
101 
102 #ifdef _MSC_VER
103 void LCodeGen::MakeSureStackPagesMapped(int offset) {
104  const int kPageSize = 4 * KB;
105  for (offset -= kPageSize; offset > 0; offset -= kPageSize) {
106  __ movp(Operand(rsp, offset), rax);
107  }
108 }
109 #endif
110 
111 
112 void LCodeGen::SaveCallerDoubles() {
113  ASSERT(info()->saves_caller_doubles());
114  ASSERT(NeedsEagerFrame());
115  Comment(";;; Save clobbered callee double registers");
116  int count = 0;
117  BitVector* doubles = chunk()->allocated_double_registers();
118  BitVector::Iterator save_iterator(doubles);
119  while (!save_iterator.Done()) {
120  __ movsd(MemOperand(rsp, count * kDoubleSize),
121  XMMRegister::FromAllocationIndex(save_iterator.Current()));
122  save_iterator.Advance();
123  count++;
124  }
125 }
126 
127 
128 void LCodeGen::RestoreCallerDoubles() {
129  ASSERT(info()->saves_caller_doubles());
130  ASSERT(NeedsEagerFrame());
131  Comment(";;; Restore clobbered callee double registers");
132  BitVector* doubles = chunk()->allocated_double_registers();
133  BitVector::Iterator save_iterator(doubles);
134  int count = 0;
135  while (!save_iterator.Done()) {
136  __ movsd(XMMRegister::FromAllocationIndex(save_iterator.Current()),
137  MemOperand(rsp, count * kDoubleSize));
138  save_iterator.Advance();
139  count++;
140  }
141 }
142 
143 
144 bool LCodeGen::GeneratePrologue() {
145  ASSERT(is_generating());
146 
147  if (info()->IsOptimizing()) {
148  ProfileEntryHookStub::MaybeCallEntryHook(masm_);
149 
150 #ifdef DEBUG
151  if (strlen(FLAG_stop_at) > 0 &&
152  info_->function()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
153  __ int3();
154  }
155 #endif
156 
157  // Sloppy mode functions need to replace the receiver with the global proxy
158  // when called as functions (without an explicit receiver object).
159  if (info_->this_has_uses() &&
160  info_->strict_mode() == SLOPPY &&
161  !info_->is_native()) {
162  Label ok;
163  StackArgumentsAccessor args(rsp, scope()->num_parameters());
164  __ movp(rcx, args.GetReceiverOperand());
165 
166  __ CompareRoot(rcx, Heap::kUndefinedValueRootIndex);
167  __ j(not_equal, &ok, Label::kNear);
168 
169  __ movp(rcx, GlobalObjectOperand());
170  __ movp(rcx, FieldOperand(rcx, GlobalObject::kGlobalReceiverOffset));
171 
172  __ movp(args.GetReceiverOperand(), rcx);
173 
174  __ bind(&ok);
175  }
176  }
177 
178  info()->set_prologue_offset(masm_->pc_offset());
179  if (NeedsEagerFrame()) {
180  ASSERT(!frame_is_built_);
181  frame_is_built_ = true;
182  __ Prologue(info()->IsStub() ? BUILD_STUB_FRAME : BUILD_FUNCTION_FRAME);
183  info()->AddNoFrameRange(0, masm_->pc_offset());
184  }
185 
186  // Reserve space for the stack slots needed by the code.
187  int slots = GetStackSlotCount();
188  if (slots > 0) {
189  if (FLAG_debug_code) {
190  __ subp(rsp, Immediate(slots * kPointerSize));
191 #ifdef _MSC_VER
192  MakeSureStackPagesMapped(slots * kPointerSize);
193 #endif
194  __ Push(rax);
195  __ Set(rax, slots);
196  __ movq(kScratchRegister, kSlotsZapValue);
197  Label loop;
198  __ bind(&loop);
199  __ movp(MemOperand(rsp, rax, times_pointer_size, 0),
200  kScratchRegister);
201  __ decl(rax);
202  __ j(not_zero, &loop);
203  __ Pop(rax);
204  } else {
205  __ subp(rsp, Immediate(slots * kPointerSize));
206 #ifdef _MSC_VER
207  MakeSureStackPagesMapped(slots * kPointerSize);
208 #endif
209  }
210 
211  if (info()->saves_caller_doubles()) {
212  SaveCallerDoubles();
213  }
214  }
215 
216  // Possibly allocate a local context.
217  int heap_slots = info_->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
218  if (heap_slots > 0) {
219  Comment(";;; Allocate local context");
220  // Argument to NewContext is the function, which is still in rdi.
221  if (heap_slots <= FastNewContextStub::kMaximumSlots) {
222  FastNewContextStub stub(heap_slots);
223  __ CallStub(&stub);
224  } else {
225  __ Push(rdi);
226  __ CallRuntime(Runtime::kHiddenNewFunctionContext, 1);
227  }
228  RecordSafepoint(Safepoint::kNoLazyDeopt);
229  // Context is returned in rax. It replaces the context passed to us.
230  // It's saved in the stack and kept live in rsi.
231  __ movp(rsi, rax);
232  __ movp(Operand(rbp, StandardFrameConstants::kContextOffset), rax);
233 
234  // Copy any necessary parameters into the context.
235  int num_parameters = scope()->num_parameters();
236  for (int i = 0; i < num_parameters; i++) {
237  Variable* var = scope()->parameter(i);
238  if (var->IsContextSlot()) {
239  int parameter_offset = StandardFrameConstants::kCallerSPOffset +
240  (num_parameters - 1 - i) * kPointerSize;
241  // Load parameter from stack.
242  __ movp(rax, Operand(rbp, parameter_offset));
243  // Store it in the context.
244  int context_offset = Context::SlotOffset(var->index());
245  __ movp(Operand(rsi, context_offset), rax);
246  // Update the write barrier. This clobbers rax and rbx.
247  __ RecordWriteContextSlot(rsi, context_offset, rax, rbx, kSaveFPRegs);
248  }
249  }
250  Comment(";;; End allocate local context");
251  }
252 
253  // Trace the call.
254  if (FLAG_trace && info()->IsOptimizing()) {
255  __ CallRuntime(Runtime::kTraceEnter, 0);
256  }
257  return !is_aborted();
258 }
259 
260 
261 void LCodeGen::GenerateOsrPrologue() {
262  // Generate the OSR entry prologue at the first unknown OSR value, or if there
263  // are none, at the OSR entrypoint instruction.
264  if (osr_pc_offset_ >= 0) return;
265 
266  osr_pc_offset_ = masm()->pc_offset();
267 
268  // Adjust the frame size, subsuming the unoptimized frame into the
269  // optimized frame.
270  int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots();
271  ASSERT(slots >= 0);
272  __ subp(rsp, Immediate(slots * kPointerSize));
273 }
274 
275 
276 void LCodeGen::GenerateBodyInstructionPre(LInstruction* instr) {
277  if (instr->IsCall()) {
278  EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
279  }
280  if (!instr->IsLazyBailout() && !instr->IsGap()) {
281  safepoints_.BumpLastLazySafepointIndex();
282  }
283 }
284 
285 
286 void LCodeGen::GenerateBodyInstructionPost(LInstruction* instr) {
287  if (instr->HasResult() && instr->MustSignExtendResult(chunk())) {
288  if (instr->result()->IsRegister()) {
289  Register result_reg = ToRegister(instr->result());
290  __ movsxlq(result_reg, result_reg);
291  } else {
292  // Sign extend the 32bit result in the stack slots.
293  ASSERT(instr->result()->IsStackSlot());
294  Operand src = ToOperand(instr->result());
295  __ movsxlq(kScratchRegister, src);
296  __ movq(src, kScratchRegister);
297  }
298  }
299 }
300 
301 
302 bool LCodeGen::GenerateJumpTable() {
303  Label needs_frame;
304  if (jump_table_.length() > 0) {
305  Comment(";;; -------------------- Jump table --------------------");
306  }
307  for (int i = 0; i < jump_table_.length(); i++) {
308  __ bind(&jump_table_[i].label);
309  Address entry = jump_table_[i].address;
310  Deoptimizer::BailoutType type = jump_table_[i].bailout_type;
311  int id = Deoptimizer::GetDeoptimizationId(isolate(), entry, type);
312  if (id == Deoptimizer::kNotDeoptimizationEntry) {
313  Comment(";;; jump table entry %d.", i);
314  } else {
315  Comment(";;; jump table entry %d: deoptimization bailout %d.", i, id);
316  }
317  if (jump_table_[i].needs_frame) {
318  ASSERT(!info()->saves_caller_doubles());
319  __ Move(kScratchRegister, ExternalReference::ForDeoptEntry(entry));
320  if (needs_frame.is_bound()) {
321  __ jmp(&needs_frame);
322  } else {
323  __ bind(&needs_frame);
324  __ movp(rsi, MemOperand(rbp, StandardFrameConstants::kContextOffset));
325  __ pushq(rbp);
326  __ movp(rbp, rsp);
327  __ Push(rsi);
328  // This variant of deopt can only be used with stubs. Since we don't
329  // have a function pointer to install in the stack frame that we're
330  // building, install a special marker there instead.
331  ASSERT(info()->IsStub());
332  __ Move(rsi, Smi::FromInt(StackFrame::STUB));
333  __ Push(rsi);
334  __ movp(rsi, MemOperand(rsp, kPointerSize));
335  __ call(kScratchRegister);
336  }
337  } else {
338  if (info()->saves_caller_doubles()) {
339  ASSERT(info()->IsStub());
340  RestoreCallerDoubles();
341  }
342  __ call(entry, RelocInfo::RUNTIME_ENTRY);
343  }
344  }
345  return !is_aborted();
346 }
347 
348 
349 bool LCodeGen::GenerateDeferredCode() {
350  ASSERT(is_generating());
351  if (deferred_.length() > 0) {
352  for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
353  LDeferredCode* code = deferred_[i];
354 
355  HValue* value =
356  instructions_->at(code->instruction_index())->hydrogen_value();
357  RecordAndWritePosition(
358  chunk()->graph()->SourcePositionToScriptPosition(value->position()));
359 
360  Comment(";;; <@%d,#%d> "
361  "-------------------- Deferred %s --------------------",
362  code->instruction_index(),
363  code->instr()->hydrogen_value()->id(),
364  code->instr()->Mnemonic());
365  __ bind(code->entry());
366  if (NeedsDeferredFrame()) {
367  Comment(";;; Build frame");
368  ASSERT(!frame_is_built_);
369  ASSERT(info()->IsStub());
370  frame_is_built_ = true;
371  // Build the frame in such a way that rsi isn't trashed.
372  __ pushq(rbp); // Caller's frame pointer.
373  __ Push(Operand(rbp, StandardFrameConstants::kContextOffset));
374  __ Push(Smi::FromInt(StackFrame::STUB)); // Frame type.
375  __ leap(rbp, Operand(rsp, 2 * kPointerSize));
376  Comment(";;; Deferred code");
377  }
378  code->Generate();
379  if (NeedsDeferredFrame()) {
380  __ bind(code->done());
381  Comment(";;; Destroy frame");
382  ASSERT(frame_is_built_);
383  frame_is_built_ = false;
384  __ movp(rsp, rbp);
385  __ popq(rbp);
386  }
387  __ jmp(code->exit());
388  }
389  }
390 
391  // Deferred code is the last part of the instruction sequence. Mark
392  // the generated code as done unless we bailed out.
393  if (!is_aborted()) status_ = DONE;
394  return !is_aborted();
395 }
396 
397 
398 bool LCodeGen::GenerateSafepointTable() {
399  ASSERT(is_done());
400  safepoints_.Emit(masm(), GetStackSlotCount());
401  return !is_aborted();
402 }
403 
404 
405 Register LCodeGen::ToRegister(int index) const {
406  return Register::FromAllocationIndex(index);
407 }
408 
409 
410 XMMRegister LCodeGen::ToDoubleRegister(int index) const {
411  return XMMRegister::FromAllocationIndex(index);
412 }
413 
414 
415 Register LCodeGen::ToRegister(LOperand* op) const {
416  ASSERT(op->IsRegister());
417  return ToRegister(op->index());
418 }
419 
420 
421 XMMRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
422  ASSERT(op->IsDoubleRegister());
423  return ToDoubleRegister(op->index());
424 }
425 
426 
427 bool LCodeGen::IsInteger32Constant(LConstantOperand* op) const {
428  return chunk_->LookupLiteralRepresentation(op).IsSmiOrInteger32();
429 }
430 
431 
432 bool LCodeGen::IsDehoistedKeyConstant(LConstantOperand* op) const {
433  return op->IsConstantOperand() &&
434  chunk_->IsDehoistedKey(chunk_->LookupConstant(op));
435 }
436 
437 
438 bool LCodeGen::IsSmiConstant(LConstantOperand* op) const {
439  return chunk_->LookupLiteralRepresentation(op).IsSmi();
440 }
441 
442 
443 int32_t LCodeGen::ToInteger32(LConstantOperand* op) const {
444  HConstant* constant = chunk_->LookupConstant(op);
445  return constant->Integer32Value();
446 }
447 
448 
449 Smi* LCodeGen::ToSmi(LConstantOperand* op) const {
450  HConstant* constant = chunk_->LookupConstant(op);
451  return Smi::FromInt(constant->Integer32Value());
452 }
453 
454 
455 double LCodeGen::ToDouble(LConstantOperand* op) const {
456  HConstant* constant = chunk_->LookupConstant(op);
457  ASSERT(constant->HasDoubleValue());
458  return constant->DoubleValue();
459 }
460 
461 
462 ExternalReference LCodeGen::ToExternalReference(LConstantOperand* op) const {
463  HConstant* constant = chunk_->LookupConstant(op);
464  ASSERT(constant->HasExternalReferenceValue());
465  return constant->ExternalReferenceValue();
466 }
467 
468 
469 Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
470  HConstant* constant = chunk_->LookupConstant(op);
471  ASSERT(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged());
472  return constant->handle(isolate());
473 }
474 
475 
476 static int ArgumentsOffsetWithoutFrame(int index) {
477  ASSERT(index < 0);
478  return -(index + 1) * kPointerSize + kPCOnStackSize;
479 }
480 
481 
482 Operand LCodeGen::ToOperand(LOperand* op) const {
483  // Does not handle registers. In X64 assembler, plain registers are not
484  // representable as an Operand.
485  ASSERT(op->IsStackSlot() || op->IsDoubleStackSlot());
486  if (NeedsEagerFrame()) {
487  return Operand(rbp, StackSlotOffset(op->index()));
488  } else {
489  // Retrieve parameter without eager stack-frame relative to the
490  // stack-pointer.
491  return Operand(rsp, ArgumentsOffsetWithoutFrame(op->index()));
492  }
493 }
494 
495 
496 void LCodeGen::WriteTranslation(LEnvironment* environment,
497  Translation* translation) {
498  if (environment == NULL) return;
499 
500  // The translation includes one command per value in the environment.
501  int translation_size = environment->translation_size();
502  // The output frame height does not include the parameters.
503  int height = translation_size - environment->parameter_count();
504 
505  WriteTranslation(environment->outer(), translation);
506  bool has_closure_id = !info()->closure().is_null() &&
507  !info()->closure().is_identical_to(environment->closure());
508  int closure_id = has_closure_id
509  ? DefineDeoptimizationLiteral(environment->closure())
510  : Translation::kSelfLiteralId;
511 
512  switch (environment->frame_type()) {
513  case JS_FUNCTION:
514  translation->BeginJSFrame(environment->ast_id(), closure_id, height);
515  break;
516  case JS_CONSTRUCT:
517  translation->BeginConstructStubFrame(closure_id, translation_size);
518  break;
519  case JS_GETTER:
520  ASSERT(translation_size == 1);
521  ASSERT(height == 0);
522  translation->BeginGetterStubFrame(closure_id);
523  break;
524  case JS_SETTER:
525  ASSERT(translation_size == 2);
526  ASSERT(height == 0);
527  translation->BeginSetterStubFrame(closure_id);
528  break;
529  case ARGUMENTS_ADAPTOR:
530  translation->BeginArgumentsAdaptorFrame(closure_id, translation_size);
531  break;
532  case STUB:
533  translation->BeginCompiledStubFrame();
534  break;
535  }
536 
537  int object_index = 0;
538  int dematerialized_index = 0;
539  for (int i = 0; i < translation_size; ++i) {
540  LOperand* value = environment->values()->at(i);
541  AddToTranslation(environment,
542  translation,
543  value,
544  environment->HasTaggedValueAt(i),
545  environment->HasUint32ValueAt(i),
546  &object_index,
547  &dematerialized_index);
548  }
549 }
550 
551 
552 void LCodeGen::AddToTranslation(LEnvironment* environment,
553  Translation* translation,
554  LOperand* op,
555  bool is_tagged,
556  bool is_uint32,
557  int* object_index_pointer,
558  int* dematerialized_index_pointer) {
559  if (op == LEnvironment::materialization_marker()) {
560  int object_index = (*object_index_pointer)++;
561  if (environment->ObjectIsDuplicateAt(object_index)) {
562  int dupe_of = environment->ObjectDuplicateOfAt(object_index);
563  translation->DuplicateObject(dupe_of);
564  return;
565  }
566  int object_length = environment->ObjectLengthAt(object_index);
567  if (environment->ObjectIsArgumentsAt(object_index)) {
568  translation->BeginArgumentsObject(object_length);
569  } else {
570  translation->BeginCapturedObject(object_length);
571  }
572  int dematerialized_index = *dematerialized_index_pointer;
573  int env_offset = environment->translation_size() + dematerialized_index;
574  *dematerialized_index_pointer += object_length;
575  for (int i = 0; i < object_length; ++i) {
576  LOperand* value = environment->values()->at(env_offset + i);
577  AddToTranslation(environment,
578  translation,
579  value,
580  environment->HasTaggedValueAt(env_offset + i),
581  environment->HasUint32ValueAt(env_offset + i),
582  object_index_pointer,
583  dematerialized_index_pointer);
584  }
585  return;
586  }
587 
588  if (op->IsStackSlot()) {
589  if (is_tagged) {
590  translation->StoreStackSlot(op->index());
591  } else if (is_uint32) {
592  translation->StoreUint32StackSlot(op->index());
593  } else {
594  translation->StoreInt32StackSlot(op->index());
595  }
596  } else if (op->IsDoubleStackSlot()) {
597  translation->StoreDoubleStackSlot(op->index());
598  } else if (op->IsRegister()) {
599  Register reg = ToRegister(op);
600  if (is_tagged) {
601  translation->StoreRegister(reg);
602  } else if (is_uint32) {
603  translation->StoreUint32Register(reg);
604  } else {
605  translation->StoreInt32Register(reg);
606  }
607  } else if (op->IsDoubleRegister()) {
608  XMMRegister reg = ToDoubleRegister(op);
609  translation->StoreDoubleRegister(reg);
610  } else if (op->IsConstantOperand()) {
611  HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op));
612  int src_index = DefineDeoptimizationLiteral(constant->handle(isolate()));
613  translation->StoreLiteral(src_index);
614  } else {
615  UNREACHABLE();
616  }
617 }
618 
619 
620 void LCodeGen::CallCodeGeneric(Handle<Code> code,
621  RelocInfo::Mode mode,
622  LInstruction* instr,
623  SafepointMode safepoint_mode,
624  int argc) {
625  ASSERT(instr != NULL);
626  __ call(code, mode);
627  RecordSafepointWithLazyDeopt(instr, safepoint_mode, argc);
628 
629  // Signal that we don't inline smi code before these stubs in the
630  // optimizing code generator.
631  if (code->kind() == Code::BINARY_OP_IC ||
632  code->kind() == Code::COMPARE_IC) {
633  __ nop();
634  }
635 }
636 
637 
638 void LCodeGen::CallCode(Handle<Code> code,
639  RelocInfo::Mode mode,
640  LInstruction* instr) {
641  CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT, 0);
642 }
643 
644 
645 void LCodeGen::CallRuntime(const Runtime::Function* function,
646  int num_arguments,
647  LInstruction* instr,
648  SaveFPRegsMode save_doubles) {
649  ASSERT(instr != NULL);
650  ASSERT(instr->HasPointerMap());
651 
652  __ CallRuntime(function, num_arguments, save_doubles);
653 
654  RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT, 0);
655 }
656 
657 
658 void LCodeGen::LoadContextFromDeferred(LOperand* context) {
659  if (context->IsRegister()) {
660  if (!ToRegister(context).is(rsi)) {
661  __ movp(rsi, ToRegister(context));
662  }
663  } else if (context->IsStackSlot()) {
664  __ movp(rsi, ToOperand(context));
665  } else if (context->IsConstantOperand()) {
666  HConstant* constant =
667  chunk_->LookupConstant(LConstantOperand::cast(context));
668  __ Move(rsi, Handle<Object>::cast(constant->handle(isolate())));
669  } else {
670  UNREACHABLE();
671  }
672 }
673 
674 
675 
676 void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
677  int argc,
678  LInstruction* instr,
679  LOperand* context) {
680  LoadContextFromDeferred(context);
681 
682  __ CallRuntimeSaveDoubles(id);
683  RecordSafepointWithRegisters(
684  instr->pointer_map(), argc, Safepoint::kNoLazyDeopt);
685 }
686 
687 
688 void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
689  Safepoint::DeoptMode mode) {
690  if (!environment->HasBeenRegistered()) {
691  // Physical stack frame layout:
692  // -x ............. -4 0 ..................................... y
693  // [incoming arguments] [spill slots] [pushed outgoing arguments]
694 
695  // Layout of the environment:
696  // 0 ..................................................... size-1
697  // [parameters] [locals] [expression stack including arguments]
698 
699  // Layout of the translation:
700  // 0 ........................................................ size - 1 + 4
701  // [expression stack including arguments] [locals] [4 words] [parameters]
702  // |>------------ translation_size ------------<|
703 
704  int frame_count = 0;
705  int jsframe_count = 0;
706  for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
707  ++frame_count;
708  if (e->frame_type() == JS_FUNCTION) {
709  ++jsframe_count;
710  }
711  }
712  Translation translation(&translations_, frame_count, jsframe_count, zone());
713  WriteTranslation(environment, &translation);
714  int deoptimization_index = deoptimizations_.length();
715  int pc_offset = masm()->pc_offset();
716  environment->Register(deoptimization_index,
717  translation.index(),
718  (mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
719  deoptimizations_.Add(environment, environment->zone());
720  }
721 }
722 
723 
724 void LCodeGen::DeoptimizeIf(Condition cc,
725  LEnvironment* environment,
726  Deoptimizer::BailoutType bailout_type) {
727  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
728  ASSERT(environment->HasBeenRegistered());
729  int id = environment->deoptimization_index();
730  ASSERT(info()->IsOptimizing() || info()->IsStub());
731  Address entry =
732  Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type);
733  if (entry == NULL) {
734  Abort(kBailoutWasNotPrepared);
735  return;
736  }
737 
738  if (DeoptEveryNTimes()) {
739  ExternalReference count = ExternalReference::stress_deopt_count(isolate());
740  Label no_deopt;
741  __ pushfq();
742  __ Push(rax);
743  Operand count_operand = masm()->ExternalOperand(count, kScratchRegister);
744  __ movl(rax, count_operand);
745  __ subl(rax, Immediate(1));
746  __ j(not_zero, &no_deopt, Label::kNear);
747  if (FLAG_trap_on_deopt) __ int3();
748  __ movl(rax, Immediate(FLAG_deopt_every_n_times));
749  __ movl(count_operand, rax);
750  __ Pop(rax);
751  __ popfq();
752  ASSERT(frame_is_built_);
753  __ call(entry, RelocInfo::RUNTIME_ENTRY);
754  __ bind(&no_deopt);
755  __ movl(count_operand, rax);
756  __ Pop(rax);
757  __ popfq();
758  }
759 
760  if (info()->ShouldTrapOnDeopt()) {
761  Label done;
762  if (cc != no_condition) {
763  __ j(NegateCondition(cc), &done, Label::kNear);
764  }
765  __ int3();
766  __ bind(&done);
767  }
768 
769  ASSERT(info()->IsStub() || frame_is_built_);
770  // Go through jump table if we need to handle condition, build frame, or
771  // restore caller doubles.
772  if (cc == no_condition && frame_is_built_ &&
773  !info()->saves_caller_doubles()) {
774  __ call(entry, RelocInfo::RUNTIME_ENTRY);
775  } else {
776  // We often have several deopts to the same entry, reuse the last
777  // jump entry if this is the case.
778  if (jump_table_.is_empty() ||
779  jump_table_.last().address != entry ||
780  jump_table_.last().needs_frame != !frame_is_built_ ||
781  jump_table_.last().bailout_type != bailout_type) {
782  Deoptimizer::JumpTableEntry table_entry(entry,
783  bailout_type,
784  !frame_is_built_);
785  jump_table_.Add(table_entry, zone());
786  }
787  if (cc == no_condition) {
788  __ jmp(&jump_table_.last().label);
789  } else {
790  __ j(cc, &jump_table_.last().label);
791  }
792  }
793 }
794 
795 
796 void LCodeGen::DeoptimizeIf(Condition cc,
797  LEnvironment* environment) {
798  Deoptimizer::BailoutType bailout_type = info()->IsStub()
799  ? Deoptimizer::LAZY
800  : Deoptimizer::EAGER;
801  DeoptimizeIf(cc, environment, bailout_type);
802 }
803 
804 
805 void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
806  int length = deoptimizations_.length();
807  if (length == 0) return;
808  Handle<DeoptimizationInputData> data =
809  factory()->NewDeoptimizationInputData(length, TENURED);
810 
811  Handle<ByteArray> translations =
812  translations_.CreateByteArray(isolate()->factory());
813  data->SetTranslationByteArray(*translations);
814  data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));
815  data->SetOptimizationId(Smi::FromInt(info_->optimization_id()));
816  if (info_->IsOptimizing()) {
817  // Reference to shared function info does not change between phases.
818  AllowDeferredHandleDereference allow_handle_dereference;
819  data->SetSharedFunctionInfo(*info_->shared_info());
820  } else {
821  data->SetSharedFunctionInfo(Smi::FromInt(0));
822  }
823 
824  Handle<FixedArray> literals =
825  factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
826  { AllowDeferredHandleDereference copy_handles;
827  for (int i = 0; i < deoptimization_literals_.length(); i++) {
828  literals->set(i, *deoptimization_literals_[i]);
829  }
830  data->SetLiteralArray(*literals);
831  }
832 
833  data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id().ToInt()));
834  data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_));
835 
836  // Populate the deoptimization entries.
837  for (int i = 0; i < length; i++) {
838  LEnvironment* env = deoptimizations_[i];
839  data->SetAstId(i, env->ast_id());
840  data->SetTranslationIndex(i, Smi::FromInt(env->translation_index()));
841  data->SetArgumentsStackHeight(i,
842  Smi::FromInt(env->arguments_stack_height()));
843  data->SetPc(i, Smi::FromInt(env->pc_offset()));
844  }
845  code->set_deoptimization_data(*data);
846 }
847 
848 
849 int LCodeGen::DefineDeoptimizationLiteral(Handle<Object> literal) {
850  int result = deoptimization_literals_.length();
851  for (int i = 0; i < deoptimization_literals_.length(); ++i) {
852  if (deoptimization_literals_[i].is_identical_to(literal)) return i;
853  }
854  deoptimization_literals_.Add(literal, zone());
855  return result;
856 }
857 
858 
859 void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
860  ASSERT(deoptimization_literals_.length() == 0);
861 
862  const ZoneList<Handle<JSFunction> >* inlined_closures =
863  chunk()->inlined_closures();
864 
865  for (int i = 0, length = inlined_closures->length();
866  i < length;
867  i++) {
868  DefineDeoptimizationLiteral(inlined_closures->at(i));
869  }
870 
871  inlined_function_count_ = deoptimization_literals_.length();
872 }
873 
874 
875 void LCodeGen::RecordSafepointWithLazyDeopt(
876  LInstruction* instr, SafepointMode safepoint_mode, int argc) {
877  if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
878  RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt);
879  } else {
880  ASSERT(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS);
881  RecordSafepointWithRegisters(
882  instr->pointer_map(), argc, Safepoint::kLazyDeopt);
883  }
884 }
885 
886 
887 void LCodeGen::RecordSafepoint(
888  LPointerMap* pointers,
889  Safepoint::Kind kind,
890  int arguments,
891  Safepoint::DeoptMode deopt_mode) {
892  ASSERT(kind == expected_safepoint_kind_);
893 
894  const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();
895 
896  Safepoint safepoint = safepoints_.DefineSafepoint(masm(),
897  kind, arguments, deopt_mode);
898  for (int i = 0; i < operands->length(); i++) {
899  LOperand* pointer = operands->at(i);
900  if (pointer->IsStackSlot()) {
901  safepoint.DefinePointerSlot(pointer->index(), zone());
902  } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
903  safepoint.DefinePointerRegister(ToRegister(pointer), zone());
904  }
905  }
906 }
907 
908 
909 void LCodeGen::RecordSafepoint(LPointerMap* pointers,
910  Safepoint::DeoptMode deopt_mode) {
911  RecordSafepoint(pointers, Safepoint::kSimple, 0, deopt_mode);
912 }
913 
914 
915 void LCodeGen::RecordSafepoint(Safepoint::DeoptMode deopt_mode) {
916  LPointerMap empty_pointers(zone());
917  RecordSafepoint(&empty_pointers, deopt_mode);
918 }
919 
920 
921 void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
922  int arguments,
923  Safepoint::DeoptMode deopt_mode) {
924  RecordSafepoint(pointers, Safepoint::kWithRegisters, arguments, deopt_mode);
925 }
926 
927 
928 void LCodeGen::RecordAndWritePosition(int position) {
929  if (position == RelocInfo::kNoPosition) return;
930  masm()->positions_recorder()->RecordPosition(position);
931  masm()->positions_recorder()->WriteRecordedPositions();
932 }
933 
934 
935 static const char* LabelType(LLabel* label) {
936  if (label->is_loop_header()) return " (loop header)";
937  if (label->is_osr_entry()) return " (OSR entry)";
938  return "";
939 }
940 
941 
942 void LCodeGen::DoLabel(LLabel* label) {
943  Comment(";;; <@%d,#%d> -------------------- B%d%s --------------------",
944  current_instruction_,
945  label->hydrogen_value()->id(),
946  label->block_id(),
947  LabelType(label));
948  __ bind(label->label());
949  current_block_ = label->block_id();
950  DoGap(label);
951 }
952 
953 
954 void LCodeGen::DoParallelMove(LParallelMove* move) {
955  resolver_.Resolve(move);
956 }
957 
958 
959 void LCodeGen::DoGap(LGap* gap) {
960  for (int i = LGap::FIRST_INNER_POSITION;
961  i <= LGap::LAST_INNER_POSITION;
962  i++) {
963  LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i);
964  LParallelMove* move = gap->GetParallelMove(inner_pos);
965  if (move != NULL) DoParallelMove(move);
966  }
967 }
968 
969 
970 void LCodeGen::DoInstructionGap(LInstructionGap* instr) {
971  DoGap(instr);
972 }
973 
974 
975 void LCodeGen::DoParameter(LParameter* instr) {
976  // Nothing to do.
977 }
978 
979 
980 void LCodeGen::DoCallStub(LCallStub* instr) {
981  ASSERT(ToRegister(instr->context()).is(rsi));
982  ASSERT(ToRegister(instr->result()).is(rax));
983  switch (instr->hydrogen()->major_key()) {
984  case CodeStub::RegExpExec: {
985  RegExpExecStub stub;
986  CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
987  break;
988  }
989  case CodeStub::SubString: {
990  SubStringStub stub;
991  CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
992  break;
993  }
994  case CodeStub::StringCompare: {
995  StringCompareStub stub;
996  CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
997  break;
998  }
999  default:
1000  UNREACHABLE();
1001  }
1002 }
1003 
1004 
1005 void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
1006  GenerateOsrPrologue();
1007 }
1008 
1009 
1010 void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) {
1011  Register dividend = ToRegister(instr->dividend());
1012  int32_t divisor = instr->divisor();
1013  ASSERT(dividend.is(ToRegister(instr->result())));
1014 
1015  // Theoretically, a variation of the branch-free code for integer division by
1016  // a power of 2 (calculating the remainder via an additional multiplication
1017  // (which gets simplified to an 'and') and subtraction) should be faster, and
1018  // this is exactly what GCC and clang emit. Nevertheless, benchmarks seem to
1019  // indicate that positive dividends are heavily favored, so the branching
1020  // version performs better.
1021  HMod* hmod = instr->hydrogen();
1022  int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
1023  Label dividend_is_not_negative, done;
1024  if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) {
1025  __ testl(dividend, dividend);
1026  __ j(not_sign, &dividend_is_not_negative, Label::kNear);
1027  // Note that this is correct even for kMinInt operands.
1028  __ negl(dividend);
1029  __ andl(dividend, Immediate(mask));
1030  __ negl(dividend);
1031  if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
1032  DeoptimizeIf(zero, instr->environment());
1033  }
1034  __ jmp(&done, Label::kNear);
1035  }
1036 
1037  __ bind(&dividend_is_not_negative);
1038  __ andl(dividend, Immediate(mask));
1039  __ bind(&done);
1040 }
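In scalar terms, the branching strategy above amounts to masking the magnitude and restoring the sign. A rough C++ sketch (illustrative only; the unsigned negation mirrors negl and stays defined even for kMinInt):

  #include <cstdint>

  // Sketch of dividend % divisor where |divisor| is a power of two.
  static int32_t ModByPowerOf2(int32_t dividend, int32_t divisor) {
    int32_t mask = divisor < 0 ? -(divisor + 1) : divisor - 1;  // |divisor| - 1
    if (dividend < 0) {
      // Negate, mask, negate back, as the generated code does; a zero result
      // here is the -0 case the code above deoptimizes on.
      uint32_t n = 0u - static_cast<uint32_t>(dividend);
      return -static_cast<int32_t>(n & static_cast<uint32_t>(mask));
    }
    return dividend & mask;
  }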
1041 
1042 
1043 void LCodeGen::DoModByConstI(LModByConstI* instr) {
1044  Register dividend = ToRegister(instr->dividend());
1045  int32_t divisor = instr->divisor();
1046  ASSERT(ToRegister(instr->result()).is(rax));
1047 
1048  if (divisor == 0) {
1049  DeoptimizeIf(no_condition, instr->environment());
1050  return;
1051  }
1052 
1053  __ TruncatingDiv(dividend, Abs(divisor));
1054  __ imull(rdx, rdx, Immediate(Abs(divisor)));
1055  __ movl(rax, dividend);
1056  __ subl(rax, rdx);
1057 
1058  // Check for negative zero.
1059  HMod* hmod = instr->hydrogen();
1060  if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
1061  Label remainder_not_zero;
1062  __ j(not_zero, &remainder_not_zero, Label::kNear);
1063  __ cmpl(dividend, Immediate(0));
1064  DeoptimizeIf(less, instr->environment());
1065  __ bind(&remainder_not_zero);
1066  }
1067 }
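The constant-divisor case derives the remainder from a truncating division by a compile-time constant: r = n - trunc(n / |d|) * |d|. A hedged C++ equivalent (assuming the divisor is a non-zero constant other than kMinInt):

  #include <cstdint>

  // Sketch of the remainder computation above; TruncatingDiv corresponds to
  // the plain C++ '/' here since both round toward zero.
  static int32_t ModByConst(int32_t n, int32_t d) {
    int32_t abs_d = d < 0 ? -d : d;  // the code always divides by Abs(divisor)
    int32_t q = n / abs_d;           // trunc(n / |d|)
    return n - q * abs_d;            // remainder takes the sign of n
  }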
1068 
1069 
1070 void LCodeGen::DoModI(LModI* instr) {
1071  HMod* hmod = instr->hydrogen();
1072 
1073  Register left_reg = ToRegister(instr->left());
1074  ASSERT(left_reg.is(rax));
1075  Register right_reg = ToRegister(instr->right());
1076  ASSERT(!right_reg.is(rax));
1077  ASSERT(!right_reg.is(rdx));
1078  Register result_reg = ToRegister(instr->result());
1079  ASSERT(result_reg.is(rdx));
1080 
1081  Label done;
1082  // Check for x % 0, idiv would signal a divide error. We have to
1083  // deopt in this case because we can't return a NaN.
1084  if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
1085  __ testl(right_reg, right_reg);
1086  DeoptimizeIf(zero, instr->environment());
1087  }
1088 
1089  // Check for kMinInt % -1, idiv would signal a divide error. We
1090  // have to deopt if we care about -0, because we can't return that.
1091  if (hmod->CheckFlag(HValue::kCanOverflow)) {
1092  Label no_overflow_possible;
1093  __ cmpl(left_reg, Immediate(kMinInt));
1094  __ j(not_zero, &no_overflow_possible, Label::kNear);
1095  __ cmpl(right_reg, Immediate(-1));
1096  if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
1097  DeoptimizeIf(equal, instr->environment());
1098  } else {
1099  __ j(not_equal, &no_overflow_possible, Label::kNear);
1100  __ Set(result_reg, 0);
1101  __ jmp(&done, Label::kNear);
1102  }
1103  __ bind(&no_overflow_possible);
1104  }
1105 
1106  // Sign extend dividend in eax into edx:eax, since we are using only the low
1107  // 32 bits of the values.
1108  __ cdq();
1109 
1110  // If we care about -0, test if the dividend is <0 and the result is 0.
1111  if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
1112  Label positive_left;
1113  __ testl(left_reg, left_reg);
1114  __ j(not_sign, &positive_left, Label::kNear);
1115  __ idivl(right_reg);
1116  __ testl(result_reg, result_reg);
1117  DeoptimizeIf(zero, instr->environment());
1118  __ jmp(&done, Label::kNear);
1119  __ bind(&positive_left);
1120  }
1121  __ idivl(right_reg);
1122  __ bind(&done);
1123 }
1124 
1125 
1126 void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
1127  Register dividend = ToRegister(instr->dividend());
1128  int32_t divisor = instr->divisor();
1129  ASSERT(dividend.is(ToRegister(instr->result())));
1130 
1131  // If the divisor is positive, things are easy: There can be no deopts and we
1132  // can simply do an arithmetic right shift.
1133  if (divisor == 1) return;
1134  int32_t shift = WhichPowerOf2Abs(divisor);
1135  if (divisor > 1) {
1136  __ sarl(dividend, Immediate(shift));
1137  return;
1138  }
1139 
1140  // If the divisor is negative, we have to negate and handle edge cases.
1141  Label not_kmin_int, done;
1142  __ negl(dividend);
1143  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
1144  DeoptimizeIf(zero, instr->environment());
1145  }
1146  if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
1147  // Note that we could emit branch-free code, but that would need one more
1148  // register.
1149  __ j(no_overflow, &not_kmin_int, Label::kNear);
1150  if (divisor == -1) {
1151  DeoptimizeIf(no_condition, instr->environment());
1152  } else {
1153  __ movl(dividend, Immediate(kMinInt / divisor));
1154  __ jmp(&done, Label::kNear);
1155  }
1156  }
1157  __ bind(&not_kmin_int);
1158  __ sarl(dividend, Immediate(shift));
1159  __ bind(&done);
1160 }
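The negative-divisor path above negates the dividend and then uses an arithmetic right shift, which floors by 2^shift. A small C++ sketch of that path (assumes divisor is a negative power of two greater than kMinInt, a two's-complement arithmetic shift, and that the kMinInt dividend case has already been handled, as it is above):

  #include <cstdint>

  static int32_t FlooringDivByNegPowerOf2(int32_t dividend, int32_t divisor) {
    int shift = 0;
    for (int32_t d = -divisor; d > 1; d >>= 1) shift++;  // shift = log2(|divisor|)
    int32_t negated = -dividend;  // negl; kMinInt would overflow here (deopt/patch)
    return negated >> shift;      // arithmetic shift == floor division by 2^shift
  }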
1161 
1162 
1163 void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
1164  Register dividend = ToRegister(instr->dividend());
1165  int32_t divisor = instr->divisor();
1166  ASSERT(ToRegister(instr->result()).is(rdx));
1167 
1168  if (divisor == 0) {
1169  DeoptimizeIf(no_condition, instr->environment());
1170  return;
1171  }
1172 
1173  // Check for (0 / -x) that will produce negative zero.
1174  HMathFloorOfDiv* hdiv = instr->hydrogen();
1175  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
1176  __ testl(dividend, dividend);
1177  DeoptimizeIf(zero, instr->environment());
1178  }
1179 
1180  // Easy case: We need no dynamic check for the dividend and the flooring
1181  // division is the same as the truncating division.
1182  if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) ||
1183  (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) {
1184  __ TruncatingDiv(dividend, Abs(divisor));
1185  if (divisor < 0) __ negl(rdx);
1186  return;
1187  }
1188 
1189  // In the general case we may need to adjust before and after the truncating
1190  // division to get a flooring division.
1191  Register temp = ToRegister(instr->temp3());
1192  ASSERT(!temp.is(dividend) && !temp.is(rax) && !temp.is(rdx));
1193  Label needs_adjustment, done;
1194  __ cmpl(dividend, Immediate(0));
1195  __ j(divisor > 0 ? less : greater, &needs_adjustment, Label::kNear);
1196  __ TruncatingDiv(dividend, Abs(divisor));
1197  if (divisor < 0) __ negl(rdx);
1198  __ jmp(&done, Label::kNear);
1199  __ bind(&needs_adjustment);
1200  __ leal(temp, Operand(dividend, divisor > 0 ? 1 : -1));
1201  __ TruncatingDiv(temp, Abs(divisor));
1202  if (divisor < 0) __ negl(rdx);
1203  __ decl(rdx);
1204  __ bind(&done);
1205 }
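The adjustment above turns truncating division into flooring division: when the operands have opposite signs and the division is inexact, the truncated quotient is one too large, so the code nudges the dividend toward zero by one, truncates, and subtracts one. Roughly, in C++ (assuming divisor != 0 and no kMinInt overflow):

  #include <cstdint>

  static int32_t FlooringDivByConst(int32_t dividend, int32_t divisor) {
    if (dividend == 0 || (dividend < 0) == (divisor < 0)) {
      return dividend / divisor;  // truncating and flooring agree
    }
    // Opposite signs: the "needs_adjustment" path in the code above.
    int32_t temp = dividend + (divisor > 0 ? 1 : -1);
    return temp / divisor - 1;
  }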
1206 
1207 
1208 void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
1209  Register dividend = ToRegister(instr->dividend());
1210  int32_t divisor = instr->divisor();
1211  Register result = ToRegister(instr->result());
1212  ASSERT(divisor == kMinInt || (divisor != 0 && IsPowerOf2(Abs(divisor))));
1213  ASSERT(!result.is(dividend));
1214 
1215  // Check for (0 / -x) that will produce negative zero.
1216  HDiv* hdiv = instr->hydrogen();
1217  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
1218  __ testl(dividend, dividend);
1219  DeoptimizeIf(zero, instr->environment());
1220  }
1221  // Check for (kMinInt / -1).
1222  if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
1223  __ cmpl(dividend, Immediate(kMinInt));
1224  DeoptimizeIf(zero, instr->environment());
1225  }
1226  // Deoptimize if remainder will not be 0.
1227  if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
1228  divisor != 1 && divisor != -1) {
1229  int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
1230  __ testl(dividend, Immediate(mask));
1231  DeoptimizeIf(not_zero, instr->environment());
1232  }
1233  __ Move(result, dividend);
1234  int32_t shift = WhichPowerOf2Abs(divisor);
1235  if (shift > 0) {
1236  // The arithmetic shift is always OK, the 'if' is an optimization only.
1237  if (shift > 1) __ sarl(result, Immediate(31));
1238  __ shrl(result, Immediate(32 - shift));
1239  __ addl(result, dividend);
1240  __ sarl(result, Immediate(shift));
1241  }
1242  if (divisor < 0) __ negl(result);
1243 }
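The sarl(31)/shrl(32 - shift)/addl/sarl(shift) sequence above is the standard branch-free way to get a truncating signed division by 2^shift: add 2^shift - 1 to the dividend only when it is negative, then arithmetic-shift. A C++ sketch of the same computation (illustrative; assumes 1 <= shift <= 31 and a two's-complement arithmetic right shift):

  #include <cstdint>

  static int32_t TruncDivByPowerOf2(int32_t dividend, int shift) {
    // All ones if dividend is negative, zero otherwise (sarl result, 31).
    uint32_t sign_mask = static_cast<uint32_t>(dividend >> 31);
    // Keep the low 'shift' bits: 2^shift - 1 for negative dividends, else 0
    // (shrl result, 32 - shift).
    uint32_t bias = sign_mask >> (32 - shift);
    // addl result, dividend; sarl result, shift.
    return (dividend + static_cast<int32_t>(bias)) >> shift;
  }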
1244 
1245 
1246 void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
1247  Register dividend = ToRegister(instr->dividend());
1248  int32_t divisor = instr->divisor();
1249  ASSERT(ToRegister(instr->result()).is(rdx));
1250 
1251  if (divisor == 0) {
1252  DeoptimizeIf(no_condition, instr->environment());
1253  return;
1254  }
1255 
1256  // Check for (0 / -x) that will produce negative zero.
1257  HDiv* hdiv = instr->hydrogen();
1258  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
1259  __ testl(dividend, dividend);
1260  DeoptimizeIf(zero, instr->environment());
1261  }
1262 
1263  __ TruncatingDiv(dividend, Abs(divisor));
1264  if (divisor < 0) __ negp(rdx);
1265 
1266  if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
1267  __ movl(rax, rdx);
1268  __ imull(rax, rax, Immediate(divisor));
1269  __ subl(rax, dividend);
1270  DeoptimizeIf(not_equal, instr->environment());
1271  }
1272 }
1273 
1274 
1275 void LCodeGen::DoDivI(LDivI* instr) {
1276  HBinaryOperation* hdiv = instr->hydrogen();
1277  Register dividend = ToRegister(instr->left());
1278  Register divisor = ToRegister(instr->right());
1279  Register remainder = ToRegister(instr->temp());
1280  Register result = ToRegister(instr->result());
1281  ASSERT(dividend.is(rax));
1282  ASSERT(remainder.is(rdx));
1283  ASSERT(result.is(rax));
1284  ASSERT(!divisor.is(rax));
1285  ASSERT(!divisor.is(rdx));
1286 
1287  // Check for x / 0.
1288  if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
1289  __ testl(divisor, divisor);
1290  DeoptimizeIf(zero, instr->environment());
1291  }
1292 
1293  // Check for (0 / -x) that will produce negative zero.
1294  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
1295  Label dividend_not_zero;
1296  __ testl(dividend, dividend);
1297  __ j(not_zero, &dividend_not_zero, Label::kNear);
1298  __ testl(divisor, divisor);
1299  DeoptimizeIf(sign, instr->environment());
1300  __ bind(&dividend_not_zero);
1301  }
1302 
1303  // Check for (kMinInt / -1).
1304  if (hdiv->CheckFlag(HValue::kCanOverflow)) {
1305  Label dividend_not_min_int;
1306  __ cmpl(dividend, Immediate(kMinInt));
1307  __ j(not_zero, &dividend_not_min_int, Label::kNear);
1308  __ cmpl(divisor, Immediate(-1));
1309  DeoptimizeIf(zero, instr->environment());
1310  __ bind(&dividend_not_min_int);
1311  }
1312 
1313  // Sign extend to rdx (= remainder).
1314  __ cdq();
1315  __ idivl(divisor);
1316 
1317  if (hdiv->IsMathFloorOfDiv()) {
1318  Label done;
1319  __ testl(remainder, remainder);
1320  __ j(zero, &done, Label::kNear);
1321  __ xorl(remainder, divisor);
1322  __ sarl(remainder, Immediate(31));
1323  __ addl(result, remainder);
1324  __ bind(&done);
1325  } else if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
1326  // Deoptimize if remainder is not 0.
1327  __ testl(remainder, remainder);
1328  DeoptimizeIf(not_zero, instr->environment());
1329  }
1330 }
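For the Math.floor(a / b) variant, the tail above fixes up the truncated idivl quotient using the remainder left in rdx: if the remainder is non-zero and its sign differs from the divisor's, the quotient is one too large. The xorl/sarl(31)/addl sequence computes that adjustment (0 or -1) without a branch; roughly:

  #include <cstdint>

  // Sketch of the flooring fix-up applied after idivl, where
  // quotient == trunc(a / b) and remainder == a - quotient * b.
  static int32_t FloorFromTruncated(int32_t quotient, int32_t remainder,
                                    int32_t divisor) {
    if (remainder == 0) return quotient;  // exact division: no fix-up needed
    // The sign bit of (remainder ^ divisor) is set iff the signs differ;
    // arithmetic-shifting it by 31 yields -1 in that case and 0 otherwise.
    int32_t adjust = (remainder ^ divisor) >> 31;
    return quotient + adjust;
  }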
1331 
1332 
1333 void LCodeGen::DoMulI(LMulI* instr) {
1334  Register left = ToRegister(instr->left());
1335  LOperand* right = instr->right();
1336 
1337  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
1338  if (instr->hydrogen_value()->representation().IsSmi()) {
1339  __ movp(kScratchRegister, left);
1340  } else {
1341  __ movl(kScratchRegister, left);
1342  }
1343  }
1344 
1345  bool can_overflow =
1346  instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
1347  if (right->IsConstantOperand()) {
1348  int32_t right_value = ToInteger32(LConstantOperand::cast(right));
1349  if (right_value == -1) {
1350  __ negl(left);
1351  } else if (right_value == 0) {
1352  __ xorl(left, left);
1353  } else if (right_value == 2) {
1354  __ addl(left, left);
1355  } else if (!can_overflow) {
1356  // If the multiplication is known to not overflow, we
1357  // can use operations that don't set the overflow flag
1358  // correctly.
1359  switch (right_value) {
1360  case 1:
1361  // Do nothing.
1362  break;
1363  case 3:
1364  __ leal(left, Operand(left, left, times_2, 0));
1365  break;
1366  case 4:
1367  __ shll(left, Immediate(2));
1368  break;
1369  case 5:
1370  __ leal(left, Operand(left, left, times_4, 0));
1371  break;
1372  case 8:
1373  __ shll(left, Immediate(3));
1374  break;
1375  case 9:
1376  __ leal(left, Operand(left, left, times_8, 0));
1377  break;
1378  case 16:
1379  __ shll(left, Immediate(4));
1380  break;
1381  default:
1382  __ imull(left, left, Immediate(right_value));
1383  break;
1384  }
1385  } else {
1386  __ imull(left, left, Immediate(right_value));
1387  }
1388  } else if (right->IsStackSlot()) {
1389  if (instr->hydrogen_value()->representation().IsSmi()) {
1390  __ SmiToInteger64(left, left);
1391  __ imulp(left, ToOperand(right));
1392  } else {
1393  __ imull(left, ToOperand(right));
1394  }
1395  } else {
1396  if (instr->hydrogen_value()->representation().IsSmi()) {
1397  __ SmiToInteger64(left, left);
1398  __ imulp(left, ToRegister(right));
1399  } else {
1400  __ imull(left, ToRegister(right));
1401  }
1402  }
1403 
1404  if (can_overflow) {
1405  DeoptimizeIf(overflow, instr->environment());
1406  }
1407 
1408  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
1409  // Bail out if the result is supposed to be negative zero.
1410  Label done;
1411  if (instr->hydrogen_value()->representation().IsSmi()) {
1412  __ testp(left, left);
1413  } else {
1414  __ testl(left, left);
1415  }
1416  __ j(not_zero, &done, Label::kNear);
1417  if (right->IsConstantOperand()) {
1418  // Constant can't be represented as Smi due to immediate size limit.
1419  ASSERT(!instr->hydrogen_value()->representation().IsSmi());
1420  if (ToInteger32(LConstantOperand::cast(right)) < 0) {
1421  DeoptimizeIf(no_condition, instr->environment());
1422  } else if (ToInteger32(LConstantOperand::cast(right)) == 0) {
1423  __ cmpl(kScratchRegister, Immediate(0));
1424  DeoptimizeIf(less, instr->environment());
1425  }
1426  } else if (right->IsStackSlot()) {
1427  if (instr->hydrogen_value()->representation().IsSmi()) {
1428  __ orp(kScratchRegister, ToOperand(right));
1429  } else {
1430  __ orl(kScratchRegister, ToOperand(right));
1431  }
1432  DeoptimizeIf(sign, instr->environment());
1433  } else {
1434  // Test the non-zero operand for negative sign.
1435  if (instr->hydrogen_value()->representation().IsSmi()) {
1436  __ orp(kScratchRegister, ToRegister(right));
1437  } else {
1438  __ orl(kScratchRegister, ToRegister(right));
1439  }
1440  DeoptimizeIf(sign, instr->environment());
1441  }
1442  __ bind(&done);
1443  }
1444 }
1445 
1446 
1447 void LCodeGen::DoBitI(LBitI* instr) {
1448  LOperand* left = instr->left();
1449  LOperand* right = instr->right();
1450  ASSERT(left->Equals(instr->result()));
1451  ASSERT(left->IsRegister());
1452 
1453  if (right->IsConstantOperand()) {
1454  int32_t right_operand = ToInteger32(LConstantOperand::cast(right));
1455  switch (instr->op()) {
1456  case Token::BIT_AND:
1457  __ andl(ToRegister(left), Immediate(right_operand));
1458  break;
1459  case Token::BIT_OR:
1460  __ orl(ToRegister(left), Immediate(right_operand));
1461  break;
1462  case Token::BIT_XOR:
1463  if (right_operand == int32_t(~0)) {
1464  __ notl(ToRegister(left));
1465  } else {
1466  __ xorl(ToRegister(left), Immediate(right_operand));
1467  }
1468  break;
1469  default:
1470  UNREACHABLE();
1471  break;
1472  }
1473  } else if (right->IsStackSlot()) {
1474  switch (instr->op()) {
1475  case Token::BIT_AND:
1476  __ andp(ToRegister(left), ToOperand(right));
1477  break;
1478  case Token::BIT_OR:
1479  __ orp(ToRegister(left), ToOperand(right));
1480  break;
1481  case Token::BIT_XOR:
1482  __ xorp(ToRegister(left), ToOperand(right));
1483  break;
1484  default:
1485  UNREACHABLE();
1486  break;
1487  }
1488  } else {
1489  ASSERT(right->IsRegister());
1490  switch (instr->op()) {
1491  case Token::BIT_AND:
1492  __ andp(ToRegister(left), ToRegister(right));
1493  break;
1494  case Token::BIT_OR:
1495  __ orp(ToRegister(left), ToRegister(right));
1496  break;
1497  case Token::BIT_XOR:
1498  __ xorp(ToRegister(left), ToRegister(right));
1499  break;
1500  default:
1501  UNREACHABLE();
1502  break;
1503  }
1504  }
1505 }
1506 
1507 
1508 void LCodeGen::DoShiftI(LShiftI* instr) {
1509  LOperand* left = instr->left();
1510  LOperand* right = instr->right();
1511  ASSERT(left->Equals(instr->result()));
1512  ASSERT(left->IsRegister());
1513  if (right->IsRegister()) {
1514  ASSERT(ToRegister(right).is(rcx));
1515 
1516  switch (instr->op()) {
1517  case Token::ROR:
1518  __ rorl_cl(ToRegister(left));
1519  break;
1520  case Token::SAR:
1521  __ sarl_cl(ToRegister(left));
1522  break;
1523  case Token::SHR:
1524  __ shrl_cl(ToRegister(left));
1525  if (instr->can_deopt()) {
1526  __ testl(ToRegister(left), ToRegister(left));
1527  DeoptimizeIf(negative, instr->environment());
1528  }
1529  break;
1530  case Token::SHL:
1531  __ shll_cl(ToRegister(left));
1532  break;
1533  default:
1534  UNREACHABLE();
1535  break;
1536  }
1537  } else {
1538  int32_t value = ToInteger32(LConstantOperand::cast(right));
1539  uint8_t shift_count = static_cast<uint8_t>(value & 0x1F);
1540  switch (instr->op()) {
1541  case Token::ROR:
1542  if (shift_count != 0) {
1543  __ rorl(ToRegister(left), Immediate(shift_count));
1544  }
1545  break;
1546  case Token::SAR:
1547  if (shift_count != 0) {
1548  __ sarl(ToRegister(left), Immediate(shift_count));
1549  }
1550  break;
1551  case Token::SHR:
1552  if (shift_count == 0 && instr->can_deopt()) {
1553  __ testl(ToRegister(left), ToRegister(left));
1554  DeoptimizeIf(negative, instr->environment());
1555  } else {
1556  __ shrl(ToRegister(left), Immediate(shift_count));
1557  }
1558  break;
1559  case Token::SHL:
1560  if (shift_count != 0) {
1561  if (instr->hydrogen_value()->representation().IsSmi()) {
1562  __ shl(ToRegister(left), Immediate(shift_count));
1563  } else {
1564  __ shll(ToRegister(left), Immediate(shift_count));
1565  }
1566  }
1567  break;
1568  default:
1569  UNREACHABLE();
1570  break;
1571  }
1572  }
1573 }
1574 
1575 
1576 void LCodeGen::DoSubI(LSubI* instr) {
1577  LOperand* left = instr->left();
1578  LOperand* right = instr->right();
1579  ASSERT(left->Equals(instr->result()));
1580 
1581  if (right->IsConstantOperand()) {
1582  __ subl(ToRegister(left),
1583  Immediate(ToInteger32(LConstantOperand::cast(right))));
1584  } else if (right->IsRegister()) {
1585  if (instr->hydrogen_value()->representation().IsSmi()) {
1586  __ subp(ToRegister(left), ToRegister(right));
1587  } else {
1588  __ subl(ToRegister(left), ToRegister(right));
1589  }
1590  } else {
1591  if (instr->hydrogen_value()->representation().IsSmi()) {
1592  __ subp(ToRegister(left), ToOperand(right));
1593  } else {
1594  __ subl(ToRegister(left), ToOperand(right));
1595  }
1596  }
1597 
1598  if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
1599  DeoptimizeIf(overflow, instr->environment());
1600  }
1601 }
1602 
1603 
1604 void LCodeGen::DoConstantI(LConstantI* instr) {
1605  __ Set(ToRegister(instr->result()), instr->value());
1606 }
1607 
1608 
1609 void LCodeGen::DoConstantS(LConstantS* instr) {
1610  __ Move(ToRegister(instr->result()), instr->value());
1611 }
1612 
1613 
1614 void LCodeGen::DoConstantD(LConstantD* instr) {
1615  ASSERT(instr->result()->IsDoubleRegister());
1616  XMMRegister res = ToDoubleRegister(instr->result());
1617  double v = instr->value();
1618  uint64_t int_val = BitCast<uint64_t, double>(v);
1619  // Use xor to produce +0.0 in a fast and compact way, but avoid to
1620  // do so if the constant is -0.0.
1621  if (int_val == 0) {
1622  __ xorps(res, res);
1623  } else {
1624  Register tmp = ToRegister(instr->temp());
1625  __ Set(tmp, int_val);
1626  __ movq(res, tmp);
1627  }
1628 }
1629 
1630 
1631 void LCodeGen::DoConstantE(LConstantE* instr) {
1632  __ LoadAddress(ToRegister(instr->result()), instr->value());
1633 }
1634 
1635 
1636 void LCodeGen::DoConstantT(LConstantT* instr) {
1637  Handle<Object> value = instr->value(isolate());
1638  __ Move(ToRegister(instr->result()), value);
1639 }
1640 
1641 
1642 void LCodeGen::DoMapEnumLength(LMapEnumLength* instr) {
1643  Register result = ToRegister(instr->result());
1644  Register map = ToRegister(instr->value());
1645  __ EnumLength(result, map);
1646 }
1647 
1648 
1649 void LCodeGen::DoDateField(LDateField* instr) {
1650  Register object = ToRegister(instr->date());
1651  Register result = ToRegister(instr->result());
1652  Smi* index = instr->index();
1653  Label runtime, done, not_date_object;
1654  ASSERT(object.is(result));
1655  ASSERT(object.is(rax));
1656 
1657  Condition cc = masm()->CheckSmi(object);
1658  DeoptimizeIf(cc, instr->environment());
1659  __ CmpObjectType(object, JS_DATE_TYPE, kScratchRegister);
1660  DeoptimizeIf(not_equal, instr->environment());
1661 
1662  if (index->value() == 0) {
1663  __ movp(result, FieldOperand(object, JSDate::kValueOffset));
1664  } else {
1665  if (index->value() < JSDate::kFirstUncachedField) {
1666  ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
1667  Operand stamp_operand = __ ExternalOperand(stamp);
1668  __ movp(kScratchRegister, stamp_operand);
1669  __ cmpp(kScratchRegister, FieldOperand(object,
1670  JSDate::kCacheStampOffset));
1671  __ j(not_equal, &runtime, Label::kNear);
1672  __ movp(result, FieldOperand(object, JSDate::kValueOffset +
1673  kPointerSize * index->value()));
1674  __ jmp(&done, Label::kNear);
1675  }
1676  __ bind(&runtime);
1677  __ PrepareCallCFunction(2);
1678  __ movp(arg_reg_1, object);
1679  __ Move(arg_reg_2, index, Assembler::RelocInfoNone());
1680  __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
1681  __ bind(&done);
1682  }
1683 }
1684 
1685 
1686 Operand LCodeGen::BuildSeqStringOperand(Register string,
1687  LOperand* index,
1688  String::Encoding encoding) {
1689  if (index->IsConstantOperand()) {
1690  int offset = ToInteger32(LConstantOperand::cast(index));
1691  if (encoding == String::TWO_BYTE_ENCODING) {
1692  offset *= kUC16Size;
1693  }
1694  STATIC_ASSERT(kCharSize == 1);
1695  return FieldOperand(string, SeqString::kHeaderSize + offset);
1696  }
1697  return FieldOperand(
1698  string, ToRegister(index),
1699  encoding == String::ONE_BYTE_ENCODING ? times_1 : times_2,
1700  SeqString::kHeaderSize);
1701 }
1702 
1703 
1704 void LCodeGen::DoSeqStringGetChar(LSeqStringGetChar* instr) {
1705  String::Encoding encoding = instr->hydrogen()->encoding();
1706  Register result = ToRegister(instr->result());
1707  Register string = ToRegister(instr->string());
1708 
1709  if (FLAG_debug_code) {
1710  __ Push(string);
1711  __ movp(string, FieldOperand(string, HeapObject::kMapOffset));
1712  __ movzxbp(string, FieldOperand(string, Map::kInstanceTypeOffset));
1713 
1714  __ andb(string, Immediate(kStringRepresentationMask | kStringEncodingMask));
1715  static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
1716  static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
1717  __ cmpp(string, Immediate(encoding == String::ONE_BYTE_ENCODING
1718  ? one_byte_seq_type : two_byte_seq_type));
1719  __ Check(equal, kUnexpectedStringType);
1720  __ Pop(string);
1721  }
1722 
1723  Operand operand = BuildSeqStringOperand(string, instr->index(), encoding);
1724  if (encoding == String::ONE_BYTE_ENCODING) {
1725  __ movzxbl(result, operand);
1726  } else {
1727  __ movzxwl(result, operand);
1728  }
1729 }
1730 
1731 
1732 void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
1733  String::Encoding encoding = instr->hydrogen()->encoding();
1734  Register string = ToRegister(instr->string());
1735 
1736  if (FLAG_debug_code) {
1737  Register value = ToRegister(instr->value());
1738  Register index = ToRegister(instr->index());
1739  static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
1740  static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
1741  int encoding_mask =
1742  instr->hydrogen()->encoding() == String::ONE_BYTE_ENCODING
1743  ? one_byte_seq_type : two_byte_seq_type;
1744  __ EmitSeqStringSetCharCheck(string, index, value, encoding_mask);
1745  }
1746 
1747  Operand operand = BuildSeqStringOperand(string, instr->index(), encoding);
1748  if (instr->value()->IsConstantOperand()) {
1749  int value = ToInteger32(LConstantOperand::cast(instr->value()));
1750  ASSERT_LE(0, value);
1751  if (encoding == String::ONE_BYTE_ENCODING) {
1752  ASSERT_LE(value, String::kMaxOneByteCharCode);
1753  __ movb(operand, Immediate(value));
1754  } else {
1755  ASSERT_LE(value, String::kMaxUtf16CodeUnit);
1756  __ movw(operand, Immediate(value));
1757  }
1758  } else {
1759  Register value = ToRegister(instr->value());
1760  if (encoding == String::ONE_BYTE_ENCODING) {
1761  __ movb(operand, value);
1762  } else {
1763  __ movw(operand, value);
1764  }
1765  }
1766 }
1767 
1768 
1769 void LCodeGen::DoAddI(LAddI* instr) {
1770  LOperand* left = instr->left();
1771  LOperand* right = instr->right();
1772 
1773  Representation target_rep = instr->hydrogen()->representation();
1774  bool is_p = target_rep.IsSmi() || target_rep.IsExternal();
1775 
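 // If the result is allocated to a different register than the left operand,
 // a single lea computes the sum without clobbering either input. Otherwise
 // the add is performed in place, which also leaves the overflow flag set for
 // the deoptimization check below. is_p selects pointer-width arithmetic for
 // Smi and External representations.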
1776  if (LAddI::UseLea(instr->hydrogen()) && !left->Equals(instr->result())) {
1777  if (right->IsConstantOperand()) {
1778  int32_t offset = ToInteger32(LConstantOperand::cast(right));
1779  if (is_p) {
1780  __ leap(ToRegister(instr->result()),
1781  MemOperand(ToRegister(left), offset));
1782  } else {
1783  __ leal(ToRegister(instr->result()),
1784  MemOperand(ToRegister(left), offset));
1785  }
1786  } else {
1787  Operand address(ToRegister(left), ToRegister(right), times_1, 0);
1788  if (is_p) {
1789  __ leap(ToRegister(instr->result()), address);
1790  } else {
1791  __ leal(ToRegister(instr->result()), address);
1792  }
1793  }
1794  } else {
1795  if (right->IsConstantOperand()) {
1796  if (is_p) {
1797  __ addp(ToRegister(left),
1798  Immediate(ToInteger32(LConstantOperand::cast(right))));
1799  } else {
1800  __ addl(ToRegister(left),
1801  Immediate(ToInteger32(LConstantOperand::cast(right))));
1802  }
1803  } else if (right->IsRegister()) {
1804  if (is_p) {
1805  __ addp(ToRegister(left), ToRegister(right));
1806  } else {
1807  __ addl(ToRegister(left), ToRegister(right));
1808  }
1809  } else {
1810  if (is_p) {
1811  __ addp(ToRegister(left), ToOperand(right));
1812  } else {
1813  __ addl(ToRegister(left), ToOperand(right));
1814  }
1815  }
1816  if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
1817  DeoptimizeIf(overflow, instr->environment());
1818  }
1819  }
1820 }
1821 
1822 
1823 void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
1824  LOperand* left = instr->left();
1825  LOperand* right = instr->right();
1826  ASSERT(left->Equals(instr->result()));
1827  HMathMinMax::Operation operation = instr->hydrogen()->operation();
1828  if (instr->hydrogen()->representation().IsSmiOrInteger32()) {
1829  Label return_left;
1830  Condition condition = (operation == HMathMinMax::kMathMin)
1831  ? less_equal
1832  : greater_equal;
1833  Register left_reg = ToRegister(left);
1834  if (right->IsConstantOperand()) {
1835  Immediate right_imm =
1836  Immediate(ToInteger32(LConstantOperand::cast(right)));
1837  ASSERT(!instr->hydrogen_value()->representation().IsSmi());
1838  __ cmpl(left_reg, right_imm);
1839  __ j(condition, &return_left, Label::kNear);
1840  __ movp(left_reg, right_imm);
1841  } else if (right->IsRegister()) {
1842  Register right_reg = ToRegister(right);
1843  if (instr->hydrogen_value()->representation().IsSmi()) {
1844  __ cmpp(left_reg, right_reg);
1845  } else {
1846  __ cmpl(left_reg, right_reg);
1847  }
1848  __ j(condition, &return_left, Label::kNear);
1849  __ movp(left_reg, right_reg);
1850  } else {
1851  Operand right_op = ToOperand(right);
1852  if (instr->hydrogen_value()->representation().IsSmi()) {
1853  __ cmpp(left_reg, right_op);
1854  } else {
1855  __ cmpl(left_reg, right_op);
1856  }
1857  __ j(condition, &return_left, Label::kNear);
1858  __ movp(left_reg, right_op);
1859  }
1860  __ bind(&return_left);
1861  } else {
1862  ASSERT(instr->hydrogen()->representation().IsDouble());
1863  Label check_nan_left, check_zero, return_left, return_right;
1864  Condition condition = (operation == HMathMinMax::kMathMin) ? below : above;
1865  XMMRegister left_reg = ToDoubleRegister(left);
1866  XMMRegister right_reg = ToDoubleRegister(right);
1867  __ ucomisd(left_reg, right_reg);
1868  __ j(parity_even, &check_nan_left, Label::kNear); // At least one NaN.
1869  __ j(equal, &check_zero, Label::kNear); // left == right.
1870  __ j(condition, &return_left, Label::kNear);
1871  __ jmp(&return_right, Label::kNear);
1872 
1873  __ bind(&check_zero);
1874  XMMRegister xmm_scratch = double_scratch0();
1875  __ xorps(xmm_scratch, xmm_scratch);
1876  __ ucomisd(left_reg, xmm_scratch);
1877  __ j(not_equal, &return_left, Label::kNear); // left == right != 0.
1878  // At this point, both left and right are either 0 or -0.
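 // For kMathMin the result must be -0 if either input is -0; OR-ing the bit
 // patterns sets the sign bit in exactly that case.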
1879  if (operation == HMathMinMax::kMathMin) {
1880  __ orps(left_reg, right_reg);
1881  } else {
1882  // Since we operate on +0 and/or -0, addsd and andpd have the same effect.
1883  __ addsd(left_reg, right_reg);
1884  }
1885  __ jmp(&return_left, Label::kNear);
1886 
1887  __ bind(&check_nan_left);
1888  __ ucomisd(left_reg, left_reg); // NaN check.
1889  __ j(parity_even, &return_left, Label::kNear);
1890  __ bind(&return_right);
1891  __ movaps(left_reg, right_reg);
1892 
1893  __ bind(&return_left);
1894  }
1895 }
1896 
1897 
1898 void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
1899  XMMRegister left = ToDoubleRegister(instr->left());
1900  XMMRegister right = ToDoubleRegister(instr->right());
1901  XMMRegister result = ToDoubleRegister(instr->result());
1902  // All operations except MOD are computed in-place.
1903  ASSERT(instr->op() == Token::MOD || left.is(result));
1904  switch (instr->op()) {
1905  case Token::ADD:
1906  __ addsd(left, right);
1907  break;
1908  case Token::SUB:
1909  __ subsd(left, right);
1910  break;
1911  case Token::MUL:
1912  __ mulsd(left, right);
1913  break;
1914  case Token::DIV:
1915  __ divsd(left, right);
1916  // Don't delete this mov. It may improve performance on some CPUs
1917  // when there is a mulsd depending on the result.
1918  __ movaps(left, left);
1919  break;
1920  case Token::MOD: {
1921  XMMRegister xmm_scratch = double_scratch0();
1922  __ PrepareCallCFunction(2);
1923  __ movaps(xmm_scratch, left);
1924  ASSERT(right.is(xmm1));
1925  __ CallCFunction(
1926  ExternalReference::mod_two_doubles_operation(isolate()), 2);
1927  __ movaps(result, xmm_scratch);
1928  break;
1929  }
1930  default:
1931  UNREACHABLE();
1932  break;
1933  }
1934 }
1935 
1936 
1937 void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
1938  ASSERT(ToRegister(instr->context()).is(rsi));
1939  ASSERT(ToRegister(instr->left()).is(rdx));
1940  ASSERT(ToRegister(instr->right()).is(rax));
1941  ASSERT(ToRegister(instr->result()).is(rax));
1942 
1943  BinaryOpICStub stub(instr->op(), NO_OVERWRITE);
1944  CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
1945 }
1946 
1947 
1948 template<class InstrType>
1949 void LCodeGen::EmitBranch(InstrType instr, Condition cc) {
1950  int left_block = instr->TrueDestination(chunk_);
1951  int right_block = instr->FalseDestination(chunk_);
1952 
1953  int next_block = GetNextEmittedBlock();
1954 
1955  if (right_block == left_block || cc == no_condition) {
1956  EmitGoto(left_block);
1957  } else if (left_block == next_block) {
1958  __ j(NegateCondition(cc), chunk_->GetAssemblyLabel(right_block));
1959  } else if (right_block == next_block) {
1960  __ j(cc, chunk_->GetAssemblyLabel(left_block));
1961  } else {
1962  __ j(cc, chunk_->GetAssemblyLabel(left_block));
1963  if (cc != always) {
1964  __ jmp(chunk_->GetAssemblyLabel(right_block));
1965  }
1966  }
1967 }
1968 
1969 
1970 template<class InstrType>
1971 void LCodeGen::EmitFalseBranch(InstrType instr, Condition cc) {
1972  int false_block = instr->FalseDestination(chunk_);
1973  __ j(cc, chunk_->GetAssemblyLabel(false_block));
1974 }
1975 
1976 
1977 void LCodeGen::DoDebugBreak(LDebugBreak* instr) {
1978  __ int3();
1979 }
1980 
1981 
1982 void LCodeGen::DoBranch(LBranch* instr) {
1983  Representation r = instr->hydrogen()->value()->representation();
1984  if (r.IsInteger32()) {
1985  ASSERT(!info()->IsStub());
1986  Register reg = ToRegister(instr->value());
1987  __ testl(reg, reg);
1988  EmitBranch(instr, not_zero);
1989  } else if (r.IsSmi()) {
1990  ASSERT(!info()->IsStub());
1991  Register reg = ToRegister(instr->value());
1992  __ testp(reg, reg);
1993  EmitBranch(instr, not_zero);
1994  } else if (r.IsDouble()) {
1995  ASSERT(!info()->IsStub());
1996  XMMRegister reg = ToDoubleRegister(instr->value());
1997  XMMRegister xmm_scratch = double_scratch0();
1998  __ xorps(xmm_scratch, xmm_scratch);
1999  __ ucomisd(reg, xmm_scratch);
2000  EmitBranch(instr, not_equal);
2001  } else {
2002  ASSERT(r.IsTagged());
2003  Register reg = ToRegister(instr->value());
2004  HType type = instr->hydrogen()->value()->type();
2005  if (type.IsBoolean()) {
2006  ASSERT(!info()->IsStub());
2007  __ CompareRoot(reg, Heap::kTrueValueRootIndex);
2008  EmitBranch(instr, equal);
2009  } else if (type.IsSmi()) {
2010  ASSERT(!info()->IsStub());
2011  __ SmiCompare(reg, Smi::FromInt(0));
2012  EmitBranch(instr, not_equal);
2013  } else if (type.IsJSArray()) {
2014  ASSERT(!info()->IsStub());
2015  EmitBranch(instr, no_condition);
2016  } else if (type.IsHeapNumber()) {
2017  ASSERT(!info()->IsStub());
2018  XMMRegister xmm_scratch = double_scratch0();
2019  __ xorps(xmm_scratch, xmm_scratch);
2020  __ ucomisd(xmm_scratch, FieldOperand(reg, HeapNumber::kValueOffset));
2021  EmitBranch(instr, not_equal);
2022  } else if (type.IsString()) {
2023  ASSERT(!info()->IsStub());
2024  __ cmpp(FieldOperand(reg, String::kLengthOffset), Immediate(0));
2025  EmitBranch(instr, not_equal);
2026  } else {
2027  ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types();
2028  // Avoid deopts in the case where we've never executed this path before.
2029  if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic();
2030 
2031  if (expected.Contains(ToBooleanStub::UNDEFINED)) {
2032  // undefined -> false.
2033  __ CompareRoot(reg, Heap::kUndefinedValueRootIndex);
2034  __ j(equal, instr->FalseLabel(chunk_));
2035  }
2036  if (expected.Contains(ToBooleanStub::BOOLEAN)) {
2037  // true -> true.
2038  __ CompareRoot(reg, Heap::kTrueValueRootIndex);
2039  __ j(equal, instr->TrueLabel(chunk_));
2040  // false -> false.
2041  __ CompareRoot(reg, Heap::kFalseValueRootIndex);
2042  __ j(equal, instr->FalseLabel(chunk_));
2043  }
2044  if (expected.Contains(ToBooleanStub::NULL_TYPE)) {
2045  // 'null' -> false.
2046  __ CompareRoot(reg, Heap::kNullValueRootIndex);
2047  __ j(equal, instr->FalseLabel(chunk_));
2048  }
2049 
2050  if (expected.Contains(ToBooleanStub::SMI)) {
2051  // Smis: 0 -> false, all other -> true.
2052  __ Cmp(reg, Smi::FromInt(0));
2053  __ j(equal, instr->FalseLabel(chunk_));
2054  __ JumpIfSmi(reg, instr->TrueLabel(chunk_));
2055  } else if (expected.NeedsMap()) {
2056  // If we need a map later and have a Smi -> deopt.
2057  __ testb(reg, Immediate(kSmiTagMask));
2058  DeoptimizeIf(zero, instr->environment());
2059  }
2060 
2061  const Register map = kScratchRegister;
2062  if (expected.NeedsMap()) {
2063  __ movp(map, FieldOperand(reg, HeapObject::kMapOffset));
2064 
2065  if (expected.CanBeUndetectable()) {
2066  // Undetectable -> false.
2067  __ testb(FieldOperand(map, Map::kBitFieldOffset),
2068  Immediate(1 << Map::kIsUndetectable));
2069  __ j(not_zero, instr->FalseLabel(chunk_));
2070  }
2071  }
2072 
2073  if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) {
2074  // spec object -> true.
2075  __ CmpInstanceType(map, FIRST_SPEC_OBJECT_TYPE);
2076  __ j(above_equal, instr->TrueLabel(chunk_));
2077  }
2078 
2079  if (expected.Contains(ToBooleanStub::STRING)) {
2080  // String value -> false iff empty.
2081  Label not_string;
2082  __ CmpInstanceType(map, FIRST_NONSTRING_TYPE);
2083  __ j(above_equal, &not_string, Label::kNear);
2084  __ cmpp(FieldOperand(reg, String::kLengthOffset), Immediate(0));
2085  __ j(not_zero, instr->TrueLabel(chunk_));
2086  __ jmp(instr->FalseLabel(chunk_));
2087  __ bind(&not_string);
2088  }
2089 
2090  if (expected.Contains(ToBooleanStub::SYMBOL)) {
2091  // Symbol value -> true.
2092  __ CmpInstanceType(map, SYMBOL_TYPE);
2093  __ j(equal, instr->TrueLabel(chunk_));
2094  }
2095 
2096  if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
2097  // heap number -> false iff +0, -0, or NaN.
2098  Label not_heap_number;
2099  __ CompareRoot(map, Heap::kHeapNumberMapRootIndex);
2100  __ j(not_equal, &not_heap_number, Label::kNear);
2101  XMMRegister xmm_scratch = double_scratch0();
2102  __ xorps(xmm_scratch, xmm_scratch);
2103  __ ucomisd(xmm_scratch, FieldOperand(reg, HeapNumber::kValueOffset));
2104  __ j(zero, instr->FalseLabel(chunk_));
2105  __ jmp(instr->TrueLabel(chunk_));
2106  __ bind(&not_heap_number);
2107  }
2108 
2109  if (!expected.IsGeneric()) {
2110  // We've seen something for the first time -> deopt.
2111  // This can only happen if we are not generic already.
2112  DeoptimizeIf(no_condition, instr->environment());
2113  }
2114  }
2115  }
2116 }
2117 
2118 
2119 void LCodeGen::EmitGoto(int block) {
2120  if (!IsNextEmittedBlock(block)) {
2121  __ jmp(chunk_->GetAssemblyLabel(chunk_->LookupDestination(block)));
2122  }
2123 }
2124 
2125 
2126 void LCodeGen::DoGoto(LGoto* instr) {
2127  EmitGoto(instr->block_id());
2128 }
2129 
2130 
2131 inline Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
2132  Condition cond = no_condition;
2133  switch (op) {
2134  case Token::EQ:
2135  case Token::EQ_STRICT:
2136  cond = equal;
2137  break;
2138  case Token::NE:
2139  case Token::NE_STRICT:
2140  cond = not_equal;
2141  break;
2142  case Token::LT:
2143  cond = is_unsigned ? below : less;
2144  break;
2145  case Token::GT:
2146  cond = is_unsigned ? above : greater;
2147  break;
2148  case Token::LTE:
2149  cond = is_unsigned ? below_equal : less_equal;
2150  break;
2151  case Token::GTE:
2152  cond = is_unsigned ? above_equal : greater_equal;
2153  break;
2154  case Token::IN:
2155  case Token::INSTANCEOF:
2156  default:
2157  UNREACHABLE();
2158  }
2159  return cond;
2160 }
2161 
2162 
2163 void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) {
2164  LOperand* left = instr->left();
2165  LOperand* right = instr->right();
2166  Condition cc = TokenToCondition(instr->op(), instr->is_double());
2167 
2168  if (left->IsConstantOperand() && right->IsConstantOperand()) {
2169  // We can statically evaluate the comparison.
2170  double left_val = ToDouble(LConstantOperand::cast(left));
2171  double right_val = ToDouble(LConstantOperand::cast(right));
2172  int next_block = EvalComparison(instr->op(), left_val, right_val) ?
2173  instr->TrueDestination(chunk_) : instr->FalseDestination(chunk_);
2174  EmitGoto(next_block);
2175  } else {
2176  if (instr->is_double()) {
2177  // Don't base result on EFLAGS when a NaN is involved. Instead
2178  // jump to the false block.
2179  __ ucomisd(ToDoubleRegister(left), ToDoubleRegister(right));
2180  __ j(parity_even, instr->FalseLabel(chunk_));
2181  } else {
2182  int32_t value;
2183  if (right->IsConstantOperand()) {
2184  value = ToInteger32(LConstantOperand::cast(right));
2185  if (instr->hydrogen_value()->representation().IsSmi()) {
2186  __ Cmp(ToRegister(left), Smi::FromInt(value));
2187  } else {
2188  __ cmpl(ToRegister(left), Immediate(value));
2189  }
2190  } else if (left->IsConstantOperand()) {
2191  value = ToInteger32(LConstantOperand::cast(left));
2192  if (instr->hydrogen_value()->representation().IsSmi()) {
2193  if (right->IsRegister()) {
2194  __ Cmp(ToRegister(right), Smi::FromInt(value));
2195  } else {
2196  __ Cmp(ToOperand(right), Smi::FromInt(value));
2197  }
2198  } else if (right->IsRegister()) {
2199  __ cmpl(ToRegister(right), Immediate(value));
2200  } else {
2201  __ cmpl(ToOperand(right), Immediate(value));
2202  }
2203  // We transposed the operands. Reverse the condition.
2204  cc = ReverseCondition(cc);
2205  } else if (instr->hydrogen_value()->representation().IsSmi()) {
2206  if (right->IsRegister()) {
2207  __ cmpp(ToRegister(left), ToRegister(right));
2208  } else {
2209  __ cmpp(ToRegister(left), ToOperand(right));
2210  }
2211  } else {
2212  if (right->IsRegister()) {
2213  __ cmpl(ToRegister(left), ToRegister(right));
2214  } else {
2215  __ cmpl(ToRegister(left), ToOperand(right));
2216  }
2217  }
2218  }
2219  EmitBranch(instr, cc);
2220  }
2221 }
2222 
2223 
2224 void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
2225  Register left = ToRegister(instr->left());
2226 
2227  if (instr->right()->IsConstantOperand()) {
2228  Handle<Object> right = ToHandle(LConstantOperand::cast(instr->right()));
2229  __ Cmp(left, right);
2230  } else {
2231  Register right = ToRegister(instr->right());
2232  __ cmpp(left, right);
2233  }
2234  EmitBranch(instr, equal);
2235 }
2236 
2237 
2238 void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) {
2239  if (instr->hydrogen()->representation().IsTagged()) {
2240  Register input_reg = ToRegister(instr->object());
2241  __ Cmp(input_reg, factory()->the_hole_value());
2242  EmitBranch(instr, equal);
2243  return;
2244  }
2245 
2246  XMMRegister input_reg = ToDoubleRegister(instr->object());
2247  __ ucomisd(input_reg, input_reg);
2248  EmitFalseBranch(instr, parity_odd);
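 // The input is some NaN. Spill it just below the stack pointer and compare
 // its upper 32 bits against kHoleNanUpper32, which identifies the hole NaN.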
2249 
2250  __ subp(rsp, Immediate(kDoubleSize));
2251  __ movsd(MemOperand(rsp, 0), input_reg);
2252  __ addp(rsp, Immediate(kDoubleSize));
2253 
2254  int offset = sizeof(kHoleNanUpper32);
2255  __ cmpl(MemOperand(rsp, -offset), Immediate(kHoleNanUpper32));
2256  EmitBranch(instr, equal);
2257 }
2258 
2259 
2260 void LCodeGen::DoCompareMinusZeroAndBranch(LCompareMinusZeroAndBranch* instr) {
2261  Representation rep = instr->hydrogen()->value()->representation();
2262  ASSERT(!rep.IsInteger32());
2263 
2264  if (rep.IsDouble()) {
2265  XMMRegister value = ToDoubleRegister(instr->value());
2266  XMMRegister xmm_scratch = double_scratch0();
2267  __ xorps(xmm_scratch, xmm_scratch);
2268  __ ucomisd(xmm_scratch, value);
2269  EmitFalseBranch(instr, not_equal);
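 // The value compares equal to +0.0. movmskpd copies the doubles' sign bits
 // into the low bits of the scratch register; bit 0 set means the value is -0.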
2270  __ movmskpd(kScratchRegister, value);
2271  __ testl(kScratchRegister, Immediate(1));
2272  EmitBranch(instr, not_zero);
2273  } else {
2274  Register value = ToRegister(instr->value());
2275  Handle<Map> map = masm()->isolate()->factory()->heap_number_map();
2276  __ CheckMap(value, map, instr->FalseLabel(chunk()), DO_SMI_CHECK);
2277  __ cmpl(FieldOperand(value, HeapNumber::kExponentOffset),
2278  Immediate(0x1));
2279  EmitFalseBranch(instr, no_overflow);
2280  __ cmpl(FieldOperand(value, HeapNumber::kMantissaOffset),
2281  Immediate(0x00000000));
2282  EmitBranch(instr, equal);
2283  }
2284 }
2285 
2286 
2287 Condition LCodeGen::EmitIsObject(Register input,
2288  Label* is_not_object,
2289  Label* is_object) {
2290  ASSERT(!input.is(kScratchRegister));
2291 
2292  __ JumpIfSmi(input, is_not_object);
2293 
2294  __ CompareRoot(input, Heap::kNullValueRootIndex);
2295  __ j(equal, is_object);
2296 
2297  __ movp(kScratchRegister, FieldOperand(input, HeapObject::kMapOffset));
2298  // Undetectable objects behave like undefined.
2299  __ testb(FieldOperand(kScratchRegister, Map::kBitFieldOffset),
2300  Immediate(1 << Map::kIsUndetectable));
2301  __ j(not_zero, is_not_object);
2302 
2303  __ movzxbl(kScratchRegister,
2304  FieldOperand(kScratchRegister, Map::kInstanceTypeOffset));
2305  __ cmpb(kScratchRegister, Immediate(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
2306  __ j(below, is_not_object);
2307  __ cmpb(kScratchRegister, Immediate(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
2308  return below_equal;
2309 }
2310 
2311 
2312 void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
2313  Register reg = ToRegister(instr->value());
2314 
2315  Condition true_cond = EmitIsObject(
2316  reg, instr->FalseLabel(chunk_), instr->TrueLabel(chunk_));
2317 
2318  EmitBranch(instr, true_cond);
2319 }
2320 
2321 
2322 Condition LCodeGen::EmitIsString(Register input,
2323  Register temp1,
2324  Label* is_not_string,
2325  SmiCheck check_needed = INLINE_SMI_CHECK) {
2326  if (check_needed == INLINE_SMI_CHECK) {
2327  __ JumpIfSmi(input, is_not_string);
2328  }
2329 
2330  Condition cond = masm_->IsObjectStringType(input, temp1, temp1);
2331 
2332  return cond;
2333 }
2334 
2335 
2336 void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
2337  Register reg = ToRegister(instr->value());
2338  Register temp = ToRegister(instr->temp());
2339 
2340  SmiCheck check_needed =
2341  instr->hydrogen()->value()->IsHeapObject()
2342  ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
2343 
2344  Condition true_cond = EmitIsString(
2345  reg, temp, instr->FalseLabel(chunk_), check_needed);
2346 
2347  EmitBranch(instr, true_cond);
2348 }
2349 
2350 
2351 void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
2352  Condition is_smi;
2353  if (instr->value()->IsRegister()) {
2354  Register input = ToRegister(instr->value());
2355  is_smi = masm()->CheckSmi(input);
2356  } else {
2357  Operand input = ToOperand(instr->value());
2358  is_smi = masm()->CheckSmi(input);
2359  }
2360  EmitBranch(instr, is_smi);
2361 }
2362 
2363 
2364 void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
2365  Register input = ToRegister(instr->value());
2366  Register temp = ToRegister(instr->temp());
2367 
2368  if (!instr->hydrogen()->value()->IsHeapObject()) {
2369  __ JumpIfSmi(input, instr->FalseLabel(chunk_));
2370  }
2371  __ movp(temp, FieldOperand(input, HeapObject::kMapOffset));
2372  __ testb(FieldOperand(temp, Map::kBitFieldOffset),
2373  Immediate(1 << Map::kIsUndetectable));
2374  EmitBranch(instr, not_zero);
2375 }
2376 
2377 
2378 void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
2379  ASSERT(ToRegister(instr->context()).is(rsi));
2380  Token::Value op = instr->op();
2381 
2382  Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
2383  CallCode(ic, RelocInfo::CODE_TARGET, instr);
2384 
2385  Condition condition = TokenToCondition(op, false);
2386  __ testp(rax, rax);
2387 
2388  EmitBranch(instr, condition);
2389 }
2390 
2391 
2392 static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
2393  InstanceType from = instr->from();
2394  InstanceType to = instr->to();
2395  if (from == FIRST_TYPE) return to;
2396  ASSERT(from == to || to == LAST_TYPE);
2397  return from;
2398 }
2399 
2400 
2401 static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
2402  InstanceType from = instr->from();
2403  InstanceType to = instr->to();
2404  if (from == to) return equal;
2405  if (to == LAST_TYPE) return above_equal;
2406  if (from == FIRST_TYPE) return below_equal;
2407  UNREACHABLE();
2408  return equal;
2409 }
2410 
2411 
2412 void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
2413  Register input = ToRegister(instr->value());
2414 
2415  if (!instr->hydrogen()->value()->IsHeapObject()) {
2416  __ JumpIfSmi(input, instr->FalseLabel(chunk_));
2417  }
2418 
2419  __ CmpObjectType(input, TestType(instr->hydrogen()), kScratchRegister);
2420  EmitBranch(instr, BranchCondition(instr->hydrogen()));
2421 }
2422 
2423 
2424 void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
2425  Register input = ToRegister(instr->value());
2426  Register result = ToRegister(instr->result());
2427 
2428  __ AssertString(input);
2429 
2430  __ movl(result, FieldOperand(input, String::kHashFieldOffset));
2431  ASSERT(String::kHashShift >= kSmiTagSize);
2432  __ IndexFromHash(result, result);
2433 }
2434 
2435 
2436 void LCodeGen::DoHasCachedArrayIndexAndBranch(
2437  LHasCachedArrayIndexAndBranch* instr) {
2438  Register input = ToRegister(instr->value());
2439 
2440  __ testl(FieldOperand(input, String::kHashFieldOffset),
2441  Immediate(String::kContainsCachedArrayIndexMask));
2442  EmitBranch(instr, equal);
2443 }
2444 
2445 
2446 // Branches to a label or falls through with the answer in the z flag.
2447 // Trashes the temp register.
2448 void LCodeGen::EmitClassOfTest(Label* is_true,
2449  Label* is_false,
2450  Handle<String> class_name,
2451  Register input,
2452  Register temp,
2453  Register temp2) {
2454  ASSERT(!input.is(temp));
2455  ASSERT(!input.is(temp2));
2456  ASSERT(!temp.is(temp2));
2457 
2458  __ JumpIfSmi(input, is_false);
2459 
2460  if (class_name->IsOneByteEqualTo(STATIC_ASCII_VECTOR("Function"))) {
2461  // Assuming the following assertions, we can use the same compares to test
2462  // for both being a function type and being in the object type range.
2463  STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
2464  STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
2465  FIRST_SPEC_OBJECT_TYPE + 1);
2466  STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
2467  LAST_SPEC_OBJECT_TYPE - 1);
2468  STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
2469  __ CmpObjectType(input, FIRST_SPEC_OBJECT_TYPE, temp);
2470  __ j(below, is_false);
2471  __ j(equal, is_true);
2472  __ CmpInstanceType(temp, LAST_SPEC_OBJECT_TYPE);
2473  __ j(equal, is_true);
2474  } else {
2475  // Faster code path to avoid two compares: subtract lower bound from the
2476  // actual type and do a signed compare with the width of the type range.
2477  __ movp(temp, FieldOperand(input, HeapObject::kMapOffset));
2478  __ movzxbl(temp2, FieldOperand(temp, Map::kInstanceTypeOffset));
2479  __ subp(temp2, Immediate(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
2480  __ cmpp(temp2, Immediate(LAST_NONCALLABLE_SPEC_OBJECT_TYPE -
2481  FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
2482  __ j(above, is_false);
2483  }
2484 
2485  // Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range.
2486  // Check if the constructor in the map is a function.
2487  __ movp(temp, FieldOperand(temp, Map::kConstructorOffset));
2488 
2489  // Objects with a non-function constructor have class 'Object'.
2490  __ CmpObjectType(temp, JS_FUNCTION_TYPE, kScratchRegister);
2491  if (class_name->IsOneByteEqualTo(STATIC_ASCII_VECTOR("Object"))) {
2492  __ j(not_equal, is_true);
2493  } else {
2494  __ j(not_equal, is_false);
2495  }
2496 
2497  // temp now contains the constructor function. Grab the
2498  // instance class name from there.
2499  __ movp(temp, FieldOperand(temp, JSFunction::kSharedFunctionInfoOffset));
2500  __ movp(temp, FieldOperand(temp,
2501  SharedFunctionInfo::kInstanceClassNameOffset));
2502  // The class name we are testing against is internalized since it's a literal.
2503  // The name in the constructor is internalized because of the way the context
2504  // is booted. This routine isn't expected to work for random API-created
2505  // classes and it doesn't have to because you can't access it with natives
2506  // syntax. Since both sides are internalized it is sufficient to use an
2507  // identity comparison.
2508  ASSERT(class_name->IsInternalizedString());
2509  __ Cmp(temp, class_name);
2510  // End with the answer in the z flag.
2511 }
2512 
2513 
2514 void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
2515  Register input = ToRegister(instr->value());
2516  Register temp = ToRegister(instr->temp());
2517  Register temp2 = ToRegister(instr->temp2());
2518  Handle<String> class_name = instr->hydrogen()->class_name();
2519 
2520  EmitClassOfTest(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_),
2521  class_name, input, temp, temp2);
2522 
2523  EmitBranch(instr, equal);
2524 }
2525 
2526 
2527 void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
2528  Register reg = ToRegister(instr->value());
2529 
2530  __ Cmp(FieldOperand(reg, HeapObject::kMapOffset), instr->map());
2531  EmitBranch(instr, equal);
2532 }
2533 
2534 
2535 void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
2536  ASSERT(ToRegister(instr->context()).is(rsi));
2537  InstanceofStub stub(InstanceofStub::kNoFlags);
2538  __ Push(ToRegister(instr->left()));
2539  __ Push(ToRegister(instr->right()));
2540  CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
2541  Label true_value, done;
2542  __ testp(rax, rax);
2543  __ j(zero, &true_value, Label::kNear);
2544  __ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex);
2545  __ jmp(&done, Label::kNear);
2546  __ bind(&true_value);
2547  __ LoadRoot(ToRegister(instr->result()), Heap::kTrueValueRootIndex);
2548  __ bind(&done);
2549 }
2550 
2551 
2552 void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
2553  class DeferredInstanceOfKnownGlobal V8_FINAL : public LDeferredCode {
2554  public:
2555  DeferredInstanceOfKnownGlobal(LCodeGen* codegen,
2556  LInstanceOfKnownGlobal* instr)
2557  : LDeferredCode(codegen), instr_(instr) { }
2558  virtual void Generate() V8_OVERRIDE {
2559  codegen()->DoDeferredInstanceOfKnownGlobal(instr_, &map_check_);
2560  }
2561  virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
2562  Label* map_check() { return &map_check_; }
2563  private:
2564  LInstanceOfKnownGlobal* instr_;
2565  Label map_check_;
2566  };
2567 
2568  ASSERT(ToRegister(instr->context()).is(rsi));
2569  DeferredInstanceOfKnownGlobal* deferred;
2570  deferred = new(zone()) DeferredInstanceOfKnownGlobal(this, instr);
2571 
2572  Label done, false_result;
2573  Register object = ToRegister(instr->value());
2574 
2575  // A Smi is not an instance of anything.
2576  __ JumpIfSmi(object, &false_result, Label::kNear);
2577 
2578  // This is the inlined call site instanceof cache. The two occurrences of the
2579  // hole value will be patched to the last map/result pair generated by the
2580  // instanceof stub.
2581  Label cache_miss;
2582  // Use a temp register to avoid memory operands with variable lengths.
2583  Register map = ToRegister(instr->temp());
2584  __ movp(map, FieldOperand(object, HeapObject::kMapOffset));
2585  __ bind(deferred->map_check()); // Label for calculating code patching.
2586  Handle<Cell> cache_cell = factory()->NewCell(factory()->the_hole_value());
2587  __ Move(kScratchRegister, cache_cell, RelocInfo::CELL);
2588  __ cmpp(map, Operand(kScratchRegister, 0));
2589  __ j(not_equal, &cache_miss, Label::kNear);
2590  // Patched to load either true or false.
2591  __ LoadRoot(ToRegister(instr->result()), Heap::kTheHoleValueRootIndex);
2592 #ifdef DEBUG
2593  // Check that the code size between patch label and patch sites is invariant.
2594  Label end_of_patched_code;
2595  __ bind(&end_of_patched_code);
2596  ASSERT(true);
2597 #endif
2598  __ jmp(&done, Label::kNear);
2599 
2600  // The inlined call site cache did not match. Check for null and string
2601  // before calling the deferred code.
2602  __ bind(&cache_miss); // Null is not an instance of anything.
2603  __ CompareRoot(object, Heap::kNullValueRootIndex);
2604  __ j(equal, &false_result, Label::kNear);
2605 
2606  // String values are not instances of anything.
2607  __ JumpIfNotString(object, kScratchRegister, deferred->entry());
2608 
2609  __ bind(&false_result);
2610  __ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex);
2611 
2612  __ bind(deferred->exit());
2613  __ bind(&done);
2614 }
2615 
2616 
2617 void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
2618  Label* map_check) {
2619  {
2620  PushSafepointRegistersScope scope(this);
2621  InstanceofStub::Flags flags = static_cast<InstanceofStub::Flags>(
2622  InstanceofStub::kNoFlags | InstanceofStub::kCallSiteInlineCheck);
2623  InstanceofStub stub(flags);
2624 
2625  __ Push(ToRegister(instr->value()));
2626  __ Push(instr->function());
2627 
2628  static const int kAdditionalDelta = 10;
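 // delta is the code distance from the map-check label to the end of this
 // call sequence; pushing it lets the stub locate and patch the inlined
 // map/result cache above.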
2629  int delta =
2630  masm_->SizeOfCodeGeneratedSince(map_check) + kAdditionalDelta;
2631  ASSERT(delta >= 0);
2632  __ PushImm32(delta);
2633 
2634  // We are pushing three values on the stack but recording a
2635  // safepoint with two arguments because the stub is going to
2636  // remove the third argument from the stack before jumping
2637  // to the instanceof builtin on the slow path.
2638  CallCodeGeneric(stub.GetCode(isolate()),
2639  RelocInfo::CODE_TARGET,
2640  instr,
2641  RECORD_SAFEPOINT_WITH_REGISTERS,
2642  2);
2643  ASSERT(delta == masm_->SizeOfCodeGeneratedSince(map_check));
2644  LEnvironment* env = instr->GetDeferredLazyDeoptimizationEnvironment();
2645  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
2646  // Move result to a register that survives the end of the
2647  // PushSafepointRegisterScope.
2648  __ movp(kScratchRegister, rax);
2649  }
2650  __ testp(kScratchRegister, kScratchRegister);
2651  Label load_false;
2652  Label done;
2653  __ j(not_zero, &load_false, Label::kNear);
2654  __ LoadRoot(rax, Heap::kTrueValueRootIndex);
2655  __ jmp(&done, Label::kNear);
2656  __ bind(&load_false);
2657  __ LoadRoot(rax, Heap::kFalseValueRootIndex);
2658  __ bind(&done);
2659 }
2660 
2661 
2662 void LCodeGen::DoCmpT(LCmpT* instr) {
2663  ASSERT(ToRegister(instr->context()).is(rsi));
2664  Token::Value op = instr->op();
2665 
2666  Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
2667  CallCode(ic, RelocInfo::CODE_TARGET, instr);
2668 
2669  Condition condition = TokenToCondition(op, false);
2670  Label true_value, done;
2671  __ testp(rax, rax);
2672  __ j(condition, &true_value, Label::kNear);
2673  __ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex);
2674  __ jmp(&done, Label::kNear);
2675  __ bind(&true_value);
2676  __ LoadRoot(ToRegister(instr->result()), Heap::kTrueValueRootIndex);
2677  __ bind(&done);
2678 }
2679 
2680 
2681 void LCodeGen::DoReturn(LReturn* instr) {
2682  if (FLAG_trace && info()->IsOptimizing()) {
2683  // Preserve the return value on the stack and rely on the runtime call
2684  // to return the value in the same register. We're leaving the code
2685  // managed by the register allocator and tearing down the frame, it's
2686  // safe to write to the context register.
2687  __ Push(rax);
2688  __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
2689  __ CallRuntime(Runtime::kTraceExit, 1);
2690  }
2691  if (info()->saves_caller_doubles()) {
2692  RestoreCallerDoubles();
2693  }
2694  int no_frame_start = -1;
2695  if (NeedsEagerFrame()) {
2696  __ movp(rsp, rbp);
2697  __ popq(rbp);
2698  no_frame_start = masm_->pc_offset();
2699  }
2700  if (instr->has_constant_parameter_count()) {
2701  __ Ret((ToInteger32(instr->constant_parameter_count()) + 1) * kPointerSize,
2702  rcx);
2703  } else {
2704  Register reg = ToRegister(instr->parameter_count());
2705  // The argument count parameter is a smi
2706  __ SmiToInteger32(reg, reg);
2707  Register return_addr_reg = reg.is(rcx) ? rbx : rcx;
2708  __ PopReturnAddressTo(return_addr_reg);
2709  __ shl(reg, Immediate(kPointerSizeLog2));
2710  __ addp(rsp, reg);
2711  __ jmp(return_addr_reg);
2712  }
2713  if (no_frame_start != -1) {
2714  info_->AddNoFrameRange(no_frame_start, masm_->pc_offset());
2715  }
2716 }
2717 
2718 
2719 void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
2720  Register result = ToRegister(instr->result());
2721  __ LoadGlobalCell(result, instr->hydrogen()->cell().handle());
2722  if (instr->hydrogen()->RequiresHoleCheck()) {
2723  __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
2724  DeoptimizeIf(equal, instr->environment());
2725  }
2726 }
2727 
2728 
2729 void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
2730  ASSERT(ToRegister(instr->context()).is(rsi));
2731  ASSERT(ToRegister(instr->global_object()).is(rax));
2732  ASSERT(ToRegister(instr->result()).is(rax));
2733 
2734  __ Move(rcx, instr->name());
2735  ContextualMode mode = instr->for_typeof() ? NOT_CONTEXTUAL : CONTEXTUAL;
2736  Handle<Code> ic = LoadIC::initialize_stub(isolate(), mode);
2737  CallCode(ic, RelocInfo::CODE_TARGET, instr);
2738 }
2739 
2740 
2741 void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
2742  Register value = ToRegister(instr->value());
2743  Handle<Cell> cell_handle = instr->hydrogen()->cell().handle();
2744 
2745  // If the cell we are storing to contains the hole it could have
2746  // been deleted from the property dictionary. In that case, we need
2747  // to update the property details in the property dictionary to mark
2748  // it as no longer deleted. We deoptimize in that case.
2749  if (instr->hydrogen()->RequiresHoleCheck()) {
2750  // We have a temp because CompareRoot might clobber kScratchRegister.
2751  Register cell = ToRegister(instr->temp());
2752  ASSERT(!value.is(cell));
2753  __ Move(cell, cell_handle, RelocInfo::CELL);
2754  __ CompareRoot(Operand(cell, 0), Heap::kTheHoleValueRootIndex);
2755  DeoptimizeIf(equal, instr->environment());
2756  // Store the value.
2757  __ movp(Operand(cell, 0), value);
2758  } else {
2759  // Store the value.
2760  __ Move(kScratchRegister, cell_handle, RelocInfo::CELL);
2761  __ movp(Operand(kScratchRegister, 0), value);
2762  }
2763  // Cells are always rescanned, so no write barrier here.
2764 }
2765 
2766 
2767 void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
2768  Register context = ToRegister(instr->context());
2769  Register result = ToRegister(instr->result());
2770  __ movp(result, ContextOperand(context, instr->slot_index()));
2771  if (instr->hydrogen()->RequiresHoleCheck()) {
2772  __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
2773  if (instr->hydrogen()->DeoptimizesOnHole()) {
2774  DeoptimizeIf(equal, instr->environment());
2775  } else {
2776  Label is_not_hole;
2777  __ j(not_equal, &is_not_hole, Label::kNear);
2778  __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
2779  __ bind(&is_not_hole);
2780  }
2781  }
2782 }
2783 
2784 
2785 void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
2786  Register context = ToRegister(instr->context());
2787  Register value = ToRegister(instr->value());
2788 
2789  Operand target = ContextOperand(context, instr->slot_index());
2790 
2791  Label skip_assignment;
2792  if (instr->hydrogen()->RequiresHoleCheck()) {
2793  __ CompareRoot(target, Heap::kTheHoleValueRootIndex);
2794  if (instr->hydrogen()->DeoptimizesOnHole()) {
2795  DeoptimizeIf(equal, instr->environment());
2796  } else {
2797  __ j(not_equal, &skip_assignment);
2798  }
2799  }
2800  __ movp(target, value);
2801 
2802  if (instr->hydrogen()->NeedsWriteBarrier()) {
2803  SmiCheck check_needed =
2804  instr->hydrogen()->value()->IsHeapObject()
2805  ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
2806  int offset = Context::SlotOffset(instr->slot_index());
2807  Register scratch = ToRegister(instr->temp());
2808  __ RecordWriteContextSlot(context,
2809  offset,
2810  value,
2811  scratch,
2812  kSaveFPRegs,
2813  EMIT_REMEMBERED_SET,
2814  check_needed);
2815  }
2816 
2817  __ bind(&skip_assignment);
2818 }
2819 
2820 
2821 void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
2822  HObjectAccess access = instr->hydrogen()->access();
2823  int offset = access.offset();
2824 
2825  if (access.IsExternalMemory()) {
2826  Register result = ToRegister(instr->result());
2827  if (instr->object()->IsConstantOperand()) {
2828  ASSERT(result.is(rax));
2829  __ load_rax(ToExternalReference(LConstantOperand::cast(instr->object())));
2830  } else {
2831  Register object = ToRegister(instr->object());
2832  __ Load(result, MemOperand(object, offset), access.representation());
2833  }
2834  return;
2835  }
2836 
2837  Register object = ToRegister(instr->object());
2838  if (instr->hydrogen()->representation().IsDouble()) {
2839  XMMRegister result = ToDoubleRegister(instr->result());
2840  __ movsd(result, FieldOperand(object, offset));
2841  return;
2842  }
2843 
2844  Register result = ToRegister(instr->result());
2845  if (!access.IsInobject()) {
2846  __ movp(result, FieldOperand(object, JSObject::kPropertiesOffset));
2847  object = result;
2848  }
2849 
2850  Representation representation = access.representation();
2851  if (representation.IsSmi() &&
2852  instr->hydrogen()->representation().IsInteger32()) {
2853 #ifdef DEBUG
2854  Register scratch = kScratchRegister;
2855  __ Load(scratch, FieldOperand(object, offset), representation);
2856  __ AssertSmi(scratch);
2857 #endif
2858 
2859  // Read int value directly from upper half of the smi.
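 // (On x64 a smi stores its 32-bit payload in the upper half of the word, so
 // a 32-bit load at offset + kPointerSize / 2 yields the untagged integer.)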
2860  STATIC_ASSERT(kSmiTag == 0);
2861  STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 32);
2862  offset += kPointerSize / 2;
2863  representation = Representation::Integer32();
2864  }
2865  __ Load(result, FieldOperand(object, offset), representation);
2866 }
2867 
2868 
2869 void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
2870  ASSERT(ToRegister(instr->context()).is(rsi));
2871  ASSERT(ToRegister(instr->object()).is(rax));
2872  ASSERT(ToRegister(instr->result()).is(rax));
2873 
2874  __ Move(rcx, instr->name());
2875  Handle<Code> ic = LoadIC::initialize_stub(isolate(), NOT_CONTEXTUAL);
2876  CallCode(ic, RelocInfo::CODE_TARGET, instr);
2877 }
2878 
2879 
2880 void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
2881  Register function = ToRegister(instr->function());
2882  Register result = ToRegister(instr->result());
2883 
2884  // Check that the function really is a function.
2885  __ CmpObjectType(function, JS_FUNCTION_TYPE, result);
2886  DeoptimizeIf(not_equal, instr->environment());
2887 
2888  // Check whether the function has an instance prototype.
2889  Label non_instance;
2890  __ testb(FieldOperand(result, Map::kBitFieldOffset),
2891  Immediate(1 << Map::kHasNonInstancePrototype));
2892  __ j(not_zero, &non_instance, Label::kNear);
2893 
2894  // Get the prototype or initial map from the function.
2895  __ movp(result,
2896  FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
2897 
2898  // Check that the function has a prototype or an initial map.
2899  __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
2900  DeoptimizeIf(equal, instr->environment());
2901 
2902  // If the function does not have an initial map, we're done.
2903  Label done;
2904  __ CmpObjectType(result, MAP_TYPE, kScratchRegister);
2905  __ j(not_equal, &done, Label::kNear);
2906 
2907  // Get the prototype from the initial map.
2908  __ movp(result, FieldOperand(result, Map::kPrototypeOffset));
2909  __ jmp(&done, Label::kNear);
2910 
2911  // Non-instance prototype: Fetch prototype from constructor field
2912  // in the function's map.
2913  __ bind(&non_instance);
2914  __ movp(result, FieldOperand(result, Map::kConstructorOffset));
2915 
2916  // All done.
2917  __ bind(&done);
2918 }
2919 
2920 
2921 void LCodeGen::DoLoadRoot(LLoadRoot* instr) {
2922  Register result = ToRegister(instr->result());
2923  __ LoadRoot(result, instr->index());
2924 }
2925 
2926 
2927 void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
2928  Register arguments = ToRegister(instr->arguments());
2929  Register result = ToRegister(instr->result());
2930 
2931  if (instr->length()->IsConstantOperand() &&
2932  instr->index()->IsConstantOperand()) {
2933  int32_t const_index = ToInteger32(LConstantOperand::cast(instr->index()));
2934  int32_t const_length = ToInteger32(LConstantOperand::cast(instr->length()));
2935  if (const_index >= 0 && const_index < const_length) {
2936  StackArgumentsAccessor args(arguments, const_length,
2937  ARGUMENTS_DONT_CONTAIN_RECEIVER);
2938  __ movp(result, args.GetArgumentOperand(const_index));
2939  } else if (FLAG_debug_code) {
2940  __ int3();
2941  }
2942  } else {
2943  Register length = ToRegister(instr->length());
2944  // There are two words between the frame pointer and the last argument.
2945  // Subtracting from length accounts for one of them; add one more.
2946  if (instr->index()->IsRegister()) {
2947  __ subl(length, ToRegister(instr->index()));
2948  } else {
2949  __ subl(length, ToOperand(instr->index()));
2950  }
2951  StackArgumentsAccessor args(arguments, length,
2952  ARGUMENTS_DONT_CONTAIN_RECEIVER);
2953  __ movp(result, args.GetArgumentOperand(0));
2954  }
2955 }
2956 
2957 
2958 void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
2959  ElementsKind elements_kind = instr->elements_kind();
2960  LOperand* key = instr->key();
2961  int base_offset = instr->is_fixed_typed_array()
2962  ? FixedTypedArrayBase::kDataOffset - kHeapObjectTag
2963  : 0;
2964  Operand operand(BuildFastArrayOperand(
2965  instr->elements(),
2966  key,
2967  elements_kind,
2968  base_offset,
2969  instr->additional_index()));
2970 
2971  if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
2972  elements_kind == FLOAT32_ELEMENTS) {
2973  XMMRegister result(ToDoubleRegister(instr->result()));
2974  __ movss(result, operand);
2975  __ cvtss2sd(result, result);
2976  } else if (elements_kind == EXTERNAL_FLOAT64_ELEMENTS ||
2977  elements_kind == FLOAT64_ELEMENTS) {
2978  __ movsd(ToDoubleRegister(instr->result()), operand);
2979  } else {
2980  Register result(ToRegister(instr->result()));
2981  switch (elements_kind) {
2982  case EXTERNAL_INT8_ELEMENTS:
2983  case INT8_ELEMENTS:
2984  __ movsxbq(result, operand);
2985  break;
2986  case EXTERNAL_UINT8_ELEMENTS:
2987  case EXTERNAL_UINT8_CLAMPED_ELEMENTS:
2988  case UINT8_ELEMENTS:
2989  case UINT8_CLAMPED_ELEMENTS:
2990  __ movzxbp(result, operand);
2991  break;
2992  case EXTERNAL_INT16_ELEMENTS:
2993  case INT16_ELEMENTS:
2994  __ movsxwq(result, operand);
2995  break;
2996  case EXTERNAL_UINT16_ELEMENTS:
2997  case UINT16_ELEMENTS:
2998  __ movzxwp(result, operand);
2999  break;
3000  case EXTERNAL_INT32_ELEMENTS:
3001  case INT32_ELEMENTS:
3002  __ movsxlq(result, operand);
3003  break;
3004  case EXTERNAL_UINT32_ELEMENTS:
3005  case UINT32_ELEMENTS:
3006  __ movl(result, operand);
3007  if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
3008  __ testl(result, result);
3009  DeoptimizeIf(negative, instr->environment());
3010  }
3011  break;
3012  case EXTERNAL_FLOAT32_ELEMENTS:
3013  case EXTERNAL_FLOAT64_ELEMENTS:
3014  case FLOAT32_ELEMENTS:
3015  case FLOAT64_ELEMENTS:
3016  case FAST_ELEMENTS:
3017  case FAST_SMI_ELEMENTS:
3018  case FAST_DOUBLE_ELEMENTS:
3019  case FAST_HOLEY_ELEMENTS:
3020  case FAST_HOLEY_SMI_ELEMENTS:
3021  case FAST_HOLEY_DOUBLE_ELEMENTS:
3022  case DICTIONARY_ELEMENTS:
3023  case SLOPPY_ARGUMENTS_ELEMENTS:
3024  UNREACHABLE();
3025  break;
3026  }
3027  }
3028 }
3029 
3030 
3031 void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
3032  XMMRegister result(ToDoubleRegister(instr->result()));
3033  LOperand* key = instr->key();
3034  if (instr->hydrogen()->RequiresHoleCheck()) {
3035  int offset = FixedDoubleArray::kHeaderSize - kHeapObjectTag +
3036  sizeof(kHoleNanLower32);
3037  Operand hole_check_operand = BuildFastArrayOperand(
3038  instr->elements(),
3039  key,
3040  FAST_DOUBLE_ELEMENTS,
3041  offset,
3042  instr->additional_index());
3043  __ cmpl(hole_check_operand, Immediate(kHoleNanUpper32));
3044  DeoptimizeIf(equal, instr->environment());
3045  }
3046 
3047  Operand double_load_operand = BuildFastArrayOperand(
3048  instr->elements(),
3049  key,
3050  FAST_DOUBLE_ELEMENTS,
3051  FixedDoubleArray::kHeaderSize - kHeapObjectTag,
3052  instr->additional_index());
3053  __ movsd(result, double_load_operand);
3054 }
3055 
3056 
3057 void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
3058  HLoadKeyed* hinstr = instr->hydrogen();
3059  Register result = ToRegister(instr->result());
3060  LOperand* key = instr->key();
3061  bool requires_hole_check = hinstr->RequiresHoleCheck();
3062  int offset = FixedArray::kHeaderSize - kHeapObjectTag;
3063  Representation representation = hinstr->representation();
3064 
3065  if (representation.IsInteger32() &&
3066  hinstr->elements_kind() == FAST_SMI_ELEMENTS) {
3067  ASSERT(!requires_hole_check);
3068 #ifdef DEBUG
3069  Register scratch = kScratchRegister;
3070  __ Load(scratch,
3071  BuildFastArrayOperand(instr->elements(),
3072  key,
3073  FAST_ELEMENTS,
3074  offset,
3075  instr->additional_index()),
3076  Representation::Smi());
3077  __ AssertSmi(scratch);
3078 #endif
3079  // Read int value directly from upper half of the smi.
3080  STATIC_ASSERT(kSmiTag == 0);
3081  STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 32);
3082  offset += kPointerSize / 2;
3083  }
3084 
3085  __ Load(result,
3086  BuildFastArrayOperand(instr->elements(),
3087  key,
3088  FAST_ELEMENTS,
3089  offset,
3090  instr->additional_index()),
3091  representation);
3092 
3093  // Check for the hole value.
3094  if (requires_hole_check) {
3095  if (IsFastSmiElementsKind(hinstr->elements_kind())) {
3096  Condition smi = __ CheckSmi(result);
3097  DeoptimizeIf(NegateCondition(smi), instr->environment());
3098  } else {
3099  __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
3100  DeoptimizeIf(equal, instr->environment());
3101  }
3102  }
3103 }
3104 
3105 
3106 void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) {
3107  if (instr->is_typed_elements()) {
3108  DoLoadKeyedExternalArray(instr);
3109  } else if (instr->hydrogen()->representation().IsDouble()) {
3110  DoLoadKeyedFixedDoubleArray(instr);
3111  } else {
3112  DoLoadKeyedFixedArray(instr);
3113  }
3114 }
3115 
3116 
3117 Operand LCodeGen::BuildFastArrayOperand(
3118  LOperand* elements_pointer,
3119  LOperand* key,
3120  ElementsKind elements_kind,
3121  uint32_t offset,
3122  uint32_t additional_index) {
3123  Register elements_pointer_reg = ToRegister(elements_pointer);
3124  int shift_size = ElementsKindToShiftSize(elements_kind);
3125  if (key->IsConstantOperand()) {
3126  int32_t constant_value = ToInteger32(LConstantOperand::cast(key));
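 // The scaled constant index is folded into the operand's 32-bit
 // displacement below; abort if the value could overflow it once shifted.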
3127  if (constant_value & 0xF0000000) {
3128  Abort(kArrayIndexConstantValueTooBig);
3129  }
3130  return Operand(elements_pointer_reg,
3131  ((constant_value + additional_index) << shift_size)
3132  + offset);
3133  } else {
3134  ScaleFactor scale_factor = static_cast<ScaleFactor>(shift_size);
3135  return Operand(elements_pointer_reg,
3136  ToRegister(key),
3137  scale_factor,
3138  offset + (additional_index << shift_size));
3139  }
3140 }
3141 
3142 
3143 void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
3144  ASSERT(ToRegister(instr->context()).is(rsi));
3145  ASSERT(ToRegister(instr->object()).is(rdx));
3146  ASSERT(ToRegister(instr->key()).is(rax));
3147 
3148  Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
3149  CallCode(ic, RelocInfo::CODE_TARGET, instr);
3150 }
3151 
3152 
3153 void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
3154  Register result = ToRegister(instr->result());
3155 
3156  if (instr->hydrogen()->from_inlined()) {
3157  __ leap(result, Operand(rsp, -kFPOnStackSize + -kPCOnStackSize));
3158  } else {
3159  // Check for arguments adapter frame.
3160  Label done, adapted;
3161  __ movp(result, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
3162  __ Cmp(Operand(result, StandardFrameConstants::kContextOffset),
3163  Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
3164  __ j(equal, &adapted, Label::kNear);
3165 
3166  // No arguments adaptor frame.
3167  __ movp(result, rbp);
3168  __ jmp(&done, Label::kNear);
3169 
3170  // Arguments adaptor frame present.
3171  __ bind(&adapted);
3172  __ movp(result, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
3173 
3174  // Result is the frame pointer for the frame if not adapted and for the real
3175  // frame below the adaptor frame if adapted.
3176  __ bind(&done);
3177  }
3178 }
3179 
3180 
3181 void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
3182  Register result = ToRegister(instr->result());
3183 
3184  Label done;
3185 
3186  // If no arguments adaptor frame the number of arguments is fixed.
3187  if (instr->elements()->IsRegister()) {
3188  __ cmpp(rbp, ToRegister(instr->elements()));
3189  } else {
3190  __ cmpp(rbp, ToOperand(instr->elements()));
3191  }
3192  __ movl(result, Immediate(scope()->num_parameters()));
3193  __ j(equal, &done, Label::kNear);
3194 
3195  // Arguments adaptor frame present. Get argument length from there.
3196  __ movp(result, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
3197  __ SmiToInteger32(result,
3198  Operand(result,
3199  ArgumentsAdaptorFrameConstants::kLengthOffset));
3200 
3201  // Argument length is in result register.
3202  __ bind(&done);
3203 }
3204 
3205 
3206 void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
3207  Register receiver = ToRegister(instr->receiver());
3208  Register function = ToRegister(instr->function());
3209 
3210  // If the receiver is null or undefined, we have to pass the global
3211  // object as a receiver to normal functions. Values have to be
3212  // passed unchanged to builtins and strict-mode functions.
3213  Label global_object, receiver_ok;
3214  Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear;
3215 
3216  if (!instr->hydrogen()->known_function()) {
3217  // Do not transform the receiver to object for strict mode
3218  // functions.
3219  __ movp(kScratchRegister,
3220  FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
3221  __ testb(FieldOperand(kScratchRegister,
3222  SharedFunctionInfo::kStrictModeByteOffset),
3223  Immediate(1 << SharedFunctionInfo::kStrictModeBitWithinByte));
3224  __ j(not_equal, &receiver_ok, dist);
3225 
3226  // Do not transform the receiver to object for builtins.
3227  __ testb(FieldOperand(kScratchRegister,
3228  SharedFunctionInfo::kNativeByteOffset),
3229  Immediate(1 << SharedFunctionInfo::kNativeBitWithinByte));
3230  __ j(not_equal, &receiver_ok, dist);
3231  }
3232 
3233  // Normal function. Replace undefined or null with global receiver.
3234  __ CompareRoot(receiver, Heap::kNullValueRootIndex);
3235  __ j(equal, &global_object, Label::kNear);
3236  __ CompareRoot(receiver, Heap::kUndefinedValueRootIndex);
3237  __ j(equal, &global_object, Label::kNear);
3238 
3239  // The receiver should be a JS object.
3240  Condition is_smi = __ CheckSmi(receiver);
3241  DeoptimizeIf(is_smi, instr->environment());
3242  __ CmpObjectType(receiver, FIRST_SPEC_OBJECT_TYPE, kScratchRegister);
3243  DeoptimizeIf(below, instr->environment());
3244 
3245  __ jmp(&receiver_ok, Label::kNear);
3246  __ bind(&global_object);
3247  __ movp(receiver, FieldOperand(function, JSFunction::kContextOffset));
3248  __ movp(receiver,
3249  Operand(receiver,
3250  Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
3251  __ movp(receiver,
3252  FieldOperand(receiver, GlobalObject::kGlobalReceiverOffset));
3253 
3254  __ bind(&receiver_ok);
3255 }
3256 
3257 
3258 void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
3259  Register receiver = ToRegister(instr->receiver());
3260  Register function = ToRegister(instr->function());
3261  Register length = ToRegister(instr->length());
3262  Register elements = ToRegister(instr->elements());
3263  ASSERT(receiver.is(rax)); // Used for parameter count.
3264  ASSERT(function.is(rdi)); // Required by InvokeFunction.
3265  ASSERT(ToRegister(instr->result()).is(rax));
3266 
3267  // Copy the arguments to this function possibly from the
3268  // adaptor frame below it.
3269  const uint32_t kArgumentsLimit = 1 * KB;
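 // Deoptimize if there are too many arguments to copy onto the stack.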
3270  __ cmpp(length, Immediate(kArgumentsLimit));
3271  DeoptimizeIf(above, instr->environment());
3272 
3273  __ Push(receiver);
3274  __ movp(receiver, length);
3275 
3276  // Loop through the arguments pushing them onto the execution
3277  // stack.
3278  Label invoke, loop;
3279  // length is a small non-negative integer, due to the test above.
3280  __ testl(length, length);
3281  __ j(zero, &invoke, Label::kNear);
3282  __ bind(&loop);
3283  StackArgumentsAccessor args(elements, length,
3284  ARGUMENTS_DONT_CONTAIN_RECEIVER);
3285  __ Push(args.GetArgumentOperand(0));
3286  __ decl(length);
3287  __ j(not_zero, &loop);
3288 
3289  // Invoke the function.
3290  __ bind(&invoke);
3291  ASSERT(instr->HasPointerMap());
3292  LPointerMap* pointers = instr->pointer_map();
3293  SafepointGenerator safepoint_generator(
3294  this, pointers, Safepoint::kLazyDeopt);
3295  ParameterCount actual(rax);
3296  __ InvokeFunction(function, actual, CALL_FUNCTION, safepoint_generator);
3297 }
3298 
3299 
3300 void LCodeGen::DoPushArgument(LPushArgument* instr) {
3301  LOperand* argument = instr->value();
3302  EmitPushTaggedOperand(argument);
3303 }
3304 
3305 
3306 void LCodeGen::DoDrop(LDrop* instr) {
3307  __ Drop(instr->count());
3308 }
3309 
3310 
3311 void LCodeGen::DoThisFunction(LThisFunction* instr) {
3312  Register result = ToRegister(instr->result());
3313  __ movp(result, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
3314 }
3315 
3316 
3317 void LCodeGen::DoContext(LContext* instr) {
3318  Register result = ToRegister(instr->result());
3319  if (info()->IsOptimizing()) {
3320  __ movp(result, Operand(rbp, StandardFrameConstants::kContextOffset));
3321  } else {
3322  // If there is no frame, the context must be in rsi.
3323  ASSERT(result.is(rsi));
3324  }
3325 }
3326 
3327 
3328 void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
3329  ASSERT(ToRegister(instr->context()).is(rsi));
3330  __ Push(rsi); // The context is the first argument.
3331  __ Push(instr->hydrogen()->pairs());
3332  __ Push(Smi::FromInt(instr->hydrogen()->flags()));
3333  CallRuntime(Runtime::kHiddenDeclareGlobals, 3, instr);
3334 }
3335 
3336 
3337 void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
3338  int formal_parameter_count,
3339  int arity,
3340  LInstruction* instr,
3341  RDIState rdi_state) {
3342  bool dont_adapt_arguments =
3343  formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel;
3344  bool can_invoke_directly =
3345  dont_adapt_arguments || formal_parameter_count == arity;
3346 
3347  LPointerMap* pointers = instr->pointer_map();
3348 
3349  if (can_invoke_directly) {
3350  if (rdi_state == RDI_UNINITIALIZED) {
3351  __ Move(rdi, function);
3352  }
3353 
3354  // Change context.
3355  __ movp(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
3356 
3357  // Set rax to arguments count if adaptation is not needed. Assumes that rax
3358  // is available to write to at this point.
3359  if (dont_adapt_arguments) {
3360  __ Set(rax, arity);
3361  }
3362 
3363  // Invoke function.
3364  if (function.is_identical_to(info()->closure())) {
3365  __ CallSelf();
3366  } else {
3367  __ Call(FieldOperand(rdi, JSFunction::kCodeEntryOffset));
3368  }
3369 
3370  // Set up deoptimization.
3371  RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT, 0);
3372  } else {
3373  // We need to adapt arguments.
3374  SafepointGenerator generator(
3375  this, pointers, Safepoint::kLazyDeopt);
3376  ParameterCount count(arity);
3377  ParameterCount expected(formal_parameter_count);
3378  __ InvokeFunction(function, expected, count, CALL_FUNCTION, generator);
3379  }
3380 }
3381 
3382 
3383 void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
3384  ASSERT(ToRegister(instr->result()).is(rax));
3385 
3386  LPointerMap* pointers = instr->pointer_map();
3387  SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
3388 
3389  if (instr->target()->IsConstantOperand()) {
3390  LConstantOperand* target = LConstantOperand::cast(instr->target());
3391  Handle<Code> code = Handle<Code>::cast(ToHandle(target));
3392  generator.BeforeCall(__ CallSize(code));
3393  __ call(code, RelocInfo::CODE_TARGET);
3394  } else {
3395  ASSERT(instr->target()->IsRegister());
3396  Register target = ToRegister(instr->target());
3397  generator.BeforeCall(__ CallSize(target));
3398  __ addp(target, Immediate(Code::kHeaderSize - kHeapObjectTag));
3399  __ call(target);
3400  }
3401  generator.AfterCall();
3402 }
3403 
3404 
3405 void LCodeGen::DoCallJSFunction(LCallJSFunction* instr) {
3406  ASSERT(ToRegister(instr->function()).is(rdi));
3407  ASSERT(ToRegister(instr->result()).is(rax));
3408 
3409  if (instr->hydrogen()->pass_argument_count()) {
3410  __ Set(rax, instr->arity());
3411  }
3412 
3413  // Change context.
3414  __ movp(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
3415 
3416  LPointerMap* pointers = instr->pointer_map();
3417  SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
3418 
3419  bool is_self_call = false;
3420  if (instr->hydrogen()->function()->IsConstant()) {
3421  Handle<JSFunction> jsfun = Handle<JSFunction>::null();
3422  HConstant* fun_const = HConstant::cast(instr->hydrogen()->function());
3423  jsfun = Handle<JSFunction>::cast(fun_const->handle(isolate()));
3424  is_self_call = jsfun.is_identical_to(info()->closure());
3425  }
3426 
3427  if (is_self_call) {
3428  __ CallSelf();
3429  } else {
3430  Operand target = FieldOperand(rdi, JSFunction::kCodeEntryOffset);
3431  generator.BeforeCall(__ CallSize(target));
3432  __ Call(target);
3433  }
3434  generator.AfterCall();
3435 }
3436 
3437 
3438 void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
3439  Register input_reg = ToRegister(instr->value());
3440  __ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset),
3441  Heap::kHeapNumberMapRootIndex);
3442  DeoptimizeIf(not_equal, instr->environment());
3443 
3444  Label slow, allocated, done;
3445  Register tmp = input_reg.is(rax) ? rcx : rax;
3446  Register tmp2 = tmp.is(rcx) ? rdx : input_reg.is(rcx) ? rdx : rcx;
3447 
3448  // Preserve the value of all registers.
3449  PushSafepointRegistersScope scope(this);
3450 
3451  __ movl(tmp, FieldOperand(input_reg, HeapNumber::kExponentOffset));
3452  // Check the sign of the argument. If the argument is positive, just
3453  // return it. We do not need to patch the stack since |input| and
3454  // |result| are the same register and |input| will be restored
3455  // unchanged by popping safepoint registers.
3456  __ testl(tmp, Immediate(HeapNumber::kSignMask));
3457  __ j(zero, &done);
3458 
3459  __ AllocateHeapNumber(tmp, tmp2, &slow);
3460  __ jmp(&allocated, Label::kNear);
3461 
3462  // Slow case: Call the runtime system to do the number allocation.
3463  __ bind(&slow);
3464  CallRuntimeFromDeferred(
3465  Runtime::kHiddenAllocateHeapNumber, 0, instr, instr->context());
3466  // Set the pointer to the new heap number in tmp.
3467  if (!tmp.is(rax)) __ movp(tmp, rax);
3468  // Restore input_reg after call to runtime.
3469  __ LoadFromSafepointRegisterSlot(input_reg, input_reg);
3470 
3471  __ bind(&allocated);
3472  __ movq(tmp2, FieldOperand(input_reg, HeapNumber::kValueOffset));
3473  __ shl(tmp2, Immediate(1));
3474  __ shr(tmp2, Immediate(1));
3475  __ movq(FieldOperand(tmp, HeapNumber::kValueOffset), tmp2);
3476  __ StoreToSafepointRegisterSlot(input_reg, tmp);
3477 
3478  __ bind(&done);
3479 }
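
// Illustrative sketch, not part of the original V8 source: the shl/shr pair
// above clears the sign bit of the raw IEEE-754 bit pattern before the result
// is stored into the freshly allocated heap number. Assuming an ordinary
// double input, the deferred abs computation amounts to:
#include <cstdint>
#include <cstring>

static double AbsViaBits(double x) {
  uint64_t bits;
  std::memcpy(&bits, &x, sizeof bits);   // movq tmp2, [input value]
  bits <<= 1;                            // shl tmp2, 1: drop the sign bit
  bits >>= 1;                            // shr tmp2, 1: restore the magnitude
  double result;
  std::memcpy(&result, &bits, sizeof result);
  return result;                         // non-negative (or +NaN) for any input
}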
3480 
3481 
3482 void LCodeGen::EmitIntegerMathAbs(LMathAbs* instr) {
3483  Register input_reg = ToRegister(instr->value());
3484  __ testl(input_reg, input_reg);
3485  Label is_positive;
3486  __ j(not_sign, &is_positive, Label::kNear);
3487  __ negl(input_reg); // Sets flags.
3488  DeoptimizeIf(negative, instr->environment());
3489  __ bind(&is_positive);
3490 }
3491 
3492 
3493 void LCodeGen::EmitSmiMathAbs(LMathAbs* instr) {
3494  Register input_reg = ToRegister(instr->value());
3495  __ testp(input_reg, input_reg);
3496  Label is_positive;
3497  __ j(not_sign, &is_positive, Label::kNear);
3498  __ negp(input_reg); // Sets flags.
3499  DeoptimizeIf(negative, instr->environment());
3500  __ bind(&is_positive);
3501 }
3502 
3503 
3504 void LCodeGen::DoMathAbs(LMathAbs* instr) {
3505  // Class for deferred case.
3506  class DeferredMathAbsTaggedHeapNumber V8_FINAL : public LDeferredCode {
3507  public:
3508  DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen, LMathAbs* instr)
3509  : LDeferredCode(codegen), instr_(instr) { }
3510  virtual void Generate() V8_OVERRIDE {
3511  codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
3512  }
3513  virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
3514  private:
3515  LMathAbs* instr_;
3516  };
3517 
3518  ASSERT(instr->value()->Equals(instr->result()));
3519  Representation r = instr->hydrogen()->value()->representation();
3520 
3521  if (r.IsDouble()) {
3522  XMMRegister scratch = double_scratch0();
3523  XMMRegister input_reg = ToDoubleRegister(instr->value());
3524  __ xorps(scratch, scratch);
3525  __ subsd(scratch, input_reg);
3526  __ andps(input_reg, scratch);
3527  } else if (r.IsInteger32()) {
3528  EmitIntegerMathAbs(instr);
3529  } else if (r.IsSmi()) {
3530  EmitSmiMathAbs(instr);
3531  } else { // Tagged case.
3532  DeferredMathAbsTaggedHeapNumber* deferred =
3533  new(zone()) DeferredMathAbsTaggedHeapNumber(this, instr);
3534  Register input_reg = ToRegister(instr->value());
3535  // Smi check.
3536  __ JumpIfNotSmi(input_reg, deferred->entry());
3537  EmitSmiMathAbs(instr);
3538  __ bind(deferred->exit());
3539  }
3540 }
3541 
3542 
3543 void LCodeGen::DoMathFloor(LMathFloor* instr) {
3544  XMMRegister xmm_scratch = double_scratch0();
3545  Register output_reg = ToRegister(instr->result());
3546  XMMRegister input_reg = ToDoubleRegister(instr->value());
3547 
3548  if (CpuFeatures::IsSupported(SSE4_1)) {
3549  CpuFeatureScope scope(masm(), SSE4_1);
3550  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3551  // Deoptimize if minus zero.
3552  __ movq(output_reg, input_reg);
3553  __ subq(output_reg, Immediate(1));
3554  DeoptimizeIf(overflow, instr->environment());
3555  }
3556  __ roundsd(xmm_scratch, input_reg, Assembler::kRoundDown);
3557  __ cvttsd2si(output_reg, xmm_scratch);
3558  __ cmpl(output_reg, Immediate(0x1));
3559  DeoptimizeIf(overflow, instr->environment());
3560  } else {
3561  Label negative_sign, done;
3562  // Deoptimize on unordered.
3563  __ xorps(xmm_scratch, xmm_scratch); // Zero the register.
3564  __ ucomisd(input_reg, xmm_scratch);
3565  DeoptimizeIf(parity_even, instr->environment());
3566  __ j(below, &negative_sign, Label::kNear);
3567 
3568  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3569  // Check for negative zero.
3570  Label positive_sign;
3571  __ j(above, &positive_sign, Label::kNear);
3572  __ movmskpd(output_reg, input_reg);
3573  __ testq(output_reg, Immediate(1));
3574  DeoptimizeIf(not_zero, instr->environment());
3575  __ Set(output_reg, 0);
3576  __ jmp(&done, Label::kNear);
3577  __ bind(&positive_sign);
3578  }
3579 
3580  // Use truncating instruction (OK because input is positive).
3581  __ cvttsd2si(output_reg, input_reg);
3582  // Overflow is signalled with minint.
3583  __ cmpl(output_reg, Immediate(0x1));
3584  DeoptimizeIf(overflow, instr->environment());
3585  __ jmp(&done, Label::kNear);
3586 
3587  // Non-zero negative reaches here.
3588  __ bind(&negative_sign);
3589  // Truncate, then compare and compensate.
3590  __ cvttsd2si(output_reg, input_reg);
3591  __ Cvtlsi2sd(xmm_scratch, output_reg);
3592  __ ucomisd(input_reg, xmm_scratch);
3593  __ j(equal, &done, Label::kNear);
3594  __ subl(output_reg, Immediate(1));
3595  DeoptimizeIf(overflow, instr->environment());
3596 
3597  __ bind(&done);
3598  }
3599 }
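
// Illustrative sketch, not part of the original V8 source: without SSE4.1 the
// floor is obtained by truncating towards zero and then subtracting one when
// the truncation rounded a negative, non-integral input up. Assuming the input
// is ordered (not NaN) and the result fits in int32, the fallback path above
// behaves like:
#include <cstdint>

static int32_t FloorFallbackSketch(double x) {
  int32_t t = static_cast<int32_t>(x);          // cvttsd2si truncates towards zero
  if (x < 0.0 && static_cast<double>(t) != x) {
    t -= 1;                                     // compensate, e.g. -1.5 -> -1 -> -2
  }
  return t;
}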
3600 
3601 
3602 void LCodeGen::DoMathRound(LMathRound* instr) {
3603  const XMMRegister xmm_scratch = double_scratch0();
3604  Register output_reg = ToRegister(instr->result());
3605  XMMRegister input_reg = ToDoubleRegister(instr->value());
3606  XMMRegister input_temp = ToDoubleRegister(instr->temp());
3607  static int64_t one_half = V8_INT64_C(0x3FE0000000000000); // 0.5
3608  static int64_t minus_one_half = V8_INT64_C(0xBFE0000000000000); // -0.5
3609 
3610  Label done, round_to_zero, below_one_half;
3611  Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear;
3612  __ movq(kScratchRegister, one_half);
3613  __ movq(xmm_scratch, kScratchRegister);
3614  __ ucomisd(xmm_scratch, input_reg);
3615  __ j(above, &below_one_half, Label::kNear);
3616 
3617  // CVTTSD2SI rounds towards zero, since 0.5 <= x, we use floor(0.5 + x).
3618  __ addsd(xmm_scratch, input_reg);
3619  __ cvttsd2si(output_reg, xmm_scratch);
3620  // Overflow is signalled with minint.
3621  __ cmpl(output_reg, Immediate(0x1));
3622  __ RecordComment("D2I conversion overflow");
3623  DeoptimizeIf(overflow, instr->environment());
3624  __ jmp(&done, dist);
3625 
3626  __ bind(&below_one_half);
3627  __ movq(kScratchRegister, minus_one_half);
3628  __ movq(xmm_scratch, kScratchRegister);
3629  __ ucomisd(xmm_scratch, input_reg);
3630  __ j(below_equal, &round_to_zero, Label::kNear);
3631 
3632  // CVTTSD2SI rounds towards zero, we use ceil(x - (-0.5)) and then
3633  // compare and compensate.
3634  __ movq(input_temp, input_reg); // Do not alter input_reg.
3635  __ subsd(input_temp, xmm_scratch);
3636  __ cvttsd2si(output_reg, input_temp);
3637  // Catch minint due to overflow, and to prevent overflow when compensating.
3638  __ cmpl(output_reg, Immediate(0x1));
3639  __ RecordComment("D2I conversion overflow");
3640  DeoptimizeIf(overflow, instr->environment());
3641 
3642  __ Cvtlsi2sd(xmm_scratch, output_reg);
3643  __ ucomisd(xmm_scratch, input_temp);
3644  __ j(equal, &done, dist);
3645  __ subl(output_reg, Immediate(1));
3646  // No overflow because we already ruled out minint.
3647  __ jmp(&done, dist);
3648 
3649  __ bind(&round_to_zero);
3650  // We return 0 for the input range [+0, 0.5[, or [-0.5, 0.5[ if
3651  // we can ignore the difference between a result of -0 and +0.
3652  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3653  __ movq(output_reg, input_reg);
3654  __ testq(output_reg, output_reg);
3655  __ RecordComment("Minus zero");
3656  DeoptimizeIf(negative, instr->environment());
3657  }
3658  __ Set(output_reg, 0);
3659  __ bind(&done);
3660 }
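
// Illustrative sketch, not part of the original V8 source: Math.round() is
// computed as floor(x + 0.5). For x >= 0.5 the truncating conversion already
// yields the floor; for x <= -0.5 the truncation is compensated when it
// rounded up; everything in between becomes 0 (deoptimizing first when -0
// must be preserved). Assuming the result fits in int32:
#include <cstdint>

static int32_t RoundSketch(double x) {
  if (x >= 0.5) {
    return static_cast<int32_t>(x + 0.5);        // truncation equals floor here
  }
  if (x >= -0.5) {
    return 0;                                    // covers [-0.5, 0.5); -0 deopts
  }
  double shifted = x + 0.5;                      // x - (-0.5)
  int32_t t = static_cast<int32_t>(shifted);     // truncates towards zero
  if (static_cast<double>(t) != shifted) t -= 1; // compensate, e.g. -1.6 -> -2
  return t;
}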
3661 
3662 
3663 void LCodeGen::DoMathSqrt(LMathSqrt* instr) {
3664  XMMRegister input_reg = ToDoubleRegister(instr->value());
3665  ASSERT(ToDoubleRegister(instr->result()).is(input_reg));
3666  __ sqrtsd(input_reg, input_reg);
3667 }
3668 
3669 
3670 void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
3671  XMMRegister xmm_scratch = double_scratch0();
3672  XMMRegister input_reg = ToDoubleRegister(instr->value());
3673  ASSERT(ToDoubleRegister(instr->result()).is(input_reg));
3674 
3675  // Note that according to ECMA-262 15.8.2.13:
3676  // Math.pow(-Infinity, 0.5) == Infinity
3677  // Math.sqrt(-Infinity) == NaN
3678  Label done, sqrt;
3679  // Check base for -Infinity. According to IEEE-754, double-precision
3680  // -Infinity has the highest 12 bits set and the lowest 52 bits cleared.
3681  __ movq(kScratchRegister, V8_INT64_C(0xFFF0000000000000));
3682  __ movq(xmm_scratch, kScratchRegister);
3683  __ ucomisd(xmm_scratch, input_reg);
3684  // Comparing -Infinity with NaN results in "unordered", which sets the
3685  // zero flag as if both were equal. However, it also sets the carry flag.
3686  __ j(not_equal, &sqrt, Label::kNear);
3687  __ j(carry, &sqrt, Label::kNear);
3688  // If input is -Infinity, return Infinity.
3689  __ xorps(input_reg, input_reg);
3690  __ subsd(input_reg, xmm_scratch);
3691  __ jmp(&done, Label::kNear);
3692 
3693  // Square root.
3694  __ bind(&sqrt);
3695  __ xorps(xmm_scratch, xmm_scratch);
3696  __ addsd(input_reg, xmm_scratch); // Convert -0 to +0.
3697  __ sqrtsd(input_reg, input_reg);
3698  __ bind(&done);
3699 }
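
// Illustrative sketch, not part of the original V8 source: Math.pow(x, 0.5)
// cannot be lowered to a bare sqrtsd because ECMA-262 15.8.2.13 requires
// pow(-Infinity, 0.5) == +Infinity while sqrt(-Infinity) is NaN, and sqrt of
// -0 must yield +0. Assuming a plain double input, the sequence above is:
#include <cmath>
#include <limits>

static double PowHalfSketch(double x) {
  if (x == -std::numeric_limits<double>::infinity()) {
    return std::numeric_limits<double>::infinity();  // handled before the sqrt
  }
  return std::sqrt(x + 0.0);  // adding +0 first turns -0 into +0
}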
3700 
3701 
3702 void LCodeGen::DoPower(LPower* instr) {
3703  Representation exponent_type = instr->hydrogen()->right()->representation();
3704  // Having marked this as a call, we can use any registers.
3705  // Just make sure that the input/output registers are the expected ones.
3706 
3707  Register exponent = rdx;
3708  ASSERT(!instr->right()->IsRegister() ||
3709  ToRegister(instr->right()).is(exponent));
3710  ASSERT(!instr->right()->IsDoubleRegister() ||
3711  ToDoubleRegister(instr->right()).is(xmm1));
3712  ASSERT(ToDoubleRegister(instr->left()).is(xmm2));
3713  ASSERT(ToDoubleRegister(instr->result()).is(xmm3));
3714 
3715  if (exponent_type.IsSmi()) {
3716  MathPowStub stub(MathPowStub::TAGGED);
3717  __ CallStub(&stub);
3718  } else if (exponent_type.IsTagged()) {
3719  Label no_deopt;
3720  __ JumpIfSmi(exponent, &no_deopt, Label::kNear);
3721  __ CmpObjectType(exponent, HEAP_NUMBER_TYPE, rcx);
3722  DeoptimizeIf(not_equal, instr->environment());
3723  __ bind(&no_deopt);
3724  MathPowStub stub(MathPowStub::TAGGED);
3725  __ CallStub(&stub);
3726  } else if (exponent_type.IsInteger32()) {
3727  MathPowStub stub(MathPowStub::INTEGER);
3728  __ CallStub(&stub);
3729  } else {
3730  ASSERT(exponent_type.IsDouble());
3731  MathPowStub stub(MathPowStub::DOUBLE);
3732  __ CallStub(&stub);
3733  }
3734 }
3735 
3736 
3737 void LCodeGen::DoMathExp(LMathExp* instr) {
3738  XMMRegister input = ToDoubleRegister(instr->value());
3739  XMMRegister result = ToDoubleRegister(instr->result());
3740  XMMRegister temp0 = double_scratch0();
3741  Register temp1 = ToRegister(instr->temp1());
3742  Register temp2 = ToRegister(instr->temp2());
3743 
3744  MathExpGenerator::EmitMathExp(masm(), input, result, temp0, temp1, temp2);
3745 }
3746 
3747 
3748 void LCodeGen::DoMathLog(LMathLog* instr) {
3749  ASSERT(instr->value()->Equals(instr->result()));
3750  XMMRegister input_reg = ToDoubleRegister(instr->value());
3751  XMMRegister xmm_scratch = double_scratch0();
3752  Label positive, done, zero;
3753  __ xorps(xmm_scratch, xmm_scratch);
3754  __ ucomisd(input_reg, xmm_scratch);
3755  __ j(above, &positive, Label::kNear);
3756  __ j(not_carry, &zero, Label::kNear);
3757  ExternalReference nan =
3758  ExternalReference::address_of_canonical_non_hole_nan();
3759  Operand nan_operand = masm()->ExternalOperand(nan);
3760  __ movsd(input_reg, nan_operand);
3761  __ jmp(&done, Label::kNear);
3762  __ bind(&zero);
3763  ExternalReference ninf =
3764  ExternalReference::address_of_negative_infinity();
3765  Operand ninf_operand = masm()->ExternalOperand(ninf);
3766  __ movsd(input_reg, ninf_operand);
3767  __ jmp(&done, Label::kNear);
3768  __ bind(&positive);
3769  __ fldln2();
3770  __ subp(rsp, Immediate(kDoubleSize));
3771  __ movsd(Operand(rsp, 0), input_reg);
3772  __ fld_d(Operand(rsp, 0));
3773  __ fyl2x();
3774  __ fstp_d(Operand(rsp, 0));
3775  __ movsd(input_reg, Operand(rsp, 0));
3776  __ addp(rsp, Immediate(kDoubleSize));
3777  __ bind(&done);
3778 }
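
// Illustrative sketch, not part of the original V8 source: the x87 sequence
// evaluates ln(x) = ln(2) * log2(x), which is what fldln2 followed by fyl2x
// computes. Zero and negative (or NaN) inputs are diverted to -Infinity and
// the canonical NaN before the FPU is touched:
#include <cmath>

static double LogSketch(double x) {
  if (x == 0.0) return -INFINITY;         // the &zero path loads -Infinity
  if (!(x > 0.0)) return NAN;             // negative or NaN: canonical NaN
  return std::log(2.0) * std::log2(x);    // fldln2 ; fyl2x
}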
3779 
3780 
3781 void LCodeGen::DoMathClz32(LMathClz32* instr) {
3782  Register input = ToRegister(instr->value());
3783  Register result = ToRegister(instr->result());
3784  Label not_zero_input;
3785  __ bsrl(result, input);
3786 
3787  __ j(not_zero, &not_zero_input);
3788  __ Set(result, 63); // 63^31 == 32
3789 
3790  __ bind(&not_zero_input);
3791  __ xorl(result, Immediate(31)); // for x in [0..31], 31^x == 31-x.
3792 }
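
// Illustrative sketch, not part of the original V8 source: bsrl leaves the
// index of the most significant set bit, and xor-ing that index with 31 turns
// it into the leading-zero count (31 ^ i == 31 - i for i in [0, 31]). Seeding
// the result with 63 makes the zero input, for which bsr leaves its
// destination unchanged, come out as 63 ^ 31 == 32:
#include <cstdint>

static uint32_t Clz32Sketch(uint32_t x) {
  uint32_t result = 63;                        // covers the x == 0 case
  for (int i = 31; i >= 0; --i) {              // what bsrl computes in hardware
    if ((x >> i) & 1u) {
      result = static_cast<uint32_t>(i);
      break;
    }
  }
  return result ^ 31u;                         // 32 for 0, else 31 - msb index
}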
3793 
3794 
3795 void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
3796  ASSERT(ToRegister(instr->context()).is(rsi));
3797  ASSERT(ToRegister(instr->function()).is(rdi));
3798  ASSERT(instr->HasPointerMap());
3799 
3800  Handle<JSFunction> known_function = instr->hydrogen()->known_function();
3801  if (known_function.is_null()) {
3802  LPointerMap* pointers = instr->pointer_map();
3803  SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
3804  ParameterCount count(instr->arity());
3805  __ InvokeFunction(rdi, count, CALL_FUNCTION, generator);
3806  } else {
3807  CallKnownFunction(known_function,
3808  instr->hydrogen()->formal_parameter_count(),
3809  instr->arity(),
3810  instr,
3811  RDI_CONTAINS_TARGET);
3812  }
3813 }
3814 
3815 
3816 void LCodeGen::DoCallFunction(LCallFunction* instr) {
3817  ASSERT(ToRegister(instr->context()).is(rsi));
3818  ASSERT(ToRegister(instr->function()).is(rdi));
3819  ASSERT(ToRegister(instr->result()).is(rax));
3820 
3821  int arity = instr->arity();
3822  CallFunctionStub stub(arity, instr->hydrogen()->function_flags());
3823  CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
3824 }
3825 
3826 
3827 void LCodeGen::DoCallNew(LCallNew* instr) {
3828  ASSERT(ToRegister(instr->context()).is(rsi));
3829  ASSERT(ToRegister(instr->constructor()).is(rdi));
3830  ASSERT(ToRegister(instr->result()).is(rax));
3831 
3832  __ Set(rax, instr->arity());
3833  // No cell in rbx for construct type feedback in optimized code.
3834  __ LoadRoot(rbx, Heap::kUndefinedValueRootIndex);
3835  CallConstructStub stub(NO_CALL_FUNCTION_FLAGS);
3836  CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
3837 }
3838 
3839 
3840 void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
3841  ASSERT(ToRegister(instr->context()).is(rsi));
3842  ASSERT(ToRegister(instr->constructor()).is(rdi));
3843  ASSERT(ToRegister(instr->result()).is(rax));
3844 
3845  __ Set(rax, instr->arity());
3846  __ LoadRoot(rbx, Heap::kUndefinedValueRootIndex);
3847  ElementsKind kind = instr->hydrogen()->elements_kind();
3848  AllocationSiteOverrideMode override_mode =
3849  (AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE)
3850  ? DISABLE_ALLOCATION_SITES
3851  : DONT_OVERRIDE;
3852 
3853  if (instr->arity() == 0) {
3854  ArrayNoArgumentConstructorStub stub(kind, override_mode);
3855  CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
3856  } else if (instr->arity() == 1) {
3857  Label done;
3858  if (IsFastPackedElementsKind(kind)) {
3859  Label packed_case;
3860  // Passing a non-zero length to the single-argument Array constructor
3861  // produces a holey array, so look at the first argument.
3862  __ movp(rcx, Operand(rsp, 0));
3863  __ testp(rcx, rcx);
3864  __ j(zero, &packed_case, Label::kNear);
3865 
3866  ElementsKind holey_kind = GetHoleyElementsKind(kind);
3867  ArraySingleArgumentConstructorStub stub(holey_kind, override_mode);
3868  CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
3869  __ jmp(&done, Label::kNear);
3870  __ bind(&packed_case);
3871  }
3872 
3873  ArraySingleArgumentConstructorStub stub(kind, override_mode);
3874  CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
3875  __ bind(&done);
3876  } else {
3877  ArrayNArgumentsConstructorStub stub(kind, override_mode);
3878  CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
3879  }
3880 }
3881 
3882 
3883 void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
3884  ASSERT(ToRegister(instr->context()).is(rsi));
3885  CallRuntime(instr->function(), instr->arity(), instr, instr->save_doubles());
3886 }
3887 
3888 
3889 void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) {
3890  Register function = ToRegister(instr->function());
3891  Register code_object = ToRegister(instr->code_object());
3892  __ leap(code_object, FieldOperand(code_object, Code::kHeaderSize));
3893  __ movp(FieldOperand(function, JSFunction::kCodeEntryOffset), code_object);
3894 }
3895 
3896 
3897 void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) {
3898  Register result = ToRegister(instr->result());
3899  Register base = ToRegister(instr->base_object());
3900  if (instr->offset()->IsConstantOperand()) {
3901  LConstantOperand* offset = LConstantOperand::cast(instr->offset());
3902  __ leap(result, Operand(base, ToInteger32(offset)));
3903  } else {
3904  Register offset = ToRegister(instr->offset());
3905  __ leap(result, Operand(base, offset, times_1, 0));
3906  }
3907 }
3908 
3909 
3910 void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
3911  HStoreNamedField* hinstr = instr->hydrogen();
3912  Representation representation = instr->representation();
3913 
3914  HObjectAccess access = hinstr->access();
3915  int offset = access.offset();
3916 
3917  if (access.IsExternalMemory()) {
3918  ASSERT(!hinstr->NeedsWriteBarrier());
3919  Register value = ToRegister(instr->value());
3920  if (instr->object()->IsConstantOperand()) {
3921  ASSERT(value.is(rax));
3922  LConstantOperand* object = LConstantOperand::cast(instr->object());
3923  __ store_rax(ToExternalReference(object));
3924  } else {
3925  Register object = ToRegister(instr->object());
3926  __ Store(MemOperand(object, offset), value, representation);
3927  }
3928  return;
3929  }
3930 
3931  Register object = ToRegister(instr->object());
3932  Handle<Map> transition = instr->transition();
3933  SmiCheck check_needed = hinstr->value()->IsHeapObject()
3934  ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
3935 
3936  ASSERT(!(representation.IsSmi() &&
3937  instr->value()->IsConstantOperand() &&
3938  !IsInteger32Constant(LConstantOperand::cast(instr->value()))));
3939  if (representation.IsHeapObject()) {
3940  if (instr->value()->IsConstantOperand()) {
3941  LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
3942  if (chunk_->LookupConstant(operand_value)->HasSmiValue()) {
3943  DeoptimizeIf(no_condition, instr->environment());
3944  }
3945  } else {
3946  if (!hinstr->value()->type().IsHeapObject()) {
3947  Register value = ToRegister(instr->value());
3948  Condition cc = masm()->CheckSmi(value);
3949  DeoptimizeIf(cc, instr->environment());
3950 
3951  // We know that value is a smi now, so we can omit the check below.
3952  check_needed = OMIT_SMI_CHECK;
3953  }
3954  }
3955  } else if (representation.IsDouble()) {
3956  ASSERT(transition.is_null());
3957  ASSERT(access.IsInobject());
3958  ASSERT(!hinstr->NeedsWriteBarrier());
3959  XMMRegister value = ToDoubleRegister(instr->value());
3960  __ movsd(FieldOperand(object, offset), value);
3961  return;
3962  }
3963 
3964  if (!transition.is_null()) {
3965  if (!hinstr->NeedsWriteBarrierForMap()) {
3966  __ Move(FieldOperand(object, HeapObject::kMapOffset), transition);
3967  } else {
3968  Register temp = ToRegister(instr->temp());
3969  __ Move(kScratchRegister, transition);
3970  __ movp(FieldOperand(object, HeapObject::kMapOffset), kScratchRegister);
3971  // Update the write barrier for the map field.
3972  __ RecordWriteField(object,
3973  HeapObject::kMapOffset,
3974  kScratchRegister,
3975  temp,
3976  kSaveFPRegs,
3977  OMIT_REMEMBERED_SET,
3978  OMIT_SMI_CHECK);
3979  }
3980  }
3981 
3982  // Do the store.
3983  Register write_register = object;
3984  if (!access.IsInobject()) {
3985  write_register = ToRegister(instr->temp());
3986  __ movp(write_register, FieldOperand(object, JSObject::kPropertiesOffset));
3987  }
3988 
3989  if (representation.IsSmi() &&
3990  hinstr->value()->representation().IsInteger32()) {
3991  ASSERT(hinstr->store_mode() == STORE_TO_INITIALIZED_ENTRY);
3992 #ifdef DEBUG
3993  Register scratch = kScratchRegister;
3994  __ Load(scratch, FieldOperand(write_register, offset), representation);
3995  __ AssertSmi(scratch);
3996 #endif
3997  // Store int value directly to upper half of the smi.
3998  STATIC_ASSERT(kSmiTag == 0);
3999  ASSERT(kSmiTagSize + kSmiShiftSize == 32);
4000  offset += kPointerSize / 2;
4001  representation = Representation::Integer32();
4002  }
4003 
4004  Operand operand = FieldOperand(write_register, offset);
4005 
4006  if (instr->value()->IsRegister()) {
4007  Register value = ToRegister(instr->value());
4008  __ Store(operand, value, representation);
4009  } else {
4010  LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
4011  if (IsInteger32Constant(operand_value)) {
4012  ASSERT(!hinstr->NeedsWriteBarrier());
4013  int32_t value = ToInteger32(operand_value);
4014  if (representation.IsSmi()) {
4015  __ Move(operand, Smi::FromInt(value));
4016 
4017  } else {
4018  __ movl(operand, Immediate(value));
4019  }
4020 
4021  } else {
4022  Handle<Object> handle_value = ToHandle(operand_value);
4023  ASSERT(!hinstr->NeedsWriteBarrier());
4024  __ Move(operand, handle_value);
4025  }
4026  }
4027 
4028  if (hinstr->NeedsWriteBarrier()) {
4029  Register value = ToRegister(instr->value());
4030  Register temp = access.IsInobject() ? ToRegister(instr->temp()) : object;
4031  // Update the write barrier for the object for in-object properties.
4032  __ RecordWriteField(write_register,
4033  offset,
4034  value,
4035  temp,
4036  kSaveFPRegs,
4037  EMIT_REMEMBERED_SET,
4038  check_needed);
4039  }
4040 }
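
// Illustrative sketch, not part of the original V8 source: on x64 a smi keeps
// its 32-bit payload in the upper half of the tagged word, so a field that is
// already known to hold a smi can be updated with a single 32-bit store at
// offset + kPointerSize / 2, as done above. Assuming the usual little-endian
// layout with a 32-bit smi shift:
#include <cstdint>
#include <cstring>

static int64_t SmiFromInt32Sketch(int32_t value) {
  return static_cast<int64_t>(value) << 32;    // tag bits in the low half stay 0
}

static void StoreInt32IntoSmiSlotSketch(int64_t* slot, int32_t value) {
  // Equivalent of "movl [slot + 4], value" against an initialized smi slot.
  std::memcpy(reinterpret_cast<char*>(slot) + 4, &value, sizeof value);
}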
4041 
4042 
4043 void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
4044  ASSERT(ToRegister(instr->context()).is(rsi));
4045  ASSERT(ToRegister(instr->object()).is(rdx));
4046  ASSERT(ToRegister(instr->value()).is(rax));
4047 
4048  __ Move(rcx, instr->hydrogen()->name());
4049  Handle<Code> ic = StoreIC::initialize_stub(isolate(), instr->strict_mode());
4050  CallCode(ic, RelocInfo::CODE_TARGET, instr);
4051 }
4052 
4053 
4054 void LCodeGen::ApplyCheckIf(Condition cc, LBoundsCheck* check) {
4055  if (FLAG_debug_code && check->hydrogen()->skip_check()) {
4056  Label done;
4057  __ j(NegateCondition(cc), &done, Label::kNear);
4058  __ int3();
4059  __ bind(&done);
4060  } else {
4061  DeoptimizeIf(cc, check->environment());
4062  }
4063 }
4064 
4065 
4066 void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
4067  HBoundsCheck* hinstr = instr->hydrogen();
4068  if (hinstr->skip_check()) return;
4069 
4070  Representation representation = hinstr->length()->representation();
4071  ASSERT(representation.Equals(hinstr->index()->representation()));
4072  ASSERT(representation.IsSmiOrInteger32());
4073 
4074  if (instr->length()->IsRegister()) {
4075  Register reg = ToRegister(instr->length());
4076 
4077  if (instr->index()->IsConstantOperand()) {
4078  int32_t constant_index =
4079  ToInteger32(LConstantOperand::cast(instr->index()));
4080  if (representation.IsSmi()) {
4081  __ Cmp(reg, Smi::FromInt(constant_index));
4082  } else {
4083  __ cmpl(reg, Immediate(constant_index));
4084  }
4085  } else {
4086  Register reg2 = ToRegister(instr->index());
4087  if (representation.IsSmi()) {
4088  __ cmpp(reg, reg2);
4089  } else {
4090  __ cmpl(reg, reg2);
4091  }
4092  }
4093  } else {
4094  Operand length = ToOperand(instr->length());
4095  if (instr->index()->IsConstantOperand()) {
4096  int32_t constant_index =
4097  ToInteger32(LConstantOperand::cast(instr->index()));
4098  if (representation.IsSmi()) {
4099  __ Cmp(length, Smi::FromInt(constant_index));
4100  } else {
4101  __ cmpl(length, Immediate(constant_index));
4102  }
4103  } else {
4104  if (representation.IsSmi()) {
4105  __ cmpp(length, ToRegister(instr->index()));
4106  } else {
4107  __ cmpl(length, ToRegister(instr->index()));
4108  }
4109  }
4110  }
4111  Condition condition = hinstr->allow_equality() ? below : below_equal;
4112  ApplyCheckIf(condition, instr);
4113 }
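
// Illustrative sketch, not part of the original V8 source: the deoptimization
// condition mirrors "index is out of range". When equality is allowed the
// index may equal the length, otherwise it must be strictly smaller (the
// generated compare is unsigned, which this simplified form glosses over):
#include <cstdint>

static bool BoundsCheckFailsSketch(int32_t length, int32_t index,
                                   bool allow_equality) {
  return allow_equality ? length < index : length <= index;
}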
4114 
4115 
4116 void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
4117  ElementsKind elements_kind = instr->elements_kind();
4118  LOperand* key = instr->key();
4119  int base_offset = instr->is_fixed_typed_array()
4120  ? FixedTypedArrayBase::kDataOffset - kHeapObjectTag
4121  : 0;
4122  Operand operand(BuildFastArrayOperand(
4123  instr->elements(),
4124  key,
4125  elements_kind,
4126  base_offset,
4127  instr->additional_index()));
4128 
4129  if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
4130  elements_kind == FLOAT32_ELEMENTS) {
4131  XMMRegister value(ToDoubleRegister(instr->value()));
4132  __ cvtsd2ss(value, value);
4133  __ movss(operand, value);
4134  } else if (elements_kind == EXTERNAL_FLOAT64_ELEMENTS ||
4135  elements_kind == FLOAT64_ELEMENTS) {
4136  __ movsd(operand, ToDoubleRegister(instr->value()));
4137  } else {
4138  Register value(ToRegister(instr->value()));
4139  switch (elements_kind) {
4140  case EXTERNAL_INT8_ELEMENTS:
4141  case EXTERNAL_UINT8_ELEMENTS:
4142  case EXTERNAL_UINT8_CLAMPED_ELEMENTS:
4143  case INT8_ELEMENTS:
4144  case UINT8_ELEMENTS:
4145  case UINT8_CLAMPED_ELEMENTS:
4146  __ movb(operand, value);
4147  break;
4148  case EXTERNAL_INT16_ELEMENTS:
4149  case EXTERNAL_UINT16_ELEMENTS:
4150  case INT16_ELEMENTS:
4151  case UINT16_ELEMENTS:
4152  __ movw(operand, value);
4153  break;
4154  case EXTERNAL_INT32_ELEMENTS:
4155  case EXTERNAL_UINT32_ELEMENTS:
4156  case INT32_ELEMENTS:
4157  case UINT32_ELEMENTS:
4158  __ movl(operand, value);
4159  break;
4160  case EXTERNAL_FLOAT32_ELEMENTS:
4161  case EXTERNAL_FLOAT64_ELEMENTS:
4162  case FLOAT32_ELEMENTS:
4163  case FLOAT64_ELEMENTS:
4164  case FAST_ELEMENTS:
4165  case FAST_SMI_ELEMENTS:
4166  case FAST_DOUBLE_ELEMENTS:
4167  case FAST_HOLEY_ELEMENTS:
4168  case FAST_HOLEY_SMI_ELEMENTS:
4169  case FAST_HOLEY_DOUBLE_ELEMENTS:
4170  case DICTIONARY_ELEMENTS:
4171  case SLOPPY_ARGUMENTS_ELEMENTS:
4172  UNREACHABLE();
4173  break;
4174  }
4175  }
4176 }
4177 
4178 
4179 void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
4180  XMMRegister value = ToDoubleRegister(instr->value());
4181  LOperand* key = instr->key();
4182  if (instr->NeedsCanonicalization()) {
4183  Label have_value;
4184 
4185  __ ucomisd(value, value);
4186  __ j(parity_odd, &have_value, Label::kNear); // Jump if the value is not NaN.
4187 
4188  __ Set(kScratchRegister, BitCast<uint64_t>(
4189  FixedDoubleArray::canonical_not_the_hole_nan_as_double()));
4190  __ movq(value, kScratchRegister);
4191 
4192  __ bind(&have_value);
4193  }
4194 
4195  Operand double_store_operand = BuildFastArrayOperand(
4196  instr->elements(),
4197  key,
4198  FAST_DOUBLE_ELEMENTS,
4199  FixedDoubleArray::kHeaderSize - kHeapObjectTag,
4200  instr->additional_index());
4201 
4202  __ movsd(double_store_operand, value);
4203 }
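
// Illustrative sketch, not part of the original V8 source: before a double is
// stored into a FixedDoubleArray every NaN is collapsed to one canonical bit
// pattern so that the hole marker can never be forged from script values. The
// canonical_nan_bits parameter below stands in for the value produced by
// FixedDoubleArray::canonical_not_the_hole_nan_as_double():
#include <cstdint>
#include <cstring>

static double CanonicalizeNaNSketch(double value, uint64_t canonical_nan_bits) {
  if (value == value) return value;      // ordered compare: not NaN, keep as is
  double canonical;
  std::memcpy(&canonical, &canonical_nan_bits, sizeof canonical);
  return canonical;                      // every NaN collapses to one pattern
}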
4204 
4205 
4206 void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
4207  HStoreKeyed* hinstr = instr->hydrogen();
4208  LOperand* key = instr->key();
4209  int offset = FixedArray::kHeaderSize - kHeapObjectTag;
4210  Representation representation = hinstr->value()->representation();
4211 
4212  if (representation.IsInteger32()) {
4213  ASSERT(hinstr->store_mode() == STORE_TO_INITIALIZED_ENTRY);
4214  ASSERT(hinstr->elements_kind() == FAST_SMI_ELEMENTS);
4215 #ifdef DEBUG
4216  Register scratch = kScratchRegister;
4217  __ Load(scratch,
4218  BuildFastArrayOperand(instr->elements(),
4219  key,
4220  FAST_ELEMENTS,
4221  offset,
4222  instr->additional_index()),
4223  Representation::Smi());
4224  __ AssertSmi(scratch);
4225 #endif
4226  // Store int value directly to upper half of the smi.
4227  STATIC_ASSERT(kSmiTag == 0);
4228  ASSERT(kSmiTagSize + kSmiShiftSize == 32);
4229  offset += kPointerSize / 2;
4230  }
4231 
4232  Operand operand =
4233  BuildFastArrayOperand(instr->elements(),
4234  key,
4235  FAST_ELEMENTS,
4236  offset,
4237  instr->additional_index());
4238 
4239  if (instr->value()->IsRegister()) {
4240  __ Store(operand, ToRegister(instr->value()), representation);
4241  } else {
4242  LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
4243  if (IsInteger32Constant(operand_value)) {
4244  int32_t value = ToInteger32(operand_value);
4245  if (representation.IsSmi()) {
4246  __ Move(operand, Smi::FromInt(value));
4247 
4248  } else {
4249  __ movl(operand, Immediate(value));
4250  }
4251  } else {
4252  Handle<Object> handle_value = ToHandle(operand_value);
4253  __ Move(operand, handle_value);
4254  }
4255  }
4256 
4257  if (hinstr->NeedsWriteBarrier()) {
4258  Register elements = ToRegister(instr->elements());
4259  ASSERT(instr->value()->IsRegister());
4260  Register value = ToRegister(instr->value());
4261  ASSERT(!key->IsConstantOperand());
4262  SmiCheck check_needed = hinstr->value()->IsHeapObject()
4263  ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
4264  // Compute address of modified element and store it into key register.
4265  Register key_reg(ToRegister(key));
4266  __ leap(key_reg, operand);
4267  __ RecordWrite(elements,
4268  key_reg,
4269  value,
4270  kSaveFPRegs,
4271  EMIT_REMEMBERED_SET,
4272  check_needed);
4273  }
4274 }
4275 
4276 
4277 void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) {
4278  if (instr->is_typed_elements()) {
4279  DoStoreKeyedExternalArray(instr);
4280  } else if (instr->hydrogen()->value()->representation().IsDouble()) {
4281  DoStoreKeyedFixedDoubleArray(instr);
4282  } else {
4283  DoStoreKeyedFixedArray(instr);
4284  }
4285 }
4286 
4287 
4288 void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
4289  ASSERT(ToRegister(instr->context()).is(rsi));
4290  ASSERT(ToRegister(instr->object()).is(rdx));
4291  ASSERT(ToRegister(instr->key()).is(rcx));
4292  ASSERT(ToRegister(instr->value()).is(rax));
4293 
4294  Handle<Code> ic = instr->strict_mode() == STRICT
4295  ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
4296  : isolate()->builtins()->KeyedStoreIC_Initialize();
4297  CallCode(ic, RelocInfo::CODE_TARGET, instr);
4298 }
4299 
4300 
4301 void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
4302  Register object_reg = ToRegister(instr->object());
4303 
4304  Handle<Map> from_map = instr->original_map();
4305  Handle<Map> to_map = instr->transitioned_map();
4306  ElementsKind from_kind = instr->from_kind();
4307  ElementsKind to_kind = instr->to_kind();
4308 
4309  Label not_applicable;
4310  __ Cmp(FieldOperand(object_reg, HeapObject::kMapOffset), from_map);
4311  __ j(not_equal, &not_applicable);
4312  if (IsSimpleMapChangeTransition(from_kind, to_kind)) {
4313  Register new_map_reg = ToRegister(instr->new_map_temp());
4314  __ Move(new_map_reg, to_map, RelocInfo::EMBEDDED_OBJECT);
4315  __ movp(FieldOperand(object_reg, HeapObject::kMapOffset), new_map_reg);
4316  // Write barrier.
4317  ASSERT_NE(instr->temp(), NULL);
4318  __ RecordWriteField(object_reg, HeapObject::kMapOffset, new_map_reg,
4319  ToRegister(instr->temp()), kDontSaveFPRegs);
4320  } else {
4321  ASSERT(ToRegister(instr->context()).is(rsi));
4322  PushSafepointRegistersScope scope(this);
4323  if (!object_reg.is(rax)) {
4324  __ movp(rax, object_reg);
4325  }
4326  __ Move(rbx, to_map);
4327  bool is_js_array = from_map->instance_type() == JS_ARRAY_TYPE;
4328  TransitionElementsKindStub stub(from_kind, to_kind, is_js_array);
4329  __ CallStub(&stub);
4330  RecordSafepointWithRegisters(
4331  instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
4332  }
4333  __ bind(&not_applicable);
4334 }
4335 
4336 
4337 void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
4338  Register object = ToRegister(instr->object());
4339  Register temp = ToRegister(instr->temp());
4340  Label no_memento_found;
4341  __ TestJSArrayForAllocationMemento(object, temp, &no_memento_found);
4342  DeoptimizeIf(equal, instr->environment());
4343  __ bind(&no_memento_found);
4344 }
4345 
4346 
4347 void LCodeGen::DoStringAdd(LStringAdd* instr) {
4348  ASSERT(ToRegister(instr->context()).is(rsi));
4349  ASSERT(ToRegister(instr->left()).is(rdx));
4350  ASSERT(ToRegister(instr->right()).is(rax));
4351  StringAddStub stub(instr->hydrogen()->flags(),
4352  instr->hydrogen()->pretenure_flag());
4353  CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
4354 }
4355 
4356 
4357 void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
4358  class DeferredStringCharCodeAt V8_FINAL : public LDeferredCode {
4359  public:
4360  DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
4361  : LDeferredCode(codegen), instr_(instr) { }
4362  virtual void Generate() V8_OVERRIDE {
4363  codegen()->DoDeferredStringCharCodeAt(instr_);
4364  }
4365  virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
4366  private:
4367  LStringCharCodeAt* instr_;
4368  };
4369 
4370  DeferredStringCharCodeAt* deferred =
4371  new(zone()) DeferredStringCharCodeAt(this, instr);
4372 
4373  StringCharLoadGenerator::Generate(masm(),
4374  ToRegister(instr->string()),
4375  ToRegister(instr->index()),
4376  ToRegister(instr->result()),
4377  deferred->entry());
4378  __ bind(deferred->exit());
4379 }
4380 
4381 
4382 void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
4383  Register string = ToRegister(instr->string());
4384  Register result = ToRegister(instr->result());
4385 
4386  // TODO(3095996): Get rid of this. For now, we need to make the
4387  // result register contain a valid pointer because it is already
4388  // contained in the register pointer map.
4389  __ Set(result, 0);
4390 
4391  PushSafepointRegistersScope scope(this);
4392  __ Push(string);
4393  // Push the index as a smi. This is safe because of the checks in
4394  // DoStringCharCodeAt above.
4395  STATIC_ASSERT(String::kMaxLength <= Smi::kMaxValue);
4396  if (instr->index()->IsConstantOperand()) {
4397  int32_t const_index = ToInteger32(LConstantOperand::cast(instr->index()));
4398  __ Push(Smi::FromInt(const_index));
4399  } else {
4400  Register index = ToRegister(instr->index());
4401  __ Integer32ToSmi(index, index);
4402  __ Push(index);
4403  }
4404  CallRuntimeFromDeferred(
4405  Runtime::kHiddenStringCharCodeAt, 2, instr, instr->context());
4406  __ AssertSmi(rax);
4407  __ SmiToInteger32(rax, rax);
4408  __ StoreToSafepointRegisterSlot(result, rax);
4409 }
4410 
4411 
4412 void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
4413  class DeferredStringCharFromCode V8_FINAL : public LDeferredCode {
4414  public:
4415  DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
4416  : LDeferredCode(codegen), instr_(instr) { }
4417  virtual void Generate() V8_OVERRIDE {
4418  codegen()->DoDeferredStringCharFromCode(instr_);
4419  }
4420  virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
4421  private:
4422  LStringCharFromCode* instr_;
4423  };
4424 
4425  DeferredStringCharFromCode* deferred =
4426  new(zone()) DeferredStringCharFromCode(this, instr);
4427 
4428  ASSERT(instr->hydrogen()->value()->representation().IsInteger32());
4429  Register char_code = ToRegister(instr->char_code());
4430  Register result = ToRegister(instr->result());
4431  ASSERT(!char_code.is(result));
4432 
4433  __ cmpl(char_code, Immediate(String::kMaxOneByteCharCode));
4434  __ j(above, deferred->entry());
4435  __ movsxlq(char_code, char_code);
4436  __ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex);
4437  __ movp(result, FieldOperand(result,
4438  char_code, times_pointer_size,
4439  FixedArray::kHeaderSize));
4440  __ CompareRoot(result, Heap::kUndefinedValueRootIndex);
4441  __ j(equal, deferred->entry());
4442  __ bind(deferred->exit());
4443 }
4444 
4445 
4446 void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
4447  Register char_code = ToRegister(instr->char_code());
4448  Register result = ToRegister(instr->result());
4449 
4450  // TODO(3095996): Get rid of this. For now, we need to make the
4451  // result register contain a valid pointer because it is already
4452  // contained in the register pointer map.
4453  __ Set(result, 0);
4454 
4455  PushSafepointRegistersScope scope(this);
4456  __ Integer32ToSmi(char_code, char_code);
4457  __ Push(char_code);
4458  CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr, instr->context());
4459  __ StoreToSafepointRegisterSlot(result, rax);
4460 }
4461 
4462 
4463 void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
4464  LOperand* input = instr->value();
4465  ASSERT(input->IsRegister() || input->IsStackSlot());
4466  LOperand* output = instr->result();
4467  ASSERT(output->IsDoubleRegister());
4468  if (input->IsRegister()) {
4469  __ Cvtlsi2sd(ToDoubleRegister(output), ToRegister(input));
4470  } else {
4471  __ Cvtlsi2sd(ToDoubleRegister(output), ToOperand(input));
4472  }
4473 }
4474 
4475 
4476 void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
4477  LOperand* input = instr->value();
4478  LOperand* output = instr->result();
4479  LOperand* temp = instr->temp();
4480 
4481  __ LoadUint32(ToDoubleRegister(output),
4482  ToRegister(input),
4483  ToDoubleRegister(temp));
4484 }
4485 
4486 
4487 void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
4488  LOperand* input = instr->value();
4489  ASSERT(input->IsRegister() && input->Equals(instr->result()));
4490  Register reg = ToRegister(input);
4491 
4492  __ Integer32ToSmi(reg, reg);
4493 }
4494 
4495 
4496 void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
4497  class DeferredNumberTagU V8_FINAL : public LDeferredCode {
4498  public:
4499  DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr)
4500  : LDeferredCode(codegen), instr_(instr) { }
4501  virtual void Generate() V8_OVERRIDE {
4502  codegen()->DoDeferredNumberTagU(instr_);
4503  }
4504  virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
4505  private:
4506  LNumberTagU* instr_;
4507  };
4508 
4509  LOperand* input = instr->value();
4510  ASSERT(input->IsRegister() && input->Equals(instr->result()));
4511  Register reg = ToRegister(input);
4512 
4513  DeferredNumberTagU* deferred = new(zone()) DeferredNumberTagU(this, instr);
4514  __ cmpl(reg, Immediate(Smi::kMaxValue));
4515  __ j(above, deferred->entry());
4516  __ Integer32ToSmi(reg, reg);
4517  __ bind(deferred->exit());
4518 }
4519 
4520 
4521 void LCodeGen::DoDeferredNumberTagU(LNumberTagU* instr) {
4522  Label done, slow;
4523  Register reg = ToRegister(instr->value());
4524  Register tmp = ToRegister(instr->temp1());
4525  XMMRegister temp_xmm = ToDoubleRegister(instr->temp2());
4526 
4527  // Load value into temp_xmm which will be preserved across potential call to
4528  // runtime (MacroAssembler::EnterExitFrameEpilogue preserves only allocatable
4529  // XMM registers on x64).
4530  XMMRegister xmm_scratch = double_scratch0();
4531  __ LoadUint32(temp_xmm, reg, xmm_scratch);
4532 
4533  if (FLAG_inline_new) {
4534  __ AllocateHeapNumber(reg, tmp, &slow);
4535  __ jmp(&done, Label::kNear);
4536  }
4537 
4538  // Slow case: Call the runtime system to do the number allocation.
4539  __ bind(&slow);
4540  {
4541  // Put a valid pointer value in the stack slot where the result
4542  // register is stored, as this register is in the pointer map, but contains
4543  // an integer value.
4544  __ Set(reg, 0);
4545 
4546  // Preserve the value of all registers.
4547  PushSafepointRegistersScope scope(this);
4548 
4549  // NumberTagU uses the context from the frame, rather than
4550  // the environment's HContext or HInlinedContext value.
4551  // It only calls Runtime::kHiddenAllocateHeapNumber.
4552  // The corresponding HChange instructions are added in a phase that does
4553  // not have easy access to the local context.
4554  __ Set(rsi, 0);
4555  __ CallRuntimeSaveDoubles(Runtime::kHiddenAllocateHeapNumber);
4556  RecordSafepointWithRegisters(
4557  instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
4558  __ StoreToSafepointRegisterSlot(reg, rax);
4559  }
4560 
4561  // Done. Put the value in temp_xmm into the value of the allocated heap
4562  // number.
4563  __ bind(&done);
4564  __ movsd(FieldOperand(reg, HeapNumber::kValueOffset), temp_xmm);
4565 }
4566 
4567 
4568 void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
4569  class DeferredNumberTagD V8_FINAL : public LDeferredCode {
4570  public:
4571  DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
4572  : LDeferredCode(codegen), instr_(instr) { }
4573  virtual void Generate() V8_OVERRIDE {
4574  codegen()->DoDeferredNumberTagD(instr_);
4575  }
4576  virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
4577  private:
4578  LNumberTagD* instr_;
4579  };
4580 
4581  XMMRegister input_reg = ToDoubleRegister(instr->value());
4582  Register reg = ToRegister(instr->result());
4583  Register tmp = ToRegister(instr->temp());
4584 
4585  DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr);
4586  if (FLAG_inline_new) {
4587  __ AllocateHeapNumber(reg, tmp, deferred->entry());
4588  } else {
4589  __ jmp(deferred->entry());
4590  }
4591  __ bind(deferred->exit());
4592  __ movsd(FieldOperand(reg, HeapNumber::kValueOffset), input_reg);
4593 }
4594 
4595 
4596 void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
4597  // TODO(3095996): Get rid of this. For now, we need to make the
4598  // result register contain a valid pointer because it is already
4599  // contained in the register pointer map.
4600  Register reg = ToRegister(instr->result());
4601  __ Move(reg, Smi::FromInt(0));
4602 
4603  {
4604  PushSafepointRegistersScope scope(this);
4605  // NumberTagD uses the context from the frame, rather than
4606  // the environment's HContext or HInlinedContext value.
4607  // It only calls Runtime::kHiddenAllocateHeapNumber.
4608  // The corresponding HChange instructions are added in a phase that does
4609  // not have easy access to the local context.
4610  __ Set(rsi, 0);
4611  __ CallRuntimeSaveDoubles(Runtime::kHiddenAllocateHeapNumber);
4612  RecordSafepointWithRegisters(
4613  instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
4614  __ movp(kScratchRegister, rax);
4615  }
4616  __ movp(reg, kScratchRegister);
4617 }
4618 
4619 
4620 void LCodeGen::DoSmiTag(LSmiTag* instr) {
4621  HChange* hchange = instr->hydrogen();
4622  Register input = ToRegister(instr->value());
4623  Register output = ToRegister(instr->result());
4624  if (hchange->CheckFlag(HValue::kCanOverflow) &&
4625  hchange->value()->CheckFlag(HValue::kUint32)) {
4626  __ testl(input, input);
4627  DeoptimizeIf(sign, instr->environment());
4628  }
4629  __ Integer32ToSmi(output, input);
4630  if (hchange->CheckFlag(HValue::kCanOverflow) &&
4631  !hchange->value()->CheckFlag(HValue::kUint32)) {
4632  DeoptimizeIf(overflow, instr->environment());
4633  }
4634 }
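
// Illustrative sketch, not part of the original V8 source: a uint32 value only
// fits in a smi when it does not exceed Smi::kMaxValue (2^31 - 1 here), which
// for 32-bit inputs is exactly the values with a clear sign bit, hence the
// testl/sign deopt above. Signed inputs rely on the overflow flag of the
// tagging shift instead:
#include <cstdint>

static bool Uint32FitsInSmiSketch(uint32_t value) {
  return (value & 0x80000000u) == 0;     // same predicate as testl + sign check
}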
4635 
4636 
4637 void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
4638  ASSERT(instr->value()->Equals(instr->result()));
4639  Register input = ToRegister(instr->value());
4640  if (instr->needs_check()) {
4641  Condition is_smi = __ CheckSmi(input);
4642  DeoptimizeIf(NegateCondition(is_smi), instr->environment());
4643  } else {
4644  __ AssertSmi(input);
4645  }
4646  __ SmiToInteger32(input, input);
4647 }
4648 
4649 
4650 void LCodeGen::EmitNumberUntagD(Register input_reg,
4651  XMMRegister result_reg,
4652  bool can_convert_undefined_to_nan,
4653  bool deoptimize_on_minus_zero,
4654  LEnvironment* env,
4655  NumberUntagDMode mode) {
4656  Label convert, load_smi, done;
4657 
4658  if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
4659  // Smi check.
4660  __ JumpIfSmi(input_reg, &load_smi, Label::kNear);
4661 
4662  // Heap number map check.
4663  __ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset),
4664  Heap::kHeapNumberMapRootIndex);
4665 
4666  // On x64 it is safe to load at heap number offset before evaluating the map
4667  // check, since all heap objects are at least two words long.
4668  __ movsd(result_reg, FieldOperand(input_reg, HeapNumber::kValueOffset));
4669 
4670  if (can_convert_undefined_to_nan) {
4671  __ j(not_equal, &convert, Label::kNear);
4672  } else {
4673  DeoptimizeIf(not_equal, env);
4674  }
4675 
4676  if (deoptimize_on_minus_zero) {
4677  XMMRegister xmm_scratch = double_scratch0();
4678  __ xorps(xmm_scratch, xmm_scratch);
4679  __ ucomisd(xmm_scratch, result_reg);
4680  __ j(not_equal, &done, Label::kNear);
4681  __ movmskpd(kScratchRegister, result_reg);
4682  __ testq(kScratchRegister, Immediate(1));
4683  DeoptimizeIf(not_zero, env);
4684  }
4685  __ jmp(&done, Label::kNear);
4686 
4687  if (can_convert_undefined_to_nan) {
4688  __ bind(&convert);
4689 
4690  // Convert undefined (and hole) to NaN. Compute NaN as 0/0.
4691  __ CompareRoot(input_reg, Heap::kUndefinedValueRootIndex);
4692  DeoptimizeIf(not_equal, env);
4693 
4694  __ xorps(result_reg, result_reg);
4695  __ divsd(result_reg, result_reg);
4696  __ jmp(&done, Label::kNear);
4697  }
4698  } else {
4699  ASSERT(mode == NUMBER_CANDIDATE_IS_SMI);
4700  }
4701 
4702  // Smi to XMM conversion
4703  __ bind(&load_smi);
4704  __ SmiToInteger32(kScratchRegister, input_reg);
4705  __ Cvtlsi2sd(result_reg, kScratchRegister);
4706  __ bind(&done);
4707 }
4708 
4709 
4710 void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr, Label* done) {
4711  Register input_reg = ToRegister(instr->value());
4712 
4713  if (instr->truncating()) {
4714  Label no_heap_number, check_bools, check_false;
4715 
4716  // Heap number map check.
4717  __ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset),
4718  Heap::kHeapNumberMapRootIndex);
4719  __ j(not_equal, &no_heap_number, Label::kNear);
4720  __ TruncateHeapNumberToI(input_reg, input_reg);
4721  __ jmp(done);
4722 
4723  __ bind(&no_heap_number);
4724  // Check for Oddballs. Undefined/False is converted to zero and True to one
4725  // for truncating conversions.
4726  __ CompareRoot(input_reg, Heap::kUndefinedValueRootIndex);
4727  __ j(not_equal, &check_bools, Label::kNear);
4728  __ Set(input_reg, 0);
4729  __ jmp(done);
4730 
4731  __ bind(&check_bools);
4732  __ CompareRoot(input_reg, Heap::kTrueValueRootIndex);
4733  __ j(not_equal, &check_false, Label::kNear);
4734  __ Set(input_reg, 1);
4735  __ jmp(done);
4736 
4737  __ bind(&check_false);
4738  __ CompareRoot(input_reg, Heap::kFalseValueRootIndex);
4739  __ RecordComment("Deferred TaggedToI: cannot truncate");
4740  DeoptimizeIf(not_equal, instr->environment());
4741  __ Set(input_reg, 0);
4742  __ jmp(done);
4743  } else {
4744  Label bailout;
4745  XMMRegister xmm_temp = ToDoubleRegister(instr->temp());
4746  __ TaggedToI(input_reg, input_reg, xmm_temp,
4747  instr->hydrogen()->GetMinusZeroMode(), &bailout, Label::kNear);
4748 
4749  __ jmp(done);
4750  __ bind(&bailout);
4751  DeoptimizeIf(no_condition, instr->environment());
4752  }
4753 }
4754 
4755 
4756 void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
4757  class DeferredTaggedToI V8_FINAL : public LDeferredCode {
4758  public:
4759  DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
4760  : LDeferredCode(codegen), instr_(instr) { }
4761  virtual void Generate() V8_OVERRIDE {
4762  codegen()->DoDeferredTaggedToI(instr_, done());
4763  }
4764  virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
4765  private:
4766  LTaggedToI* instr_;
4767  };
4768 
4769  LOperand* input = instr->value();
4770  ASSERT(input->IsRegister());
4771  ASSERT(input->Equals(instr->result()));
4772  Register input_reg = ToRegister(input);
4773 
4774  if (instr->hydrogen()->value()->representation().IsSmi()) {
4775  __ SmiToInteger32(input_reg, input_reg);
4776  } else {
4777  DeferredTaggedToI* deferred = new(zone()) DeferredTaggedToI(this, instr);
4778  __ JumpIfNotSmi(input_reg, deferred->entry());
4779  __ SmiToInteger32(input_reg, input_reg);
4780  __ bind(deferred->exit());
4781  }
4782 }
4783 
4784 
4785 void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
4786  LOperand* input = instr->value();
4787  ASSERT(input->IsRegister());
4788  LOperand* result = instr->result();
4789  ASSERT(result->IsDoubleRegister());
4790 
4791  Register input_reg = ToRegister(input);
4792  XMMRegister result_reg = ToDoubleRegister(result);
4793 
4794  HValue* value = instr->hydrogen()->value();
4795  NumberUntagDMode mode = value->representation().IsSmi()
4796  ? NUMBER_CANDIDATE_IS_SMI : NUMBER_CANDIDATE_IS_ANY_TAGGED;
4797 
4798  EmitNumberUntagD(input_reg, result_reg,
4799  instr->hydrogen()->can_convert_undefined_to_nan(),
4800  instr->hydrogen()->deoptimize_on_minus_zero(),
4801  instr->environment(),
4802  mode);
4803 }
4804 
4805 
4806 void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
4807  LOperand* input = instr->value();
4808  ASSERT(input->IsDoubleRegister());
4809  LOperand* result = instr->result();
4810  ASSERT(result->IsRegister());
4811 
4812  XMMRegister input_reg = ToDoubleRegister(input);
4813  Register result_reg = ToRegister(result);
4814 
4815  if (instr->truncating()) {
4816  __ TruncateDoubleToI(result_reg, input_reg);
4817  } else {
4818  Label bailout, done;
4819  XMMRegister xmm_scratch = double_scratch0();
4820  __ DoubleToI(result_reg, input_reg, xmm_scratch,
4821  instr->hydrogen()->GetMinusZeroMode(), &bailout, Label::kNear);
4822 
4823  __ jmp(&done, Label::kNear);
4824  __ bind(&bailout);
4825  DeoptimizeIf(no_condition, instr->environment());
4826  __ bind(&done);
4827  }
4828 }
4829 
4830 
4831 void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
4832  LOperand* input = instr->value();
4833  ASSERT(input->IsDoubleRegister());
4834  LOperand* result = instr->result();
4835  ASSERT(result->IsRegister());
4836 
4837  XMMRegister input_reg = ToDoubleRegister(input);
4838  Register result_reg = ToRegister(result);
4839 
4840  Label bailout, done;
4841  XMMRegister xmm_scratch = double_scratch0();
4842  __ DoubleToI(result_reg, input_reg, xmm_scratch,
4843  instr->hydrogen()->GetMinusZeroMode(), &bailout, Label::kNear);
4844 
4845  __ jmp(&done, Label::kNear);
4846  __ bind(&bailout);
4847  DeoptimizeIf(no_condition, instr->environment());
4848  __ bind(&done);
4849 
4850  __ Integer32ToSmi(result_reg, result_reg);
4851  DeoptimizeIf(overflow, instr->environment());
4852 }
4853 
4854 
4855 void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
4856  LOperand* input = instr->value();
4857  Condition cc = masm()->CheckSmi(ToRegister(input));
4858  DeoptimizeIf(NegateCondition(cc), instr->environment());
4859 }
4860 
4861 
4862 void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
4863  if (!instr->hydrogen()->value()->IsHeapObject()) {
4864  LOperand* input = instr->value();
4865  Condition cc = masm()->CheckSmi(ToRegister(input));
4866  DeoptimizeIf(cc, instr->environment());
4867  }
4868 }
4869 
4870 
4871 void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
4872  Register input = ToRegister(instr->value());
4873 
4874  __ movp(kScratchRegister, FieldOperand(input, HeapObject::kMapOffset));
4875 
4876  if (instr->hydrogen()->is_interval_check()) {
4877  InstanceType first;
4878  InstanceType last;
4879  instr->hydrogen()->GetCheckInterval(&first, &last);
4880 
4881  __ cmpb(FieldOperand(kScratchRegister, Map::kInstanceTypeOffset),
4882  Immediate(static_cast<int8_t>(first)));
4883 
4884  // If there is only one type in the interval check for equality.
4885  if (first == last) {
4886  DeoptimizeIf(not_equal, instr->environment());
4887  } else {
4888  DeoptimizeIf(below, instr->environment());
4889  // Omit check for the last type.
4890  if (last != LAST_TYPE) {
4891  __ cmpb(FieldOperand(kScratchRegister, Map::kInstanceTypeOffset),
4892  Immediate(static_cast<int8_t>(last)));
4893  DeoptimizeIf(above, instr->environment());
4894  }
4895  }
4896  } else {
4897  uint8_t mask;
4898  uint8_t tag;
4899  instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);
4900 
4901  if (IsPowerOf2(mask)) {
4902  ASSERT(tag == 0 || IsPowerOf2(tag));
4903  __ testb(FieldOperand(kScratchRegister, Map::kInstanceTypeOffset),
4904  Immediate(mask));
4905  DeoptimizeIf(tag == 0 ? not_zero : zero, instr->environment());
4906  } else {
4907  __ movzxbl(kScratchRegister,
4908  FieldOperand(kScratchRegister, Map::kInstanceTypeOffset));
4909  __ andb(kScratchRegister, Immediate(mask));
4910  __ cmpb(kScratchRegister, Immediate(tag));
4911  DeoptimizeIf(not_equal, instr->environment());
4912  }
4913  }
4914 }
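
// Illustrative sketch, not part of the original V8 source: when the instance
// type mask has a single bit set, "type & mask == tag" (with tag either 0 or
// the mask itself) collapses into one bit test, which is why the testb fast
// path above can skip the movzxbl/andb/cmpb sequence:
#include <cstdint>

static bool MaskAndTagMatchSketch(uint8_t type, uint8_t mask, uint8_t tag) {
  return (type & mask) == tag;                     // general slow-path check
}

static bool MaskAndTagMatchPowerOf2Sketch(uint8_t type, uint8_t mask,
                                          uint8_t tag) {
  // Valid only when mask is a power of two and tag is 0 or equal to mask.
  return tag == 0 ? (type & mask) == 0 : (type & mask) != 0;
}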
4915 
4916 
4917 void LCodeGen::DoCheckValue(LCheckValue* instr) {
4918  Register reg = ToRegister(instr->value());
4919  __ Cmp(reg, instr->hydrogen()->object().handle());
4920  DeoptimizeIf(not_equal, instr->environment());
4921 }
4922 
4923 
4924 void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
4925  {
4926  PushSafepointRegistersScope scope(this);
4927  __ Push(object);
4928  __ Set(rsi, 0);
4929  __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance);
4930  RecordSafepointWithRegisters(
4931  instr->pointer_map(), 1, Safepoint::kNoLazyDeopt);
4932 
4933  __ testp(rax, Immediate(kSmiTagMask));
4934  }
4935  DeoptimizeIf(zero, instr->environment());
4936 }
4937 
4938 
4939 void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
4940  class DeferredCheckMaps V8_FINAL : public LDeferredCode {
4941  public:
4942  DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object)
4943  : LDeferredCode(codegen), instr_(instr), object_(object) {
4944  SetExit(check_maps());
4945  }
4946  virtual void Generate() V8_OVERRIDE {
4947  codegen()->DoDeferredInstanceMigration(instr_, object_);
4948  }
4949  Label* check_maps() { return &check_maps_; }
4950  virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
4951  private:
4952  LCheckMaps* instr_;
4953  Label check_maps_;
4954  Register object_;
4955  };
4956 
4957  if (instr->hydrogen()->CanOmitMapChecks()) return;
4958 
4959  LOperand* input = instr->value();
4960  ASSERT(input->IsRegister());
4961  Register reg = ToRegister(input);
4962 
4963  DeferredCheckMaps* deferred = NULL;
4964  if (instr->hydrogen()->has_migration_target()) {
4965  deferred = new(zone()) DeferredCheckMaps(this, instr, reg);
4966  __ bind(deferred->check_maps());
4967  }
4968 
4969  UniqueSet<Map> map_set = instr->hydrogen()->map_set();
4970  Label success;
4971  for (int i = 0; i < map_set.size() - 1; i++) {
4972  Handle<Map> map = map_set.at(i).handle();
4973  __ CompareMap(reg, map);
4974  __ j(equal, &success, Label::kNear);
4975  }
4976 
4977  Handle<Map> map = map_set.at(map_set.size() - 1).handle();
4978  __ CompareMap(reg, map);
4979  if (instr->hydrogen()->has_migration_target()) {
4980  __ j(not_equal, deferred->entry());
4981  } else {
4982  DeoptimizeIf(not_equal, instr->environment());
4983  }
4984 
4985  __ bind(&success);
4986 }
4987 
4988 
4989 void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
4990  XMMRegister value_reg = ToDoubleRegister(instr->unclamped());
4991  XMMRegister xmm_scratch = double_scratch0();
4992  Register result_reg = ToRegister(instr->result());
4993  __ ClampDoubleToUint8(value_reg, xmm_scratch, result_reg);
4994 }
4995 
4996 
4997 void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
4998  ASSERT(instr->unclamped()->Equals(instr->result()));
4999  Register value_reg = ToRegister(instr->result());
5000  __ ClampUint8(value_reg);
5001 }
5002 
5003 
5004 void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
5005  ASSERT(instr->unclamped()->Equals(instr->result()));
5006  Register input_reg = ToRegister(instr->unclamped());
5007  XMMRegister temp_xmm_reg = ToDoubleRegister(instr->temp_xmm());
5008  XMMRegister xmm_scratch = double_scratch0();
5009  Label is_smi, done, heap_number;
5010  Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear;
5011  __ JumpIfSmi(input_reg, &is_smi, dist);
5012 
5013  // Check for heap number
5014  __ Cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
5015  factory()->heap_number_map());
5016  __ j(equal, &heap_number, Label::kNear);
5017 
5018  // Check for undefined. Undefined is converted to zero for clamping
5019  // conversions.
5020  __ Cmp(input_reg, factory()->undefined_value());
5021  DeoptimizeIf(not_equal, instr->environment());
5022  __ xorl(input_reg, input_reg);
5023  __ jmp(&done, Label::kNear);
5024 
5025  // Heap number
5026  __ bind(&heap_number);
5027  __ movsd(xmm_scratch, FieldOperand(input_reg, HeapNumber::kValueOffset));
5028  __ ClampDoubleToUint8(xmm_scratch, temp_xmm_reg, input_reg);
5029  __ jmp(&done, Label::kNear);
5030 
5031  // smi
5032  __ bind(&is_smi);
5033  __ SmiToInteger32(input_reg, input_reg);
5034  __ ClampUint8(input_reg);
5035 
5036  __ bind(&done);
5037 }
5038 
5039 
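A value-level sketch of what the three clamp instructions compute: smis are clamped as integers, heap numbers as doubles, undefined becomes 0, and anything else deoptimizes. Illustrative only; the plain nearest-integer rounding below is an assumption, while the generated code relies on the macro assembler's ClampUint8/ClampDoubleToUint8 helpers:

#include <cmath>
#include <cstdint>

uint8_t ClampInt32ToUint8Sketch(int32_t value) {
  if (value < 0) return 0;
  if (value > 255) return 255;
  return static_cast<uint8_t>(value);
}

uint8_t ClampDoubleToUint8Sketch(double value) {
  if (!(value > 0.0)) return 0;    // NaN and non-positive values clamp to 0.
  if (value >= 255.0) return 255;  // Large values clamp to 255.
  return static_cast<uint8_t>(std::lround(value));
}
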
5040 void LCodeGen::DoDoubleBits(LDoubleBits* instr) {
5041  XMMRegister value_reg = ToDoubleRegister(instr->value());
5042  Register result_reg = ToRegister(instr->result());
5043  if (instr->hydrogen()->bits() == HDoubleBits::HIGH) {
5044  __ movq(result_reg, value_reg);
5045  __ shr(result_reg, Immediate(32));
5046  } else {
5047  __ movd(result_reg, value_reg);
5048  }
5049 }
5050 
5051 
5052 void LCodeGen::DoConstructDouble(LConstructDouble* instr) {
5053  Register hi_reg = ToRegister(instr->hi());
5054  Register lo_reg = ToRegister(instr->lo());
5055  XMMRegister result_reg = ToDoubleRegister(instr->result());
5056  XMMRegister xmm_scratch = double_scratch0();
5057  __ movd(result_reg, hi_reg);
5058  __ psllq(result_reg, 32);
5059  __ movd(xmm_scratch, lo_reg);
5060  __ orps(result_reg, xmm_scratch);
5061 }
5062 
5063 
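The bit-level effect of the DoDoubleBits/DoConstructDouble pair, written as portable C++ (illustrative only; the function names are hypothetical). HIGH extracts the upper 32 bits of the IEEE-754 representation, and ConstructDouble reassembles a double from two 32-bit halves, which is what the movd/psllq/orps sequence above computes in an XMM register:

#include <cstdint>
#include <cstring>

uint32_t DoubleHighBits(double value) {
  uint64_t bits;
  std::memcpy(&bits, &value, sizeof(bits));  // type-pun via memcpy
  return static_cast<uint32_t>(bits >> 32);
}

uint32_t DoubleLowBits(double value) {
  uint64_t bits;
  std::memcpy(&bits, &value, sizeof(bits));
  return static_cast<uint32_t>(bits);
}

double ConstructDouble(uint32_t hi, uint32_t lo) {
  uint64_t bits = (static_cast<uint64_t>(hi) << 32) | lo;
  double value;
  std::memcpy(&value, &bits, sizeof(value));
  return value;
}
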
5064 void LCodeGen::DoAllocate(LAllocate* instr) {
5065  class DeferredAllocate V8_FINAL : public LDeferredCode {
5066  public:
5067  DeferredAllocate(LCodeGen* codegen, LAllocate* instr)
5068  : LDeferredCode(codegen), instr_(instr) { }
5069  virtual void Generate() V8_OVERRIDE {
5070  codegen()->DoDeferredAllocate(instr_);
5071  }
5072  virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
5073  private:
5074  LAllocate* instr_;
5075  };
5076 
5077  DeferredAllocate* deferred =
5078  new(zone()) DeferredAllocate(this, instr);
5079 
5080  Register result = ToRegister(instr->result());
5081  Register temp = ToRegister(instr->temp());
5082 
5083  // Allocate memory for the object.
5084  AllocationFlags flags = TAG_OBJECT;
5085  if (instr->hydrogen()->MustAllocateDoubleAligned()) {
5086  flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
5087  }
5088  if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
5089  ASSERT(!instr->hydrogen()->IsOldDataSpaceAllocation());
5090  ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
5091  flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_POINTER_SPACE);
5092  } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
5093  ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
5094  flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_DATA_SPACE);
5095  }
5096 
5097  if (instr->size()->IsConstantOperand()) {
5098  int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
5099  if (size <= Page::kMaxRegularHeapObjectSize) {
5100  __ Allocate(size, result, temp, no_reg, deferred->entry(), flags);
5101  } else {
5102  __ jmp(deferred->entry());
5103  }
5104  } else {
5105  Register size = ToRegister(instr->size());
5106  __ Allocate(size, result, temp, no_reg, deferred->entry(), flags);
5107  }
5108 
5109  __ bind(deferred->exit());
5110 
5111  if (instr->hydrogen()->MustPrefillWithFiller()) {
5112  if (instr->size()->IsConstantOperand()) {
5113  int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
5114  __ movl(temp, Immediate((size / kPointerSize) - 1));
5115  } else {
5116  temp = ToRegister(instr->size());
5117  __ sar(temp, Immediate(kPointerSizeLog2));
5118  __ decl(temp);
5119  }
5120  Label loop;
5121  __ bind(&loop);
5122  __ Move(FieldOperand(result, temp, times_pointer_size, 0),
5123  isolate()->factory()->one_pointer_filler_map());
5124  __ decl(temp);
5125  __ j(not_zero, &loop);
5126  }
5127 }
5128 
5129 
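What the MustPrefillWithFiller loop above does, expressed over a plain array of machine words (illustrative only; the function and parameter names are hypothetical). With n = size / kPointerSize, the generated loop stores the one-pointer filler map into words n-1 down to 1, leaving word 0 (the map slot) untouched, so the heap stays iterable before the object is fully initialized:

#include <cstddef>
#include <cstdint>

void PrefillWithFillerSketch(uintptr_t* object_words, std::size_t size_in_bytes,
                             uintptr_t one_pointer_filler_map) {
  std::size_t count = size_in_bytes / sizeof(uintptr_t) - 1;
  // Mirrors the decl/j(not_zero) loop: indices count, count-1, ..., 1.
  for (std::size_t index = count; index != 0; --index) {
    object_words[index] = one_pointer_filler_map;
  }
}
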
5130 void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
5131  Register result = ToRegister(instr->result());
5132 
5133  // TODO(3095996): Get rid of this. For now, we need to make the
5134  // result register contain a valid pointer because it is already
5135  // contained in the register pointer map.
5136  __ Move(result, Smi::FromInt(0));
5137 
5138  PushSafepointRegistersScope scope(this);
5139  if (instr->size()->IsRegister()) {
5140  Register size = ToRegister(instr->size());
5141  ASSERT(!size.is(result));
5142  __ Integer32ToSmi(size, size);
5143  __ Push(size);
5144  } else {
5145  int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
5146  __ Push(Smi::FromInt(size));
5147  }
5148 
5149  int flags = 0;
5150  if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
5151  ASSERT(!instr->hydrogen()->IsOldDataSpaceAllocation());
5152  ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
5153  flags = AllocateTargetSpace::update(flags, OLD_POINTER_SPACE);
5154  } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
5155  ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
5156  flags = AllocateTargetSpace::update(flags, OLD_DATA_SPACE);
5157  } else {
5158  flags = AllocateTargetSpace::update(flags, NEW_SPACE);
5159  }
5160  __ Push(Smi::FromInt(flags));
5161 
5162  CallRuntimeFromDeferred(
5163  Runtime::kHiddenAllocateInTargetSpace, 2, instr, instr->context());
5164  __ StoreToSafepointRegisterSlot(result, rax);
5165 }
5166 
5167 
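The flags smi pushed for the runtime call packs the target space with a BitField-style encoder (AllocateTargetSpace::update). A minimal sketch of that pattern follows; it is illustrative only, and the field position, width, enumerator names and values used here are assumptions rather than the real layout:

#include <cstdint>

template <typename T, int kShift, int kSize>
struct BitFieldSketch {
  static uint32_t encode(T value) {
    return static_cast<uint32_t>(value) << kShift;
  }
  static uint32_t update(uint32_t previous, T value) {
    uint32_t mask = ((1u << kSize) - 1) << kShift;
    return (previous & ~mask) | encode(value);
  }
  static T decode(uint32_t value) {
    return static_cast<T>((value >> kShift) & ((1u << kSize) - 1));
  }
};

enum AllocationSpaceSketch {
  NEW_SPACE_ID = 0,
  OLD_POINTER_SPACE_ID = 1,
  OLD_DATA_SPACE_ID = 2
};

using AllocateTargetSpaceSketch = BitFieldSketch<AllocationSpaceSketch, 0, 2>;

// e.g. uint32_t flags = AllocateTargetSpaceSketch::update(0, OLD_DATA_SPACE_ID);
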
5168 void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
5169  ASSERT(ToRegister(instr->value()).is(rax));
5170  __ Push(rax);
5171  CallRuntime(Runtime::kToFastProperties, 1, instr);
5172 }
5173 
5174 
5175 void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
5176  ASSERT(ToRegister(instr->context()).is(rsi));
5177  Label materialized;
5178  // Registers will be used as follows:
5179  // rcx = literals array.
5180  // rbx = regexp literal.
5181  // rax = regexp literal clone.
5182  int literal_offset =
5183  FixedArray::OffsetOfElementAt(instr->hydrogen()->literal_index());
5184  __ Move(rcx, instr->hydrogen()->literals());
5185  __ movp(rbx, FieldOperand(rcx, literal_offset));
5186  __ CompareRoot(rbx, Heap::kUndefinedValueRootIndex);
5187  __ j(not_equal, &materialized, Label::kNear);
5188 
5189  // Create regexp literal using runtime function
5190  // Result will be in rax.
5191  __ Push(rcx);
5192  __ Push(Smi::FromInt(instr->hydrogen()->literal_index()));
5193  __ Push(instr->hydrogen()->pattern());
5194  __ Push(instr->hydrogen()->flags());
5195  CallRuntime(Runtime::kHiddenMaterializeRegExpLiteral, 4, instr);
5196  __ movp(rbx, rax);
5197 
5198  __ bind(&materialized);
5199  int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
5200  Label allocated, runtime_allocate;
5201  __ Allocate(size, rax, rcx, rdx, &runtime_allocate, TAG_OBJECT);
5202  __ jmp(&allocated, Label::kNear);
5203 
5204  __ bind(&runtime_allocate);
5205  __ Push(rbx);
5206  __ Push(Smi::FromInt(size));
5207  CallRuntime(Runtime::kHiddenAllocateInNewSpace, 1, instr);
5208  __ Pop(rbx);
5209 
5210  __ bind(&allocated);
5211  // Copy the content into the newly allocated memory.
5212  // (Unroll copy loop once for better throughput).
5213  for (int i = 0; i < size - kPointerSize; i += 2 * kPointerSize) {
5214  __ movp(rdx, FieldOperand(rbx, i));
5215  __ movp(rcx, FieldOperand(rbx, i + kPointerSize));
5216  __ movp(FieldOperand(rax, i), rdx);
5217  __ movp(FieldOperand(rax, i + kPointerSize), rcx);
5218  }
5219  if ((size % (2 * kPointerSize)) != 0) {
5220  __ movp(rdx, FieldOperand(rbx, size - kPointerSize));
5221  __ movp(FieldOperand(rax, size - kPointerSize), rdx);
5222  }
5223 }
5224 
5225 
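The shallow copy performed by the unrolled loop above, expressed over plain word arrays (illustrative only; the function name is hypothetical). Two words are copied per iteration, and a final single word handles an odd word count, mirroring the (size % (2 * kPointerSize)) != 0 tail:

#include <cstddef>
#include <cstdint>

void CopyObjectWordsSketch(uintptr_t* dst, const uintptr_t* src,
                           std::size_t size_in_bytes) {
  const std::size_t word = sizeof(uintptr_t);
  std::size_t i = 0;
  for (; i + 2 * word <= size_in_bytes; i += 2 * word) {  // unrolled by two
    dst[i / word] = src[i / word];
    dst[i / word + 1] = src[i / word + 1];
  }
  if (size_in_bytes % (2 * word) != 0) {                  // odd trailing word
    dst[size_in_bytes / word - 1] = src[size_in_bytes / word - 1];
  }
}
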
5226 void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
5227  ASSERT(ToRegister(instr->context()).is(rsi));
5228  // Use the fast case closure allocation code that allocates in new
5229  // space for nested functions that don't need literals cloning.
5230  bool pretenure = instr->hydrogen()->pretenure();
5231  if (!pretenure && instr->hydrogen()->has_no_literals()) {
5232  FastNewClosureStub stub(instr->hydrogen()->strict_mode(),
5233  instr->hydrogen()->is_generator());
5234  __ Move(rbx, instr->hydrogen()->shared_info());
5235  CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
5236  } else {
5237  __ Push(rsi);
5238  __ Push(instr->hydrogen()->shared_info());
5239  __ PushRoot(pretenure ? Heap::kTrueValueRootIndex :
5240  Heap::kFalseValueRootIndex);
5241  CallRuntime(Runtime::kHiddenNewClosure, 3, instr);
5242  }
5243 }
5244 
5245 
5246 void LCodeGen::DoTypeof(LTypeof* instr) {
5247  ASSERT(ToRegister(instr->context()).is(rsi));
5248  LOperand* input = instr->value();
5249  EmitPushTaggedOperand(input);
5250  CallRuntime(Runtime::kTypeof, 1, instr);
5251 }
5252 
5253 
5254 void LCodeGen::EmitPushTaggedOperand(LOperand* operand) {
5255  ASSERT(!operand->IsDoubleRegister());
5256  if (operand->IsConstantOperand()) {
5257  __ Push(ToHandle(LConstantOperand::cast(operand)));
5258  } else if (operand->IsRegister()) {
5259  __ Push(ToRegister(operand));
5260  } else {
5261  __ Push(ToOperand(operand));
5262  }
5263 }
5264 
5265 
5266 void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
5267  Register input = ToRegister(instr->value());
5268  Condition final_branch_condition = EmitTypeofIs(instr, input);
5269  if (final_branch_condition != no_condition) {
5270  EmitBranch(instr, final_branch_condition);
5271  }
5272 }
5273 
5274 
5275 Condition LCodeGen::EmitTypeofIs(LTypeofIsAndBranch* instr, Register input) {
5276  Label* true_label = instr->TrueLabel(chunk_);
5277  Label* false_label = instr->FalseLabel(chunk_);
5278  Handle<String> type_name = instr->type_literal();
5279  int left_block = instr->TrueDestination(chunk_);
5280  int right_block = instr->FalseDestination(chunk_);
5281  int next_block = GetNextEmittedBlock();
5282 
5283  Label::Distance true_distance = left_block == next_block ? Label::kNear
5284  : Label::kFar;
5285  Label::Distance false_distance = right_block == next_block ? Label::kNear
5286  : Label::kFar;
5287  Condition final_branch_condition = no_condition;
5288  if (type_name->Equals(heap()->number_string())) {
5289  __ JumpIfSmi(input, true_label, true_distance);
5290  __ CompareRoot(FieldOperand(input, HeapObject::kMapOffset),
5291  Heap::kHeapNumberMapRootIndex);
5292 
5293  final_branch_condition = equal;
5294 
5295  } else if (type_name->Equals(heap()->string_string())) {
5296  __ JumpIfSmi(input, false_label, false_distance);
5297  __ CmpObjectType(input, FIRST_NONSTRING_TYPE, input);
5298  __ j(above_equal, false_label, false_distance);
5299  __ testb(FieldOperand(input, Map::kBitFieldOffset),
5300  Immediate(1 << Map::kIsUndetectable));
5301  final_branch_condition = zero;
5302 
5303  } else if (type_name->Equals(heap()->symbol_string())) {
5304  __ JumpIfSmi(input, false_label, false_distance);
5305  __ CmpObjectType(input, SYMBOL_TYPE, input);
5306  final_branch_condition = equal;
5307 
5308  } else if (type_name->Equals(heap()->boolean_string())) {
5309  __ CompareRoot(input, Heap::kTrueValueRootIndex);
5310  __ j(equal, true_label, true_distance);
5311  __ CompareRoot(input, Heap::kFalseValueRootIndex);
5312  final_branch_condition = equal;
5313 
5314  } else if (FLAG_harmony_typeof && type_name->Equals(heap()->null_string())) {
5315  __ CompareRoot(input, Heap::kNullValueRootIndex);
5316  final_branch_condition = equal;
5317 
5318  } else if (type_name->Equals(heap()->undefined_string())) {
5319  __ CompareRoot(input, Heap::kUndefinedValueRootIndex);
5320  __ j(equal, true_label, true_distance);
5321  __ JumpIfSmi(input, false_label, false_distance);
5322  // Check for undetectable objects => true.
5323  __ movp(input, FieldOperand(input, HeapObject::kMapOffset));
5324  __ testb(FieldOperand(input, Map::kBitFieldOffset),
5325  Immediate(1 << Map::kIsUndetectable));
5326  final_branch_condition = not_zero;
5327 
5328  } else if (type_name->Equals(heap()->function_string())) {
5329  STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
5330  __ JumpIfSmi(input, false_label, false_distance);
5331  __ CmpObjectType(input, JS_FUNCTION_TYPE, input);
5332  __ j(equal, true_label, true_distance);
5333  __ CmpInstanceType(input, JS_FUNCTION_PROXY_TYPE);
5334  final_branch_condition = equal;
5335 
5336  } else if (type_name->Equals(heap()->object_string())) {
5337  __ JumpIfSmi(input, false_label, false_distance);
5338  if (!FLAG_harmony_typeof) {
5339  __ CompareRoot(input, Heap::kNullValueRootIndex);
5340  __ j(equal, true_label, true_distance);
5341  }
5342  __ CmpObjectType(input, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE, input);
5343  __ j(below, false_label, false_distance);
5344  __ CmpInstanceType(input, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
5345  __ j(above, false_label, false_distance);
5346  // Check for undetectable objects => false.
5347  __ testb(FieldOperand(input, Map::kBitFieldOffset),
5348  Immediate(1 << Map::kIsUndetectable));
5349  final_branch_condition = zero;
5350 
5351  } else {
5352  __ jmp(false_label, false_distance);
5353  }
5354 
5355  return final_branch_condition;
5356 }
5357 
5358 
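The value categories EmitTypeofIs distinguishes and the typeof string each one matches, written as a plain lookup (illustrative only; the enum and function are hypothetical). Undetectable objects report "undefined", JS functions and function proxies report "function", and anything the emitter does not recognize falls through to the unconditional jump to the false label:

#include <string>

enum class ValueKind {
  kSmi, kHeapNumber, kString, kSymbol, kBoolean, kUndefined,
  kUndetectableObject, kFunction, kFunctionProxy, kNull, kOtherObject
};

std::string TypeofSketch(ValueKind kind) {
  switch (kind) {
    case ValueKind::kSmi:
    case ValueKind::kHeapNumber:         return "number";
    case ValueKind::kString:             return "string";
    case ValueKind::kSymbol:             return "symbol";
    case ValueKind::kBoolean:            return "boolean";
    case ValueKind::kUndefined:
    case ValueKind::kUndetectableObject: return "undefined";
    case ValueKind::kFunction:
    case ValueKind::kFunctionProxy:      return "function";
    case ValueKind::kNull:               return "object";  // "null" only with --harmony-typeof
    case ValueKind::kOtherObject:        return "object";
  }
  return "object";
}
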
5359 void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
5360  Register temp = ToRegister(instr->temp());
5361 
5362  EmitIsConstructCall(temp);
5363  EmitBranch(instr, equal);
5364 }
5365 
5366 
5367 void LCodeGen::EmitIsConstructCall(Register temp) {
5368  // Get the frame pointer for the calling frame.
5369  __ movp(temp, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
5370 
5371  // Skip the arguments adaptor frame if it exists.
5372  Label check_frame_marker;
5373  __ Cmp(Operand(temp, StandardFrameConstants::kContextOffset),
5374  Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
5375  __ j(not_equal, &check_frame_marker, Label::kNear);
5376  __ movp(temp, Operand(temp, StandardFrameConstants::kCallerFPOffset));
5377 
5378  // Check the marker in the calling frame.
5379  __ bind(&check_frame_marker);
5380  __ Cmp(Operand(temp, StandardFrameConstants::kMarkerOffset),
5381  Smi::FromInt(StackFrame::CONSTRUCT));
5382 }
5383 
5384 
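The frame walk EmitIsConstructCall performs, over a toy frame representation (illustrative only; FrameSketch and its fields are hypothetical). The caller frame is examined, an arguments adaptor frame is skipped if present, and the answer is whether the resulting frame carries the CONSTRUCT marker:

struct FrameSketch {
  const FrameSketch* caller;
  bool is_arguments_adaptor;
  bool is_construct;
};

// Assumes a caller frame exists, as the generated code does.
bool IsConstructCallSketch(const FrameSketch& current) {
  const FrameSketch* frame = current.caller;
  if (frame->is_arguments_adaptor) frame = frame->caller;
  return frame->is_construct;
}
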
5385 void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
5386  if (!info()->IsStub()) {
5387  // Ensure that we have enough space after the previous lazy-bailout
5388  // instruction for patching the code here.
5389  int current_pc = masm()->pc_offset();
5390  if (current_pc < last_lazy_deopt_pc_ + space_needed) {
5391  int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
5392  __ Nop(padding_size);
5393  }
5394  }
5395  last_lazy_deopt_pc_ = masm()->pc_offset();
5396 }
5397 
5398 
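The padding computation above in isolation (illustrative only; the helper name is hypothetical). If the distance from the last lazy-deopt point to the current pc is smaller than the patchable call sequence, that many nop bytes are emitted so the deoptimizer can always overwrite a full call:

#include <algorithm>

int LazyDeoptPaddingBytes(int last_lazy_deopt_pc, int current_pc,
                          int space_needed) {
  return std::max(0, last_lazy_deopt_pc + space_needed - current_pc);
}
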
5399 void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
5400  last_lazy_deopt_pc_ = masm()->pc_offset();
5401  ASSERT(instr->HasEnvironment());
5402  LEnvironment* env = instr->environment();
5403  RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
5404  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
5405 }
5406 
5407 
5408 void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
5409  Deoptimizer::BailoutType type = instr->hydrogen()->type();
5410  // TODO(danno): Stubs expect all deopts to be lazy for historical reasons (the
5411  // needed return address), even though the implementation of LAZY and EAGER is
5412  // now identical. When LAZY is eventually completely folded into EAGER, remove
5413  // the special case below.
5414  if (info()->IsStub() && type == Deoptimizer::EAGER) {
5415  type = Deoptimizer::LAZY;
5416  }
5417 
5418  Comment(";;; deoptimize: %s", instr->hydrogen()->reason());
5419  DeoptimizeIf(no_condition, instr->environment(), type);
5420 }
5421 
5422 
5423 void LCodeGen::DoDummy(LDummy* instr) {
5424  // Nothing to see here, move on!
5425 }
5426 
5427 
5428 void LCodeGen::DoDummyUse(LDummyUse* instr) {
5429  // Nothing to see here, move on!
5430 }
5431 
5432 
5433 void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
5434  PushSafepointRegistersScope scope(this);
5435  __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
5436  __ CallRuntimeSaveDoubles(Runtime::kHiddenStackGuard);
5437  RecordSafepointWithLazyDeopt(instr, RECORD_SAFEPOINT_WITH_REGISTERS, 0);
5438  ASSERT(instr->HasEnvironment());
5439  LEnvironment* env = instr->environment();
5440  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
5441 }
5442 
5443 
5444 void LCodeGen::DoStackCheck(LStackCheck* instr) {
5445  class DeferredStackCheck V8_FINAL : public LDeferredCode {
5446  public:
5447  DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
5448  : LDeferredCode(codegen), instr_(instr) { }
5449  virtual void Generate() V8_OVERRIDE {
5450  codegen()->DoDeferredStackCheck(instr_);
5451  }
5452  virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
5453  private:
5454  LStackCheck* instr_;
5455  };
5456 
5457  ASSERT(instr->HasEnvironment());
5458  LEnvironment* env = instr->environment();
5459  // There is no LLazyBailout instruction for stack-checks. We have to
5460  // prepare for lazy deoptimization explicitly here.
5461  if (instr->hydrogen()->is_function_entry()) {
5462  // Perform stack overflow check.
5463  Label done;
5464  __ CompareRoot(rsp, Heap::kStackLimitRootIndex);
5465  __ j(above_equal, &done, Label::kNear);
5466 
5467  ASSERT(instr->context()->IsRegister());
5468  ASSERT(ToRegister(instr->context()).is(rsi));
5469  CallCode(isolate()->builtins()->StackCheck(),
5470  RelocInfo::CODE_TARGET,
5471  instr);
5472  __ bind(&done);
5473  } else {
5474  ASSERT(instr->hydrogen()->is_backwards_branch());
5475  // Perform stack overflow check if this goto needs it before jumping.
5476  DeferredStackCheck* deferred_stack_check =
5477  new(zone()) DeferredStackCheck(this, instr);
5478  __ CompareRoot(rsp, Heap::kStackLimitRootIndex);
5479  __ j(below, deferred_stack_check->entry());
5480  EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
5481  __ bind(instr->done_label());
5482  deferred_stack_check->SetExit(instr->done_label());
5483  RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
5484  // Don't record a deoptimization index for the safepoint here.
5485  // This will be done explicitly when emitting call and the safepoint in
5486  // the deferred code.
5487  }
5488 }
5489 
5490 
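The decision DoStackCheck encodes, at the value level (illustrative only; the helper name is hypothetical). The stack grows downward, so a stack pointer at or above the limit root means there is still room; only when rsp drops below the limit does the slow path run, either the StackCheck builtin at function entry or the deferred runtime call at backward branches:

#include <cstdint>

bool NeedsStackGuardCall(uintptr_t stack_pointer, uintptr_t stack_limit) {
  return stack_pointer < stack_limit;  // below the limit => take the slow path
}
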
5491 void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
5492  // This is a pseudo-instruction that ensures that the environment here is
5493  // properly registered for deoptimization and records the assembler's PC
5494  // offset.
5495  LEnvironment* environment = instr->environment();
5496 
5497  // If the environment were already registered, we would have no way of
5498  // backpatching it with the spill slot operands.
5499  ASSERT(!environment->HasBeenRegistered());
5500  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
5501 
5502  GenerateOsrPrologue();
5503 }
5504 
5505 
5506 void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
5507  ASSERT(ToRegister(instr->context()).is(rsi));
5508  __ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
5509  DeoptimizeIf(equal, instr->environment());
5510 
5511  Register null_value = rdi;
5512  __ LoadRoot(null_value, Heap::kNullValueRootIndex);
5513  __ cmpp(rax, null_value);
5514  DeoptimizeIf(equal, instr->environment());
5515 
5516  Condition cc = masm()->CheckSmi(rax);
5517  DeoptimizeIf(cc, instr->environment());
5518 
5519  STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
5520  __ CmpObjectType(rax, LAST_JS_PROXY_TYPE, rcx);
5521  DeoptimizeIf(below_equal, instr->environment());
5522 
5523  Label use_cache, call_runtime;
5524  __ CheckEnumCache(null_value, &call_runtime);
5525 
5526  __ movp(rax, FieldOperand(rax, HeapObject::kMapOffset));
5527  __ jmp(&use_cache, Label::kNear);
5528 
5529  // Get the set of properties to enumerate.
5530  __ bind(&call_runtime);
5531  __ Push(rax);
5532  CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr);
5533 
5534  __ CompareRoot(FieldOperand(rax, HeapObject::kMapOffset),
5535  Heap::kMetaMapRootIndex);
5536  DeoptimizeIf(not_equal, instr->environment());
5537  __ bind(&use_cache);
5538 }
5539 
5540 
5541 void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
5542  Register map = ToRegister(instr->map());
5543  Register result = ToRegister(instr->result());
5544  Label load_cache, done;
5545  __ EnumLength(result, map);
5546  __ Cmp(result, Smi::FromInt(0));
5547  __ j(not_equal, &load_cache, Label::kNear);
5548  __ LoadRoot(result, Heap::kEmptyFixedArrayRootIndex);
5549  __ jmp(&done, Label::kNear);
5550  __ bind(&load_cache);
5551  __ LoadInstanceDescriptors(map, result);
5552  __ movp(result,
5553  FieldOperand(result, DescriptorArray::kEnumCacheOffset));
5554  __ movp(result,
5555  FieldOperand(result, FixedArray::SizeFor(instr->idx())));
5556  __ bind(&done);
5557  Condition cc = masm()->CheckSmi(result);
5558  DeoptimizeIf(cc, instr->environment());
5559 }
5560 
5561 
5562 void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
5563  Register object = ToRegister(instr->value());
5564  __ cmpp(ToRegister(instr->map()),
5565  FieldOperand(object, HeapObject::kMapOffset));
5566  DeoptimizeIf(not_equal, instr->environment());
5567 }
5568 
5569 
5570 void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
5571  Register object = ToRegister(instr->object());
5572  Register index = ToRegister(instr->index());
5573 
5574  Label out_of_object, done;
5575  __ SmiToInteger32(index, index);
5576  __ cmpl(index, Immediate(0));
5577  __ j(less, &out_of_object, Label::kNear);
5578  __ movp(object, FieldOperand(object,
5579  index,
5580  times_pointer_size,
5581  JSObject::kHeaderSize));
5582  __ jmp(&done, Label::kNear);
5583 
5584  __ bind(&out_of_object);
5585  __ movp(object, FieldOperand(object, JSObject::kPropertiesOffset));
5586  __ negl(index);
5587  // Index is now equal to out of object property index plus 1.
5588  __ movp(object, FieldOperand(object,
5589  index,
5590  times_pointer_size,
5591  FixedArray::kHeaderSize - kPointerSize));
5592  __ bind(&done);
5593 }
5594 
5595 
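The field-index encoding DoLoadFieldByIndex decodes, over a toy object layout (illustrative only; FakeObject and the helper are hypothetical). A non-negative index selects an in-object field, while a negative index selects slot (-index - 1) of the out-of-object properties backing store, which is why the generated code negates the index and then subtracts one pointer from the FixedArray header offset:

#include <cstdint>
#include <vector>

struct FakeObject {
  std::vector<intptr_t> in_object_fields;          // stand-in for in-object slots
  std::vector<intptr_t> out_of_object_properties;  // stand-in for the properties FixedArray
};

intptr_t LoadFieldByIndexSketch(const FakeObject& object, int index) {
  if (index >= 0) {
    return object.in_object_fields[index];
  }
  return object.out_of_object_properties[-index - 1];
}
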
5596 #undef __
5597 
5598 } } // namespace v8::internal
5599 
5600 #endif // V8_TARGET_ARCH_X64