v8 3.14.5 (node 0.10.28)
V8 is Google's open source JavaScript engine
lithium-codegen-x64.cc
1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
4 // met:
5 //
6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided
11 // with the distribution.
12 // * Neither the name of Google Inc. nor the names of its
13 // contributors may be used to endorse or promote products derived
14 // from this software without specific prior written permission.
15 //
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 
28 #include "v8.h"
29 
30 #if defined(V8_TARGET_ARCH_X64)
31 
33 #include "code-stubs.h"
34 #include "stub-cache.h"
35 
36 namespace v8 {
37 namespace internal {
38 
39 
40 // When invoking builtins, we need to record the safepoint in the middle of
41 // the invoke instruction sequence generated by the macro assembler.
42 class SafepointGenerator : public CallWrapper {
43  public:
44  SafepointGenerator(LCodeGen* codegen,
45  LPointerMap* pointers,
46  Safepoint::DeoptMode mode)
47  : codegen_(codegen),
48  pointers_(pointers),
49  deopt_mode_(mode) { }
50  virtual ~SafepointGenerator() { }
51 
52  virtual void BeforeCall(int call_size) const {
53  codegen_->EnsureSpaceForLazyDeopt(Deoptimizer::patch_size() - call_size);
54  }
55 
56  virtual void AfterCall() const {
57  codegen_->RecordSafepoint(pointers_, deopt_mode_);
58  }
59 
60  private:
61  LCodeGen* codegen_;
62  LPointerMap* pointers_;
63  Safepoint::DeoptMode deopt_mode_;
64 };
65 
66 
67 #define __ masm()->
68 
69 bool LCodeGen::GenerateCode() {
70  HPhase phase("Z_Code generation", chunk());
71  ASSERT(is_unused());
72  status_ = GENERATING;
73 
74  // Open a frame scope to indicate that there is a frame on the stack. The
75  // MANUAL indicates that the scope shouldn't actually generate code to set up
76  // the frame (that is done in GeneratePrologue).
77  FrameScope frame_scope(masm_, StackFrame::MANUAL);
78 
79  return GeneratePrologue() &&
80  GenerateBody() &&
81  GenerateDeferredCode() &&
82  GenerateJumpTable() &&
83  GenerateSafepointTable();
84 }
85 
86 
87 void LCodeGen::FinishCode(Handle<Code> code) {
88  ASSERT(is_done());
89  code->set_stack_slots(GetStackSlotCount());
90  code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
91  PopulateDeoptimizationData(code);
92 }
93 
94 
95 void LChunkBuilder::Abort(const char* reason) {
96  info()->set_bailout_reason(reason);
97  status_ = ABORTED;
98 }
99 
100 
101 void LCodeGen::Comment(const char* format, ...) {
102  if (!FLAG_code_comments) return;
103  char buffer[4 * KB];
104  StringBuilder builder(buffer, ARRAY_SIZE(buffer));
105  va_list arguments;
106  va_start(arguments, format);
107  builder.AddFormattedList(format, arguments);
108  va_end(arguments);
109 
110  // Copy the string before recording it in the assembler to avoid
111  // issues when the stack allocated buffer goes out of scope.
112  int length = builder.position();
113  Vector<char> copy = Vector<char>::New(length + 1);
114  memcpy(copy.start(), builder.Finalize(), copy.length());
115  masm()->RecordComment(copy.start());
116 }
117 
118 
119 bool LCodeGen::GeneratePrologue() {
120  ASSERT(is_generating());
121 
123 
124 #ifdef DEBUG
125  if (strlen(FLAG_stop_at) > 0 &&
126  info_->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
127  __ int3();
128  }
129 #endif
130 
131  // Strict mode functions need to replace the receiver with undefined
132  // when called as functions (without an explicit receiver
133  // object). rcx is zero for method calls and non-zero for function
134  // calls.
135  if (!info_->is_classic_mode() || info_->is_native()) {
136  Label ok;
137  __ testq(rcx, rcx);
138  __ j(zero, &ok, Label::kNear);
139  // +1 for return address.
140  int receiver_offset = (scope()->num_parameters() + 1) * kPointerSize;
141  __ LoadRoot(kScratchRegister, Heap::kUndefinedValueRootIndex);
142  __ movq(Operand(rsp, receiver_offset), kScratchRegister);
143  __ bind(&ok);
144  }
145 
146  __ push(rbp); // Caller's frame pointer.
147  __ movq(rbp, rsp);
148  __ push(rsi); // Callee's context.
149  __ push(rdi); // Callee's JS function.
150 
151  // Reserve space for the stack slots needed by the code.
152  int slots = GetStackSlotCount();
153  if (slots > 0) {
154  if (FLAG_debug_code) {
155  __ Set(rax, slots);
156  __ movq(kScratchRegister, kSlotsZapValue, RelocInfo::NONE);
157  Label loop;
158  __ bind(&loop);
159  __ push(kScratchRegister);
160  __ decl(rax);
161  __ j(not_zero, &loop);
162  } else {
163  __ subq(rsp, Immediate(slots * kPointerSize));
164 #ifdef _MSC_VER
165  // On Windows, you may not access the stack more than one page below
166  // the most recently mapped page. To make the allocated area randomly
167  // accessible, we write to each page in turn (the value is irrelevant).
168  const int kPageSize = 4 * KB;
169  for (int offset = slots * kPointerSize - kPageSize;
170  offset > 0;
171  offset -= kPageSize) {
172  __ movq(Operand(rsp, offset), rax);
173  }
174 #endif
175  }
176  }
177 
178  // Possibly allocate a local context.
179  int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
180  if (heap_slots > 0) {
181  Comment(";;; Allocate local context");
182  // Argument to NewContext is the function, which is still in rdi.
183  __ push(rdi);
184  if (heap_slots <= FastNewContextStub::kMaximumSlots) {
185  FastNewContextStub stub(heap_slots);
186  __ CallStub(&stub);
187  } else {
188  __ CallRuntime(Runtime::kNewFunctionContext, 1);
189  }
190  RecordSafepoint(Safepoint::kNoLazyDeopt);
191  // Context is returned in both rax and rsi. It replaces the context
192  // passed to us. It's saved in the stack and kept live in rsi.
193  __ movq(Operand(rbp, StandardFrameConstants::kContextOffset), rsi);
194 
195  // Copy any necessary parameters into the context.
196  int num_parameters = scope()->num_parameters();
197  for (int i = 0; i < num_parameters; i++) {
198  Variable* var = scope()->parameter(i);
199  if (var->IsContextSlot()) {
200  int parameter_offset = StandardFrameConstants::kCallerSPOffset +
201  (num_parameters - 1 - i) * kPointerSize;
202  // Load parameter from stack.
203  __ movq(rax, Operand(rbp, parameter_offset));
204  // Store it in the context.
205  int context_offset = Context::SlotOffset(var->index());
206  __ movq(Operand(rsi, context_offset), rax);
207  // Update the write barrier. This clobbers rax and rbx.
208  __ RecordWriteContextSlot(rsi, context_offset, rax, rbx, kSaveFPRegs);
209  }
210  }
211  Comment(";;; End allocate local context");
212  }
213 
214  // Trace the call.
215  if (FLAG_trace) {
216  __ CallRuntime(Runtime::kTraceEnter, 0);
217  }
218  return !is_aborted();
219 }
220 
221 
222 bool LCodeGen::GenerateBody() {
223  ASSERT(is_generating());
224  bool emit_instructions = true;
225  for (current_instruction_ = 0;
226  !is_aborted() && current_instruction_ < instructions_->length();
227  current_instruction_++) {
228  LInstruction* instr = instructions_->at(current_instruction_);
229  if (instr->IsLabel()) {
230  LLabel* label = LLabel::cast(instr);
231  emit_instructions = !label->HasReplacement();
232  }
233 
234  if (emit_instructions) {
235  Comment(";;; @%d: %s.", current_instruction_, instr->Mnemonic());
236  instr->CompileToNative(this);
237  }
238  }
239  EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
240  return !is_aborted();
241 }
242 
243 
244 bool LCodeGen::GenerateJumpTable() {
245  for (int i = 0; i < jump_table_.length(); i++) {
246  __ bind(&jump_table_[i].label);
247  __ Jump(jump_table_[i].address, RelocInfo::RUNTIME_ENTRY);
248  }
249  return !is_aborted();
250 }
251 
252 
253 bool LCodeGen::GenerateDeferredCode() {
254  ASSERT(is_generating());
255  if (deferred_.length() > 0) {
256  for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
257  LDeferredCode* code = deferred_[i];
258  __ bind(code->entry());
259  Comment(";;; Deferred code @%d: %s.",
260  code->instruction_index(),
261  code->instr()->Mnemonic());
262  code->Generate();
263  __ jmp(code->exit());
264  }
265  }
266 
267  // Deferred code is the last part of the instruction sequence. Mark
268  // the generated code as done unless we bailed out.
269  if (!is_aborted()) status_ = DONE;
270  return !is_aborted();
271 }
272 
273 
274 bool LCodeGen::GenerateSafepointTable() {
275  ASSERT(is_done());
276  safepoints_.Emit(masm(), GetStackSlotCount());
277  return !is_aborted();
278 }
279 
280 
281 Register LCodeGen::ToRegister(int index) const {
282  return Register::FromAllocationIndex(index);
283 }
284 
285 
286 XMMRegister LCodeGen::ToDoubleRegister(int index) const {
287  return XMMRegister::FromAllocationIndex(index);
288 }
289 
290 
291 Register LCodeGen::ToRegister(LOperand* op) const {
292  ASSERT(op->IsRegister());
293  return ToRegister(op->index());
294 }
295 
296 
297 XMMRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
298  ASSERT(op->IsDoubleRegister());
299  return ToDoubleRegister(op->index());
300 }
301 
302 
303 bool LCodeGen::IsInteger32Constant(LConstantOperand* op) const {
304  return op->IsConstantOperand() &&
305  chunk_->LookupLiteralRepresentation(op).IsInteger32();
306 }
307 
308 
309 bool LCodeGen::IsTaggedConstant(LConstantOperand* op) const {
310  return op->IsConstantOperand() &&
311  chunk_->LookupLiteralRepresentation(op).IsTagged();
312 }
313 
314 
315 int LCodeGen::ToInteger32(LConstantOperand* op) const {
316  HConstant* constant = chunk_->LookupConstant(op);
317  ASSERT(chunk_->LookupLiteralRepresentation(op).IsInteger32());
318  ASSERT(constant->HasInteger32Value());
319  return constant->Integer32Value();
320 }
321 
322 
323 double LCodeGen::ToDouble(LConstantOperand* op) const {
324  HConstant* constant = chunk_->LookupConstant(op);
325  ASSERT(constant->HasDoubleValue());
326  return constant->DoubleValue();
327 }
328 
329 
330 Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
331  HConstant* constant = chunk_->LookupConstant(op);
332  ASSERT(chunk_->LookupLiteralRepresentation(op).IsTagged());
333  return constant->handle();
334 }
335 
336 
337 Operand LCodeGen::ToOperand(LOperand* op) const {
338  // Does not handle registers. In X64 assembler, plain registers are not
339  // representable as an Operand.
340  ASSERT(op->IsStackSlot() || op->IsDoubleStackSlot());
341  int index = op->index();
342  if (index >= 0) {
343  // Local or spill slot. Skip the frame pointer, function, and
344  // context in the fixed part of the frame.
345  return Operand(rbp, -(index + 3) * kPointerSize);
346  } else {
347  // Incoming parameter. Skip the return address.
348  return Operand(rbp, -(index - 1) * kPointerSize);
349  }
350 }
351 
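The offsets computed by ToOperand above follow directly from the frame built in GeneratePrologue: the saved rbp, context, and function sit below the frame pointer, and the return address sits above it. A minimal standalone sketch of the same arithmetic (not part of this file; it assumes the x64 kPointerSize of 8, and the helper name is hypothetical):

#include <cstdio>

// Mirrors ToOperand's index-to-offset mapping for stack and double stack slots.
int FrameOffsetForSlot(int index) {
  const int kPointerSize = 8;
  if (index >= 0) {
    // Spill slot: skip the saved context and function below rbp,
    // so slot 0 lives at rbp - 24.
    return -(index + 3) * kPointerSize;
  }
  // Incoming parameter (negative index): skip the return address,
  // so index -1 lives at rbp + 16.
  return -(index - 1) * kPointerSize;
}

int main() {
  printf("%d %d\n", FrameOffsetForSlot(0), FrameOffsetForSlot(-1));  // -24 16
  return 0;
}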
352 
353 void LCodeGen::WriteTranslation(LEnvironment* environment,
354  Translation* translation,
355  int* arguments_index,
356  int* arguments_count) {
357  if (environment == NULL) return;
358 
359  // The translation includes one command per value in the environment.
360  int translation_size = environment->values()->length();
361  // The output frame height does not include the parameters.
362  int height = translation_size - environment->parameter_count();
363 
364  // Function parameters are arguments to the outermost environment. The
365  // arguments index points to the first element of a sequence of tagged
366  // values on the stack that represent the arguments. This needs to be
367  // kept in sync with the LArgumentsElements implementation.
368  *arguments_index = -environment->parameter_count();
369  *arguments_count = environment->parameter_count();
370 
371  WriteTranslation(environment->outer(),
372  translation,
373  arguments_index,
374  arguments_count);
375  int closure_id = *info()->closure() != *environment->closure()
376  ? DefineDeoptimizationLiteral(environment->closure())
377  : Translation::kSelfLiteralId;
378 
379  switch (environment->frame_type()) {
380  case JS_FUNCTION:
381  translation->BeginJSFrame(environment->ast_id(), closure_id, height);
382  break;
383  case JS_CONSTRUCT:
384  translation->BeginConstructStubFrame(closure_id, translation_size);
385  break;
386  case JS_GETTER:
387  ASSERT(translation_size == 1);
388  ASSERT(height == 0);
389  translation->BeginGetterStubFrame(closure_id);
390  break;
391  case JS_SETTER:
392  ASSERT(translation_size == 2);
393  ASSERT(height == 0);
394  translation->BeginSetterStubFrame(closure_id);
395  break;
396  case ARGUMENTS_ADAPTOR:
397  translation->BeginArgumentsAdaptorFrame(closure_id, translation_size);
398  break;
399  }
400 
401  // Inlined frames which push their arguments cause the index to be
402  // bumped and a new stack area to be used for materialization.
403  if (environment->entry() != NULL &&
404  environment->entry()->arguments_pushed()) {
405  *arguments_index = *arguments_index < 0
406  ? GetStackSlotCount()
407  : *arguments_index + *arguments_count;
408  *arguments_count = environment->entry()->arguments_count() + 1;
409  }
410 
411  for (int i = 0; i < translation_size; ++i) {
412  LOperand* value = environment->values()->at(i);
413  // spilled_registers_ and spilled_double_registers_ are either
414  // both NULL or both set.
415  if (environment->spilled_registers() != NULL && value != NULL) {
416  if (value->IsRegister() &&
417  environment->spilled_registers()[value->index()] != NULL) {
418  translation->MarkDuplicate();
419  AddToTranslation(translation,
420  environment->spilled_registers()[value->index()],
421  environment->HasTaggedValueAt(i),
422  environment->HasUint32ValueAt(i),
423  *arguments_index,
424  *arguments_count);
425  } else if (
426  value->IsDoubleRegister() &&
427  environment->spilled_double_registers()[value->index()] != NULL) {
428  translation->MarkDuplicate();
429  AddToTranslation(
430  translation,
431  environment->spilled_double_registers()[value->index()],
432  false,
433  false,
434  *arguments_index,
435  *arguments_count);
436  }
437  }
438 
439  AddToTranslation(translation,
440  value,
441  environment->HasTaggedValueAt(i),
442  environment->HasUint32ValueAt(i),
443  *arguments_index,
444  *arguments_count);
445  }
446 }
447 
448 
449 void LCodeGen::AddToTranslation(Translation* translation,
450  LOperand* op,
451  bool is_tagged,
452  bool is_uint32,
453  int arguments_index,
454  int arguments_count) {
455  if (op == NULL) {
456  // TODO(twuerthinger): Introduce marker operands to indicate that this value
457  // is not present and must be reconstructed from the deoptimizer. Currently
458  // this is only used for the arguments object.
459  translation->StoreArgumentsObject(arguments_index, arguments_count);
460  } else if (op->IsStackSlot()) {
461  if (is_tagged) {
462  translation->StoreStackSlot(op->index());
463  } else if (is_uint32) {
464  translation->StoreUint32StackSlot(op->index());
465  } else {
466  translation->StoreInt32StackSlot(op->index());
467  }
468  } else if (op->IsDoubleStackSlot()) {
469  translation->StoreDoubleStackSlot(op->index());
470  } else if (op->IsArgument()) {
471  ASSERT(is_tagged);
472  int src_index = GetStackSlotCount() + op->index();
473  translation->StoreStackSlot(src_index);
474  } else if (op->IsRegister()) {
475  Register reg = ToRegister(op);
476  if (is_tagged) {
477  translation->StoreRegister(reg);
478  } else if (is_uint32) {
479  translation->StoreUint32Register(reg);
480  } else {
481  translation->StoreInt32Register(reg);
482  }
483  } else if (op->IsDoubleRegister()) {
484  XMMRegister reg = ToDoubleRegister(op);
485  translation->StoreDoubleRegister(reg);
486  } else if (op->IsConstantOperand()) {
487  HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op));
488  int src_index = DefineDeoptimizationLiteral(constant->handle());
489  translation->StoreLiteral(src_index);
490  } else {
491  UNREACHABLE();
492  }
493 }
494 
495 
496 void LCodeGen::CallCodeGeneric(Handle<Code> code,
497  RelocInfo::Mode mode,
498  LInstruction* instr,
499  SafepointMode safepoint_mode,
500  int argc) {
501  EnsureSpaceForLazyDeopt(Deoptimizer::patch_size() - masm()->CallSize(code));
502  ASSERT(instr != NULL);
503  LPointerMap* pointers = instr->pointer_map();
504  RecordPosition(pointers->position());
505  __ call(code, mode);
506  RecordSafepointWithLazyDeopt(instr, safepoint_mode, argc);
507 
508  // Signal that we don't inline smi code before these stubs in the
509  // optimizing code generator.
510  if (code->kind() == Code::BINARY_OP_IC ||
511  code->kind() == Code::COMPARE_IC) {
512  __ nop();
513  }
514 }
515 
516 
517 void LCodeGen::CallCode(Handle<Code> code,
518  RelocInfo::Mode mode,
519  LInstruction* instr) {
520  CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT, 0);
521 }
522 
523 
524 void LCodeGen::CallRuntime(const Runtime::Function* function,
525  int num_arguments,
526  LInstruction* instr) {
527  ASSERT(instr != NULL);
528  ASSERT(instr->HasPointerMap());
529  LPointerMap* pointers = instr->pointer_map();
530  RecordPosition(pointers->position());
531 
532  __ CallRuntime(function, num_arguments);
533  RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT, 0);
534 }
535 
536 
537 void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
538  int argc,
539  LInstruction* instr) {
541  __ CallRuntimeSaveDoubles(id);
542  RecordSafepointWithRegisters(
543  instr->pointer_map(), argc, Safepoint::kNoLazyDeopt);
544 }
545 
546 
547 void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
548  Safepoint::DeoptMode mode) {
549  if (!environment->HasBeenRegistered()) {
550  // Physical stack frame layout:
551  // -x ............. -4 0 ..................................... y
552  // [incoming arguments] [spill slots] [pushed outgoing arguments]
553 
554  // Layout of the environment:
555  // 0 ..................................................... size-1
556  // [parameters] [locals] [expression stack including arguments]
557 
558  // Layout of the translation:
559  // 0 ........................................................ size - 1 + 4
560  // [expression stack including arguments] [locals] [4 words] [parameters]
561  // |>------------ translation_size ------------<|
562 
563  int frame_count = 0;
564  int jsframe_count = 0;
565  int args_index = 0;
566  int args_count = 0;
567  for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
568  ++frame_count;
569  if (e->frame_type() == JS_FUNCTION) {
570  ++jsframe_count;
571  }
572  }
573  Translation translation(&translations_, frame_count, jsframe_count, zone());
574  WriteTranslation(environment, &translation, &args_index, &args_count);
575  int deoptimization_index = deoptimizations_.length();
576  int pc_offset = masm()->pc_offset();
577  environment->Register(deoptimization_index,
578  translation.index(),
579  (mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
580  deoptimizations_.Add(environment, environment->zone());
581  }
582 }
583 
584 
585 void LCodeGen::DeoptimizeIf(Condition cc, LEnvironment* environment) {
586  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
587  ASSERT(environment->HasBeenRegistered());
588  int id = environment->deoptimization_index();
589  Address entry = Deoptimizer::GetDeoptimizationEntry(id, Deoptimizer::EAGER);
590  if (entry == NULL) {
591  Abort("bailout was not prepared");
592  return;
593  }
594 
595  if (cc == no_condition) {
596  __ Jump(entry, RelocInfo::RUNTIME_ENTRY);
597  } else {
598  // We often have several deopts to the same entry, reuse the last
599  // jump entry if this is the case.
600  if (jump_table_.is_empty() ||
601  jump_table_.last().address != entry) {
602  jump_table_.Add(JumpTableEntry(entry), zone());
603  }
604  __ j(cc, &jump_table_.last().label);
605  }
606 }
607 
608 
609 void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
610  int length = deoptimizations_.length();
611  if (length == 0) return;
612  Handle<DeoptimizationInputData> data =
613  factory()->NewDeoptimizationInputData(length, TENURED);
614 
615  Handle<ByteArray> translations = translations_.CreateByteArray();
616  data->SetTranslationByteArray(*translations);
617  data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));
618 
619  Handle<FixedArray> literals =
620  factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
621  for (int i = 0; i < deoptimization_literals_.length(); i++) {
622  literals->set(i, *deoptimization_literals_[i]);
623  }
624  data->SetLiteralArray(*literals);
625 
626  data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id().ToInt()));
627  data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_));
628 
629  // Populate the deoptimization entries.
630  for (int i = 0; i < length; i++) {
631  LEnvironment* env = deoptimizations_[i];
632  data->SetAstId(i, env->ast_id());
633  data->SetTranslationIndex(i, Smi::FromInt(env->translation_index()));
634  data->SetArgumentsStackHeight(i,
635  Smi::FromInt(env->arguments_stack_height()));
636  data->SetPc(i, Smi::FromInt(env->pc_offset()));
637  }
638  code->set_deoptimization_data(*data);
639 }
640 
641 
642 int LCodeGen::DefineDeoptimizationLiteral(Handle<Object> literal) {
643  int result = deoptimization_literals_.length();
644  for (int i = 0; i < deoptimization_literals_.length(); ++i) {
645  if (deoptimization_literals_[i].is_identical_to(literal)) return i;
646  }
647  deoptimization_literals_.Add(literal, zone());
648  return result;
649 }
650 
651 
652 void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
653  ASSERT(deoptimization_literals_.length() == 0);
654 
655  const ZoneList<Handle<JSFunction> >* inlined_closures =
656  chunk()->inlined_closures();
657 
658  for (int i = 0, length = inlined_closures->length();
659  i < length;
660  i++) {
661  DefineDeoptimizationLiteral(inlined_closures->at(i));
662  }
663 
664  inlined_function_count_ = deoptimization_literals_.length();
665 }
666 
667 
668 void LCodeGen::RecordSafepointWithLazyDeopt(
669  LInstruction* instr, SafepointMode safepoint_mode, int argc) {
670  if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
671  RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt);
672  } else {
673  ASSERT(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS);
674  RecordSafepointWithRegisters(
675  instr->pointer_map(), argc, Safepoint::kLazyDeopt);
676  }
677 }
678 
679 
680 void LCodeGen::RecordSafepoint(
681  LPointerMap* pointers,
682  Safepoint::Kind kind,
683  int arguments,
684  Safepoint::DeoptMode deopt_mode) {
685  ASSERT(kind == expected_safepoint_kind_);
686 
687  const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();
688 
689  Safepoint safepoint = safepoints_.DefineSafepoint(masm(),
690  kind, arguments, deopt_mode);
691  for (int i = 0; i < operands->length(); i++) {
692  LOperand* pointer = operands->at(i);
693  if (pointer->IsStackSlot()) {
694  safepoint.DefinePointerSlot(pointer->index(), zone());
695  } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
696  safepoint.DefinePointerRegister(ToRegister(pointer), zone());
697  }
698  }
699  if (kind & Safepoint::kWithRegisters) {
700  // Register rsi always contains a pointer to the context.
701  safepoint.DefinePointerRegister(rsi, zone());
702  }
703 }
704 
705 
706 void LCodeGen::RecordSafepoint(LPointerMap* pointers,
707  Safepoint::DeoptMode deopt_mode) {
708  RecordSafepoint(pointers, Safepoint::kSimple, 0, deopt_mode);
709 }
710 
711 
712 void LCodeGen::RecordSafepoint(Safepoint::DeoptMode deopt_mode) {
713  LPointerMap empty_pointers(RelocInfo::kNoPosition, zone());
714  RecordSafepoint(&empty_pointers, deopt_mode);
715 }
716 
717 
718 void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
719  int arguments,
720  Safepoint::DeoptMode deopt_mode) {
721  RecordSafepoint(pointers, Safepoint::kWithRegisters, arguments, deopt_mode);
722 }
723 
724 
725 void LCodeGen::RecordPosition(int position) {
726  if (position == RelocInfo::kNoPosition) return;
727  masm()->positions_recorder()->RecordPosition(position);
728 }
729 
730 
731 void LCodeGen::DoLabel(LLabel* label) {
732  if (label->is_loop_header()) {
733  Comment(";;; B%d - LOOP entry", label->block_id());
734  } else {
735  Comment(";;; B%d", label->block_id());
736  }
737  __ bind(label->label());
738  current_block_ = label->block_id();
739  DoGap(label);
740 }
741 
742 
743 void LCodeGen::DoParallelMove(LParallelMove* move) {
744  resolver_.Resolve(move);
745 }
746 
747 
748 void LCodeGen::DoGap(LGap* gap) {
749  for (int i = LGap::FIRST_INNER_POSITION;
750  i <= LGap::LAST_INNER_POSITION;
751  i++) {
752  LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i);
753  LParallelMove* move = gap->GetParallelMove(inner_pos);
754  if (move != NULL) DoParallelMove(move);
755  }
756 }
757 
758 
759 void LCodeGen::DoInstructionGap(LInstructionGap* instr) {
760  DoGap(instr);
761 }
762 
763 
764 void LCodeGen::DoParameter(LParameter* instr) {
765  // Nothing to do.
766 }
767 
768 
769 void LCodeGen::DoCallStub(LCallStub* instr) {
770  ASSERT(ToRegister(instr->result()).is(rax));
771  switch (instr->hydrogen()->major_key()) {
772  case CodeStub::RegExpConstructResult: {
773  RegExpConstructResultStub stub;
774  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
775  break;
776  }
777  case CodeStub::RegExpExec: {
778  RegExpExecStub stub;
779  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
780  break;
781  }
782  case CodeStub::SubString: {
783  SubStringStub stub;
784  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
785  break;
786  }
787  case CodeStub::NumberToString: {
788  NumberToStringStub stub;
789  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
790  break;
791  }
792  case CodeStub::StringAdd: {
793  StringAddStub stub(NO_STRING_ADD_FLAGS);
794  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
795  break;
796  }
797  case CodeStub::StringCompare: {
798  StringCompareStub stub;
799  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
800  break;
801  }
802  case CodeStub::TranscendentalCache: {
803  TranscendentalCacheStub stub(instr->transcendental_type(),
804  TranscendentalCacheStub::TAGGED);
805  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
806  break;
807  }
808  default:
809  UNREACHABLE();
810  }
811 }
812 
813 
814 void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
815  // Nothing to do.
816 }
817 
818 
819 void LCodeGen::DoModI(LModI* instr) {
820  if (instr->hydrogen()->HasPowerOf2Divisor()) {
821  Register dividend = ToRegister(instr->left());
822 
823  int32_t divisor =
824  HConstant::cast(instr->hydrogen()->right())->Integer32Value();
825 
826  if (divisor < 0) divisor = -divisor;
827 
828  Label positive_dividend, done;
829  __ testl(dividend, dividend);
830  __ j(not_sign, &positive_dividend, Label::kNear);
831  __ negl(dividend);
832  __ andl(dividend, Immediate(divisor - 1));
833  __ negl(dividend);
834  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
835  __ j(not_zero, &done, Label::kNear);
836  DeoptimizeIf(no_condition, instr->environment());
837  } else {
838  __ jmp(&done, Label::kNear);
839  }
840  __ bind(&positive_dividend);
841  __ andl(dividend, Immediate(divisor - 1));
842  __ bind(&done);
843  } else {
844  Label done, remainder_eq_dividend, slow, do_subtraction, both_positive;
845  Register left_reg = ToRegister(instr->left());
846  Register right_reg = ToRegister(instr->right());
847  Register result_reg = ToRegister(instr->result());
848 
849  ASSERT(left_reg.is(rax));
850  ASSERT(result_reg.is(rdx));
851  ASSERT(!right_reg.is(rax));
852  ASSERT(!right_reg.is(rdx));
853 
854  // Check for x % 0.
855  if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
856  __ testl(right_reg, right_reg);
857  DeoptimizeIf(zero, instr->environment());
858  }
859 
860  __ testl(left_reg, left_reg);
861  __ j(zero, &remainder_eq_dividend, Label::kNear);
862  __ j(sign, &slow, Label::kNear);
863 
864  __ testl(right_reg, right_reg);
865  __ j(not_sign, &both_positive, Label::kNear);
866  // The sign of the divisor doesn't matter.
867  __ neg(right_reg);
868 
869  __ bind(&both_positive);
870  // If the dividend is smaller than the nonnegative
871  // divisor, the dividend is the result.
872  __ cmpl(left_reg, right_reg);
873  __ j(less, &remainder_eq_dividend, Label::kNear);
874 
875  // Check if the divisor is a PowerOfTwo integer.
876  Register scratch = ToRegister(instr->temp());
877  __ movl(scratch, right_reg);
878  __ subl(scratch, Immediate(1));
879  __ testl(scratch, right_reg);
880  __ j(not_zero, &do_subtraction, Label::kNear);
881  __ andl(left_reg, scratch);
882  __ jmp(&remainder_eq_dividend, Label::kNear);
883 
884  __ bind(&do_subtraction);
885  const int kUnfolds = 3;
886  // Try a few subtractions of the dividend.
887  __ movl(scratch, left_reg);
888  for (int i = 0; i < kUnfolds; i++) {
889  // Reduce the dividend by the divisor.
890  __ subl(left_reg, right_reg);
891  // Check if the dividend is less than the divisor.
892  __ cmpl(left_reg, right_reg);
893  __ j(less, &remainder_eq_dividend, Label::kNear);
894  }
895  __ movl(left_reg, scratch);
896 
897  // Slow case, using idiv instruction.
898  __ bind(&slow);
899  // Sign extend eax to edx.
900  // (We are using only the low 32 bits of the values.)
901  __ cdq();
902 
903  // Check for (0 % -x) that will produce negative zero.
904  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
905  Label positive_left;
906  Label done;
907  __ testl(left_reg, left_reg);
908  __ j(not_sign, &positive_left, Label::kNear);
909  __ idivl(right_reg);
910 
911  // Test the remainder for 0, because then the result would be -0.
912  __ testl(result_reg, result_reg);
913  __ j(not_zero, &done, Label::kNear);
914 
915  DeoptimizeIf(no_condition, instr->environment());
916  __ bind(&positive_left);
917  __ idivl(right_reg);
918  __ bind(&done);
919  } else {
920  __ idivl(right_reg);
921  }
922  __ jmp(&done, Label::kNear);
923 
924  __ bind(&remainder_eq_dividend);
925  __ movl(result_reg, left_reg);
926 
927  __ bind(&done);
928  }
929 }
930 
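The fast path of DoModI above handles a power-of-two divisor with a mask: a negative dividend is negated, masked, and negated again so the remainder keeps the dividend's sign, matching what idiv would produce. A minimal standalone sketch of that trick (not part of this file; plain C++ with a hypothetical helper name, and the negation is done on a widened value to stay defined for kMinInt):

#include <cstdint>
#include <cstdio>

// divisor must be a power of two; its sign is irrelevant, and the result
// keeps the sign of the dividend (truncated-division semantics, like idiv).
int32_t ModPowerOfTwo(int32_t dividend, int32_t divisor) {
  uint32_t mask =
      static_cast<uint32_t>(divisor < 0 ? -static_cast<int64_t>(divisor) : divisor) - 1;
  if (dividend < 0) {
    // neg; and; neg, done on the magnitude.
    uint32_t magnitude = static_cast<uint32_t>(-static_cast<int64_t>(dividend));
    return -static_cast<int32_t>(magnitude & mask);
  }
  return dividend & mask;
}

int main() {
  printf("%d %d %d\n", ModPowerOfTwo(13, 8), ModPowerOfTwo(-13, 8), ModPowerOfTwo(13, -8));
  // Prints: 5 -5 5
  return 0;
}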
931 
932 void LCodeGen::DoMathFloorOfDiv(LMathFloorOfDiv* instr) {
933  ASSERT(instr->right()->IsConstantOperand());
934 
935  const Register dividend = ToRegister(instr->left());
936  int32_t divisor = ToInteger32(LConstantOperand::cast(instr->right()));
937  const Register result = ToRegister(instr->result());
938 
939  switch (divisor) {
940  case 0:
941  DeoptimizeIf(no_condition, instr->environment());
942  return;
943 
944  case 1:
945  if (!result.is(dividend)) {
946  __ movl(result, dividend);
947  }
948  return;
949 
950  case -1:
951  if (!result.is(dividend)) {
952  __ movl(result, dividend);
953  }
954  __ negl(result);
955  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
956  DeoptimizeIf(zero, instr->environment());
957  }
958  if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
959  DeoptimizeIf(overflow, instr->environment());
960  }
961  return;
962  }
963 
964  uint32_t divisor_abs = abs(divisor);
965  if (IsPowerOf2(divisor_abs)) {
966  int32_t power = WhichPowerOf2(divisor_abs);
967  if (divisor < 0) {
968  __ movsxlq(result, dividend);
969  __ neg(result);
970  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
971  DeoptimizeIf(zero, instr->environment());
972  }
973  __ sar(result, Immediate(power));
974  } else {
975  if (!result.is(dividend)) {
976  __ movl(result, dividend);
977  }
978  __ sarl(result, Immediate(power));
979  }
980  } else {
981  Register reg1 = ToRegister(instr->temp());
982  Register reg2 = ToRegister(instr->result());
983 
984  // Find b which: 2^b < divisor_abs < 2^(b+1).
985  unsigned b = 31 - CompilerIntrinsics::CountLeadingZeros(divisor_abs);
986  unsigned shift = 32 + b; // Precision +1 bit (effectively).
987  double multiplier_f =
988  static_cast<double>(static_cast<uint64_t>(1) << shift) / divisor_abs;
989  int64_t multiplier;
990  if (multiplier_f - floor(multiplier_f) < 0.5) {
991  multiplier = static_cast<int64_t>(floor(multiplier_f));
992  } else {
993  multiplier = static_cast<int64_t>(floor(multiplier_f)) + 1;
994  }
995  // The multiplier is a uint32.
996  ASSERT(multiplier > 0 &&
997  multiplier < (static_cast<int64_t>(1) << 32));
998  // The multiply is int64, so sign-extend to r64.
999  __ movsxlq(reg1, dividend);
1000  if (divisor < 0 &&
1001  instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
1002  __ neg(reg1);
1003  DeoptimizeIf(zero, instr->environment());
1004  }
1005  __ movq(reg2, multiplier, RelocInfo::NONE);
1006  // Result just fits in r64, because it's int32 * uint32.
1007  __ imul(reg2, reg1);
1008 
1009  __ addq(reg2, Immediate(1 << 30));
1010  __ sar(reg2, Immediate(shift));
1011  }
1012 }
1013 
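When the divisor in DoMathFloorOfDiv is not a power of two, the code above replaces the division with a multiplication by a fixed-point reciprocal: a 32-bit multiplier close to 2^(32+b)/|divisor| (where 2^b is the highest power of two below the divisor), followed by an arithmetic shift right by 32+b. A minimal standalone sketch of the same computation (not part of this file; positive divisors only, hypothetical helper name, and __builtin_clz assumes GCC or Clang):

#include <cstdint>
#include <cstdio>

// divisor must be positive and not a power of two (those take other paths above).
int32_t FloorDivByConstant(int32_t dividend, uint32_t divisor) {
  unsigned b = 31 - __builtin_clz(divisor);        // 2^b < divisor < 2^(b+1)
  unsigned shift = 32 + b;                         // one extra bit of precision
  uint64_t multiplier =                            // round 2^shift / divisor to nearest
      ((static_cast<uint64_t>(1) << shift) + divisor / 2) / divisor;
  int64_t product = static_cast<int64_t>(dividend) * static_cast<int64_t>(multiplier);
  product += static_cast<int64_t>(1) << 30;        // same rounding bias as the addq above
  return static_cast<int32_t>(product >> shift);   // arithmetic shift, like sar
}

int main() {
  printf("%d %d\n", FloorDivByConstant(10, 3), FloorDivByConstant(100, 7));  // 3 14
  return 0;
}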
1014 
1015 void LCodeGen::DoDivI(LDivI* instr) {
1016  LOperand* right = instr->right();
1017  ASSERT(ToRegister(instr->result()).is(rax));
1018  ASSERT(ToRegister(instr->left()).is(rax));
1019  ASSERT(!ToRegister(instr->right()).is(rax));
1020  ASSERT(!ToRegister(instr->right()).is(rdx));
1021 
1022  Register left_reg = rax;
1023 
1024  // Check for x / 0.
1025  Register right_reg = ToRegister(right);
1026  if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
1027  __ testl(right_reg, right_reg);
1028  DeoptimizeIf(zero, instr->environment());
1029  }
1030 
1031  // Check for (0 / -x) that will produce negative zero.
1032  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
1033  Label left_not_zero;
1034  __ testl(left_reg, left_reg);
1035  __ j(not_zero, &left_not_zero, Label::kNear);
1036  __ testl(right_reg, right_reg);
1037  DeoptimizeIf(sign, instr->environment());
1038  __ bind(&left_not_zero);
1039  }
1040 
1041  // Check for (-kMinInt / -1).
1042  if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
1043  Label left_not_min_int;
1044  __ cmpl(left_reg, Immediate(kMinInt));
1045  __ j(not_zero, &left_not_min_int, Label::kNear);
1046  __ cmpl(right_reg, Immediate(-1));
1047  DeoptimizeIf(zero, instr->environment());
1048  __ bind(&left_not_min_int);
1049  }
1050 
1051  // Sign extend to rdx.
1052  __ cdq();
1053  __ idivl(right_reg);
1054 
1055  // Deoptimize if remainder is not 0.
1056  __ testl(rdx, rdx);
1057  DeoptimizeIf(not_zero, instr->environment());
1058 }
1059 
1060 
1061 void LCodeGen::DoMulI(LMulI* instr) {
1062  Register left = ToRegister(instr->left());
1063  LOperand* right = instr->right();
1064 
1065  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
1066  __ movl(kScratchRegister, left);
1067  }
1068 
1069  bool can_overflow =
1070  instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
1071  if (right->IsConstantOperand()) {
1072  int right_value = ToInteger32(LConstantOperand::cast(right));
1073  if (right_value == -1) {
1074  __ negl(left);
1075  } else if (right_value == 0) {
1076  __ xorl(left, left);
1077  } else if (right_value == 2) {
1078  __ addl(left, left);
1079  } else if (!can_overflow) {
1080  // If the multiplication is known to not overflow, we
1081  // can use operations that don't set the overflow flag
1082  // correctly.
1083  switch (right_value) {
1084  case 1:
1085  // Do nothing.
1086  break;
1087  case 3:
1088  __ leal(left, Operand(left, left, times_2, 0));
1089  break;
1090  case 4:
1091  __ shll(left, Immediate(2));
1092  break;
1093  case 5:
1094  __ leal(left, Operand(left, left, times_4, 0));
1095  break;
1096  case 8:
1097  __ shll(left, Immediate(3));
1098  break;
1099  case 9:
1100  __ leal(left, Operand(left, left, times_8, 0));
1101  break;
1102  case 16:
1103  __ shll(left, Immediate(4));
1104  break;
1105  default:
1106  __ imull(left, left, Immediate(right_value));
1107  break;
1108  }
1109  } else {
1110  __ imull(left, left, Immediate(right_value));
1111  }
1112  } else if (right->IsStackSlot()) {
1113  __ imull(left, ToOperand(right));
1114  } else {
1115  __ imull(left, ToRegister(right));
1116  }
1117 
1118  if (can_overflow) {
1119  DeoptimizeIf(overflow, instr->environment());
1120  }
1121 
1122  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
1123  // Bail out if the result is supposed to be negative zero.
1124  Label done;
1125  __ testl(left, left);
1126  __ j(not_zero, &done, Label::kNear);
1127  if (right->IsConstantOperand()) {
1128  if (ToInteger32(LConstantOperand::cast(right)) < 0) {
1129  DeoptimizeIf(no_condition, instr->environment());
1130  } else if (ToInteger32(LConstantOperand::cast(right)) == 0) {
1131  __ cmpl(kScratchRegister, Immediate(0));
1132  DeoptimizeIf(less, instr->environment());
1133  }
1134  } else if (right->IsStackSlot()) {
1135  __ orl(kScratchRegister, ToOperand(right));
1136  DeoptimizeIf(sign, instr->environment());
1137  } else {
1138  // Test the non-zero operand for negative sign.
1139  __ orl(kScratchRegister, ToRegister(right));
1140  DeoptimizeIf(sign, instr->environment());
1141  }
1142  __ bind(&done);
1143  }
1144 }
1145 
1146 
1147 void LCodeGen::DoBitI(LBitI* instr) {
1148  LOperand* left = instr->left();
1149  LOperand* right = instr->right();
1150  ASSERT(left->Equals(instr->result()));
1151  ASSERT(left->IsRegister());
1152 
1153  if (right->IsConstantOperand()) {
1154  int right_operand = ToInteger32(LConstantOperand::cast(right));
1155  switch (instr->op()) {
1156  case Token::BIT_AND:
1157  __ andl(ToRegister(left), Immediate(right_operand));
1158  break;
1159  case Token::BIT_OR:
1160  __ orl(ToRegister(left), Immediate(right_operand));
1161  break;
1162  case Token::BIT_XOR:
1163  __ xorl(ToRegister(left), Immediate(right_operand));
1164  break;
1165  default:
1166  UNREACHABLE();
1167  break;
1168  }
1169  } else if (right->IsStackSlot()) {
1170  switch (instr->op()) {
1171  case Token::BIT_AND:
1172  __ andl(ToRegister(left), ToOperand(right));
1173  break;
1174  case Token::BIT_OR:
1175  __ orl(ToRegister(left), ToOperand(right));
1176  break;
1177  case Token::BIT_XOR:
1178  __ xorl(ToRegister(left), ToOperand(right));
1179  break;
1180  default:
1181  UNREACHABLE();
1182  break;
1183  }
1184  } else {
1185  ASSERT(right->IsRegister());
1186  switch (instr->op()) {
1187  case Token::BIT_AND:
1188  __ andl(ToRegister(left), ToRegister(right));
1189  break;
1190  case Token::BIT_OR:
1191  __ orl(ToRegister(left), ToRegister(right));
1192  break;
1193  case Token::BIT_XOR:
1194  __ xorl(ToRegister(left), ToRegister(right));
1195  break;
1196  default:
1197  UNREACHABLE();
1198  break;
1199  }
1200  }
1201 }
1202 
1203 
1204 void LCodeGen::DoShiftI(LShiftI* instr) {
1205  LOperand* left = instr->left();
1206  LOperand* right = instr->right();
1207  ASSERT(left->Equals(instr->result()));
1208  ASSERT(left->IsRegister());
1209  if (right->IsRegister()) {
1210  ASSERT(ToRegister(right).is(rcx));
1211 
1212  switch (instr->op()) {
1213  case Token::SAR:
1214  __ sarl_cl(ToRegister(left));
1215  break;
1216  case Token::SHR:
1217  __ shrl_cl(ToRegister(left));
1218  if (instr->can_deopt()) {
1219  __ testl(ToRegister(left), ToRegister(left));
1220  DeoptimizeIf(negative, instr->environment());
1221  }
1222  break;
1223  case Token::SHL:
1224  __ shll_cl(ToRegister(left));
1225  break;
1226  default:
1227  UNREACHABLE();
1228  break;
1229  }
1230  } else {
1231  int value = ToInteger32(LConstantOperand::cast(right));
1232  uint8_t shift_count = static_cast<uint8_t>(value & 0x1F);
1233  switch (instr->op()) {
1234  case Token::SAR:
1235  if (shift_count != 0) {
1236  __ sarl(ToRegister(left), Immediate(shift_count));
1237  }
1238  break;
1239  case Token::SHR:
1240  if (shift_count == 0 && instr->can_deopt()) {
1241  __ testl(ToRegister(left), ToRegister(left));
1242  DeoptimizeIf(negative, instr->environment());
1243  } else {
1244  __ shrl(ToRegister(left), Immediate(shift_count));
1245  }
1246  break;
1247  case Token::SHL:
1248  if (shift_count != 0) {
1249  __ shll(ToRegister(left), Immediate(shift_count));
1250  }
1251  break;
1252  default:
1253  UNREACHABLE();
1254  break;
1255  }
1256  }
1257 }
1258 
1259 
1260 void LCodeGen::DoSubI(LSubI* instr) {
1261  LOperand* left = instr->left();
1262  LOperand* right = instr->right();
1263  ASSERT(left->Equals(instr->result()));
1264 
1265  if (right->IsConstantOperand()) {
1266  __ subl(ToRegister(left),
1267  Immediate(ToInteger32(LConstantOperand::cast(right))));
1268  } else if (right->IsRegister()) {
1269  __ subl(ToRegister(left), ToRegister(right));
1270  } else {
1271  __ subl(ToRegister(left), ToOperand(right));
1272  }
1273 
1274  if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
1275  DeoptimizeIf(overflow, instr->environment());
1276  }
1277 }
1278 
1279 
1280 void LCodeGen::DoConstantI(LConstantI* instr) {
1281  ASSERT(instr->result()->IsRegister());
1282  __ Set(ToRegister(instr->result()), instr->value());
1283 }
1284 
1285 
1286 void LCodeGen::DoConstantD(LConstantD* instr) {
1287  ASSERT(instr->result()->IsDoubleRegister());
1288  XMMRegister res = ToDoubleRegister(instr->result());
1289  double v = instr->value();
1290  uint64_t int_val = BitCast<uint64_t, double>(v);
1291  // Use xor to produce +0.0 in a fast and compact way, but avoid doing
1292  // so if the constant is -0.0.
1293  if (int_val == 0) {
1294  __ xorps(res, res);
1295  } else {
1296  Register tmp = ToRegister(instr->temp());
1297  __ Set(tmp, int_val);
1298  __ movq(res, tmp);
1299  }
1300 }
1301 
1302 
1303 void LCodeGen::DoConstantT(LConstantT* instr) {
1304  Handle<Object> value = instr->value();
1305  if (value->IsSmi()) {
1306  __ Move(ToRegister(instr->result()), value);
1307  } else {
1308  __ LoadHeapObject(ToRegister(instr->result()),
1309  Handle<HeapObject>::cast(value));
1310  }
1311 }
1312 
1313 
1314 void LCodeGen::DoJSArrayLength(LJSArrayLength* instr) {
1315  Register result = ToRegister(instr->result());
1316  Register array = ToRegister(instr->value());
1317  __ movq(result, FieldOperand(array, JSArray::kLengthOffset));
1318 }
1319 
1320 
1321 void LCodeGen::DoFixedArrayBaseLength(LFixedArrayBaseLength* instr) {
1322  Register result = ToRegister(instr->result());
1323  Register array = ToRegister(instr->value());
1324  __ movq(result, FieldOperand(array, FixedArrayBase::kLengthOffset));
1325 }
1326 
1327 
1328 void LCodeGen::DoMapEnumLength(LMapEnumLength* instr) {
1329  Register result = ToRegister(instr->result());
1330  Register map = ToRegister(instr->value());
1331  __ EnumLength(result, map);
1332 }
1333 
1334 
1335 void LCodeGen::DoElementsKind(LElementsKind* instr) {
1336  Register result = ToRegister(instr->result());
1337  Register input = ToRegister(instr->value());
1338 
1339  // Load map into |result|.
1340  __ movq(result, FieldOperand(input, HeapObject::kMapOffset));
1341  // Load the map's "bit field 2" into |result|. We only need the first byte.
1342  __ movzxbq(result, FieldOperand(result, Map::kBitField2Offset));
1343  // Retrieve elements_kind from bit field 2.
1344  __ and_(result, Immediate(Map::kElementsKindMask));
1345  __ shr(result, Immediate(Map::kElementsKindShift));
1346 }
1347 
1348 
1349 void LCodeGen::DoValueOf(LValueOf* instr) {
1350  Register input = ToRegister(instr->value());
1351  Register result = ToRegister(instr->result());
1352  ASSERT(input.is(result));
1353  Label done;
1354  // If the object is a smi return the object.
1355  __ JumpIfSmi(input, &done, Label::kNear);
1356 
1357  // If the object is not a value type, return the object.
1358  __ CmpObjectType(input, JS_VALUE_TYPE, kScratchRegister);
1359  __ j(not_equal, &done, Label::kNear);
1360  __ movq(result, FieldOperand(input, JSValue::kValueOffset));
1361 
1362  __ bind(&done);
1363 }
1364 
1365 
1366 void LCodeGen::DoDateField(LDateField* instr) {
1367  Register object = ToRegister(instr->date());
1368  Register result = ToRegister(instr->result());
1369  Smi* index = instr->index();
1370  Label runtime, done, not_date_object;
1371  ASSERT(object.is(result));
1372  ASSERT(object.is(rax));
1373 
1374  Condition cc = masm()->CheckSmi(object);
1375  DeoptimizeIf(cc, instr->environment());
1376  __ CmpObjectType(object, JS_DATE_TYPE, kScratchRegister);
1377  DeoptimizeIf(not_equal, instr->environment());
1378 
1379  if (index->value() == 0) {
1380  __ movq(result, FieldOperand(object, JSDate::kValueOffset));
1381  } else {
1382  if (index->value() < JSDate::kFirstUncachedField) {
1383  ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
1384  __ movq(kScratchRegister, stamp);
1385  __ cmpq(kScratchRegister, FieldOperand(object,
1386  JSDate::kCacheStampOffset));
1387  __ j(not_equal, &runtime, Label::kNear);
1388  __ movq(result, FieldOperand(object, JSDate::kValueOffset +
1389  kPointerSize * index->value()));
1390  __ jmp(&done);
1391  }
1392  __ bind(&runtime);
1393  __ PrepareCallCFunction(2);
1394 #ifdef _WIN64
1395  __ movq(rcx, object);
1396  __ movq(rdx, index, RelocInfo::NONE);
1397 #else
1398  __ movq(rdi, object);
1399  __ movq(rsi, index, RelocInfo::NONE);
1400 #endif
1401  __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
1402  __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
1403  __ bind(&done);
1404  }
1405 }
1406 
1407 
1408 void LCodeGen::DoBitNotI(LBitNotI* instr) {
1409  LOperand* input = instr->value();
1410  ASSERT(input->Equals(instr->result()));
1411  __ not_(ToRegister(input));
1412 }
1413 
1414 
1415 void LCodeGen::DoThrow(LThrow* instr) {
1416  __ push(ToRegister(instr->value()));
1417  CallRuntime(Runtime::kThrow, 1, instr);
1418 
1419  if (FLAG_debug_code) {
1420  Comment("Unreachable code.");
1421  __ int3();
1422  }
1423 }
1424 
1425 
1426 void LCodeGen::DoAddI(LAddI* instr) {
1427  LOperand* left = instr->left();
1428  LOperand* right = instr->right();
1429  ASSERT(left->Equals(instr->result()));
1430 
1431  if (right->IsConstantOperand()) {
1432  __ addl(ToRegister(left),
1433  Immediate(ToInteger32(LConstantOperand::cast(right))));
1434  } else if (right->IsRegister()) {
1435  __ addl(ToRegister(left), ToRegister(right));
1436  } else {
1437  __ addl(ToRegister(left), ToOperand(right));
1438  }
1439 
1440  if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
1441  DeoptimizeIf(overflow, instr->environment());
1442  }
1443 }
1444 
1445 
1446 void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
1447  LOperand* left = instr->left();
1448  LOperand* right = instr->right();
1449  ASSERT(left->Equals(instr->result()));
1450  HMathMinMax::Operation operation = instr->hydrogen()->operation();
1451  if (instr->hydrogen()->representation().IsInteger32()) {
1452  Label return_left;
1453  Condition condition = (operation == HMathMinMax::kMathMin)
1454  ? less_equal
1455  : greater_equal;
1456  Register left_reg = ToRegister(left);
1457  if (right->IsConstantOperand()) {
1458  Immediate right_imm =
1459  Immediate(ToInteger32(LConstantOperand::cast(right)));
1460  __ cmpq(left_reg, right_imm);
1461  __ j(condition, &return_left, Label::kNear);
1462  __ movq(left_reg, right_imm);
1463  } else if (right->IsRegister()) {
1464  Register right_reg = ToRegister(right);
1465  __ cmpq(left_reg, right_reg);
1466  __ j(condition, &return_left, Label::kNear);
1467  __ movq(left_reg, right_reg);
1468  } else {
1469  Operand right_op = ToOperand(right);
1470  __ cmpq(left_reg, right_op);
1471  __ j(condition, &return_left, Label::kNear);
1472  __ movq(left_reg, right_op);
1473  }
1474  __ bind(&return_left);
1475  } else {
1476  ASSERT(instr->hydrogen()->representation().IsDouble());
1477  Label check_nan_left, check_zero, return_left, return_right;
1478  Condition condition = (operation == HMathMinMax::kMathMin) ? below : above;
1479  XMMRegister left_reg = ToDoubleRegister(left);
1480  XMMRegister right_reg = ToDoubleRegister(right);
1481  __ ucomisd(left_reg, right_reg);
1482  __ j(parity_even, &check_nan_left, Label::kNear); // At least one NaN.
1483  __ j(equal, &check_zero, Label::kNear); // left == right.
1484  __ j(condition, &return_left, Label::kNear);
1485  __ jmp(&return_right, Label::kNear);
1486 
1487  __ bind(&check_zero);
1488  XMMRegister xmm_scratch = xmm0;
1489  __ xorps(xmm_scratch, xmm_scratch);
1490  __ ucomisd(left_reg, xmm_scratch);
1491  __ j(not_equal, &return_left, Label::kNear); // left == right != 0.
1492  // At this point, both left and right are either 0 or -0.
1493  if (operation == HMathMinMax::kMathMin) {
1494  __ orpd(left_reg, right_reg);
1495  } else {
1496  // Since we operate on +0 and/or -0, addsd and andsd have the same effect.
1497  __ addsd(left_reg, right_reg);
1498  }
1499  __ jmp(&return_left, Label::kNear);
1500 
1501  __ bind(&check_nan_left);
1502  __ ucomisd(left_reg, left_reg); // NaN check.
1503  __ j(parity_even, &return_left, Label::kNear);
1504  __ bind(&return_right);
1505  __ movsd(left_reg, right_reg);
1506 
1507  __ bind(&return_left);
1508  }
1509 }
1510 
1511 
1512 void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
1513  XMMRegister left = ToDoubleRegister(instr->left());
1514  XMMRegister right = ToDoubleRegister(instr->right());
1515  XMMRegister result = ToDoubleRegister(instr->result());
1516  // All operations except MOD are computed in-place.
1517  ASSERT(instr->op() == Token::MOD || left.is(result));
1518  switch (instr->op()) {
1519  case Token::ADD:
1520  __ addsd(left, right);
1521  break;
1522  case Token::SUB:
1523  __ subsd(left, right);
1524  break;
1525  case Token::MUL:
1526  __ mulsd(left, right);
1527  break;
1528  case Token::DIV:
1529  __ divsd(left, right);
1530  break;
1531  case Token::MOD:
1532  __ PrepareCallCFunction(2);
1533  __ movaps(xmm0, left);
1534  ASSERT(right.is(xmm1));
1535  __ CallCFunction(
1536  ExternalReference::double_fp_operation(Token::MOD, isolate()), 2);
1537  __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
1538  __ movaps(result, xmm0);
1539  break;
1540  default:
1541  UNREACHABLE();
1542  break;
1543  }
1544 }
1545 
1546 
1547 void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
1548  ASSERT(ToRegister(instr->left()).is(rdx));
1549  ASSERT(ToRegister(instr->right()).is(rax));
1550  ASSERT(ToRegister(instr->result()).is(rax));
1551 
1552  BinaryOpStub stub(instr->op(), NO_OVERWRITE);
1553  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
1554  __ nop(); // Signals no inlined code.
1555 }
1556 
1557 
1558 int LCodeGen::GetNextEmittedBlock(int block) {
1559  for (int i = block + 1; i < graph()->blocks()->length(); ++i) {
1560  LLabel* label = chunk_->GetLabel(i);
1561  if (!label->HasReplacement()) return i;
1562  }
1563  return -1;
1564 }
1565 
1566 
1567 void LCodeGen::EmitBranch(int left_block, int right_block, Condition cc) {
1568  int next_block = GetNextEmittedBlock(current_block_);
1569  right_block = chunk_->LookupDestination(right_block);
1570  left_block = chunk_->LookupDestination(left_block);
1571 
1572  if (right_block == left_block) {
1573  EmitGoto(left_block);
1574  } else if (left_block == next_block) {
1575  __ j(NegateCondition(cc), chunk_->GetAssemblyLabel(right_block));
1576  } else if (right_block == next_block) {
1577  __ j(cc, chunk_->GetAssemblyLabel(left_block));
1578  } else {
1579  __ j(cc, chunk_->GetAssemblyLabel(left_block));
1580  if (cc != always) {
1581  __ jmp(chunk_->GetAssemblyLabel(right_block));
1582  }
1583  }
1584 }
1585 
1586 
1587 void LCodeGen::DoBranch(LBranch* instr) {
1588  int true_block = chunk_->LookupDestination(instr->true_block_id());
1589  int false_block = chunk_->LookupDestination(instr->false_block_id());
1590 
1591  Representation r = instr->hydrogen()->value()->representation();
1592  if (r.IsInteger32()) {
1593  Register reg = ToRegister(instr->value());
1594  __ testl(reg, reg);
1595  EmitBranch(true_block, false_block, not_zero);
1596  } else if (r.IsDouble()) {
1597  XMMRegister reg = ToDoubleRegister(instr->value());
1598  __ xorps(xmm0, xmm0);
1599  __ ucomisd(reg, xmm0);
1600  EmitBranch(true_block, false_block, not_equal);
1601  } else {
1602  ASSERT(r.IsTagged());
1603  Register reg = ToRegister(instr->value());
1604  HType type = instr->hydrogen()->value()->type();
1605  if (type.IsBoolean()) {
1606  __ CompareRoot(reg, Heap::kTrueValueRootIndex);
1607  EmitBranch(true_block, false_block, equal);
1608  } else if (type.IsSmi()) {
1609  __ SmiCompare(reg, Smi::FromInt(0));
1610  EmitBranch(true_block, false_block, not_equal);
1611  } else {
1612  Label* true_label = chunk_->GetAssemblyLabel(true_block);
1613  Label* false_label = chunk_->GetAssemblyLabel(false_block);
1614 
1615  ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types();
1616  // Avoid deopts in the case where we've never executed this path before.
1617  if (expected.IsEmpty()) expected = ToBooleanStub::all_types();
1618 
1619  if (expected.Contains(ToBooleanStub::UNDEFINED)) {
1620  // undefined -> false.
1621  __ CompareRoot(reg, Heap::kUndefinedValueRootIndex);
1622  __ j(equal, false_label);
1623  }
1624  if (expected.Contains(ToBooleanStub::BOOLEAN)) {
1625  // true -> true.
1626  __ CompareRoot(reg, Heap::kTrueValueRootIndex);
1627  __ j(equal, true_label);
1628  // false -> false.
1629  __ CompareRoot(reg, Heap::kFalseValueRootIndex);
1630  __ j(equal, false_label);
1631  }
1632  if (expected.Contains(ToBooleanStub::NULL_TYPE)) {
1633  // 'null' -> false.
1634  __ CompareRoot(reg, Heap::kNullValueRootIndex);
1635  __ j(equal, false_label);
1636  }
1637 
1638  if (expected.Contains(ToBooleanStub::SMI)) {
1639  // Smis: 0 -> false, all other -> true.
1640  __ Cmp(reg, Smi::FromInt(0));
1641  __ j(equal, false_label);
1642  __ JumpIfSmi(reg, true_label);
1643  } else if (expected.NeedsMap()) {
1644  // If we need a map later and have a Smi -> deopt.
1645  __ testb(reg, Immediate(kSmiTagMask));
1646  DeoptimizeIf(zero, instr->environment());
1647  }
1648 
1649  const Register map = kScratchRegister;
1650  if (expected.NeedsMap()) {
1651  __ movq(map, FieldOperand(reg, HeapObject::kMapOffset));
1652 
1653  if (expected.CanBeUndetectable()) {
1654  // Undetectable -> false.
1655  __ testb(FieldOperand(map, Map::kBitFieldOffset),
1656  Immediate(1 << Map::kIsUndetectable));
1657  __ j(not_zero, false_label);
1658  }
1659  }
1660 
1661  if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) {
1662  // spec object -> true.
1663  __ CmpInstanceType(map, FIRST_SPEC_OBJECT_TYPE);
1664  __ j(above_equal, true_label);
1665  }
1666 
1667  if (expected.Contains(ToBooleanStub::STRING)) {
1668  // String value -> false iff empty.
1669  Label not_string;
1670  __ CmpInstanceType(map, FIRST_NONSTRING_TYPE);
1671  __ j(above_equal, &not_string, Label::kNear);
1672  __ cmpq(FieldOperand(reg, String::kLengthOffset), Immediate(0));
1673  __ j(not_zero, true_label);
1674  __ jmp(false_label);
1675  __ bind(&not_string);
1676  }
1677 
1678  if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
1679  // heap number -> false iff +0, -0, or NaN.
1680  Label not_heap_number;
1681  __ CompareRoot(map, Heap::kHeapNumberMapRootIndex);
1682  __ j(not_equal, &not_heap_number, Label::kNear);
1683  __ xorps(xmm0, xmm0);
1684  __ ucomisd(xmm0, FieldOperand(reg, HeapNumber::kValueOffset));
1685  __ j(zero, false_label);
1686  __ jmp(true_label);
1687  __ bind(&not_heap_number);
1688  }
1689 
1690  // We've seen something for the first time -> deopt.
1691  DeoptimizeIf(no_condition, instr->environment());
1692  }
1693  }
1694 }
1695 
1696 
1697 void LCodeGen::EmitGoto(int block) {
1698  block = chunk_->LookupDestination(block);
1699  int next_block = GetNextEmittedBlock(current_block_);
1700  if (block != next_block) {
1701  __ jmp(chunk_->GetAssemblyLabel(block));
1702  }
1703 }
1704 
1705 
1706 void LCodeGen::DoGoto(LGoto* instr) {
1707  EmitGoto(instr->block_id());
1708 }
1709 
1710 
1711 inline Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
1712  Condition cond = no_condition;
1713  switch (op) {
1714  case Token::EQ:
1715  case Token::EQ_STRICT:
1716  cond = equal;
1717  break;
1718  case Token::LT:
1719  cond = is_unsigned ? below : less;
1720  break;
1721  case Token::GT:
1722  cond = is_unsigned ? above : greater;
1723  break;
1724  case Token::LTE:
1725  cond = is_unsigned ? below_equal : less_equal;
1726  break;
1727  case Token::GTE:
1728  cond = is_unsigned ? above_equal : greater_equal;
1729  break;
1730  case Token::IN:
1731  case Token::INSTANCEOF:
1732  default:
1733  UNREACHABLE();
1734  }
1735  return cond;
1736 }
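 // NOTE (illustrative annotation, not part of the original source): the
 // is_unsigned flag picks the unsigned x64 condition codes, e.g.
 //   TokenToCondition(Token::LT, false) == less   (signed compare)
 //   TokenToCondition(Token::LT, true)  == below  (unsigned compare)
 // The unsigned variants are also what ucomisd-based double comparisons need,
 // since ucomisd sets the flags like an unsigned integer compare.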
1737 
1738 
1739 void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) {
1740  LOperand* left = instr->left();
1741  LOperand* right = instr->right();
1742  int false_block = chunk_->LookupDestination(instr->false_block_id());
1743  int true_block = chunk_->LookupDestination(instr->true_block_id());
1744  Condition cc = TokenToCondition(instr->op(), instr->is_double());
1745 
1746  if (left->IsConstantOperand() && right->IsConstantOperand()) {
1747  // We can statically evaluate the comparison.
1748  double left_val = ToDouble(LConstantOperand::cast(left));
1749  double right_val = ToDouble(LConstantOperand::cast(right));
1750  int next_block =
1751  EvalComparison(instr->op(), left_val, right_val) ? true_block
1752  : false_block;
1753  EmitGoto(next_block);
1754  } else {
1755  if (instr->is_double()) {
1756  // Don't base result on EFLAGS when a NaN is involved. Instead
1757  // jump to the false block.
1758  __ ucomisd(ToDoubleRegister(left), ToDoubleRegister(right));
1759  __ j(parity_even, chunk_->GetAssemblyLabel(false_block));
1760  } else {
1761  int32_t value;
1762  if (right->IsConstantOperand()) {
1763  value = ToInteger32(LConstantOperand::cast(right));
1764  __ cmpl(ToRegister(left), Immediate(value));
1765  } else if (left->IsConstantOperand()) {
1766  value = ToInteger32(LConstantOperand::cast(left));
1767  if (right->IsRegister()) {
1768  __ cmpl(ToRegister(right), Immediate(value));
1769  } else {
1770  __ cmpl(ToOperand(right), Immediate(value));
1771  }
1772  // We transposed the operands. Reverse the condition.
1773  cc = ReverseCondition(cc);
1774  } else {
1775  if (right->IsRegister()) {
1776  __ cmpl(ToRegister(left), ToRegister(right));
1777  } else {
1778  __ cmpl(ToRegister(left), ToOperand(right));
1779  }
1780  }
1781  }
1782  EmitBranch(true_block, false_block, cc);
1783  }
1784 }
1785 
1786 
1787 void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
1788  Register left = ToRegister(instr->left());
1789  Register right = ToRegister(instr->right());
1790  int false_block = chunk_->LookupDestination(instr->false_block_id());
1791  int true_block = chunk_->LookupDestination(instr->true_block_id());
1792 
1793  __ cmpq(left, right);
1794  EmitBranch(true_block, false_block, equal);
1795 }
1796 
1797 
1798 void LCodeGen::DoCmpConstantEqAndBranch(LCmpConstantEqAndBranch* instr) {
1799  Register left = ToRegister(instr->left());
1800  int true_block = chunk_->LookupDestination(instr->true_block_id());
1801  int false_block = chunk_->LookupDestination(instr->false_block_id());
1802 
1803  __ cmpq(left, Immediate(instr->hydrogen()->right()));
1804  EmitBranch(true_block, false_block, equal);
1805 }
1806 
1807 
1808 void LCodeGen::DoIsNilAndBranch(LIsNilAndBranch* instr) {
1809  Register reg = ToRegister(instr->value());
1810  int false_block = chunk_->LookupDestination(instr->false_block_id());
1811 
1812  // If the expression is known to be untagged or a smi, then it's definitely
1813  // not null, and it can't be an undetectable object.
1814  if (instr->hydrogen()->representation().IsSpecialization() ||
1815  instr->hydrogen()->type().IsSmi()) {
1816  EmitGoto(false_block);
1817  return;
1818  }
1819 
1820  int true_block = chunk_->LookupDestination(instr->true_block_id());
1821  Heap::RootListIndex nil_value = instr->nil() == kNullValue ?
1822  Heap::kNullValueRootIndex :
1823  Heap::kUndefinedValueRootIndex;
1824  __ CompareRoot(reg, nil_value);
1825  if (instr->kind() == kStrictEquality) {
1826  EmitBranch(true_block, false_block, equal);
1827  } else {
1828  Heap::RootListIndex other_nil_value = instr->nil() == kNullValue ?
1829  Heap::kUndefinedValueRootIndex :
1830  Heap::kNullValueRootIndex;
1831  Label* true_label = chunk_->GetAssemblyLabel(true_block);
1832  Label* false_label = chunk_->GetAssemblyLabel(false_block);
1833  __ j(equal, true_label);
1834  __ CompareRoot(reg, other_nil_value);
1835  __ j(equal, true_label);
1836  __ JumpIfSmi(reg, false_label);
1837  // Check for undetectable objects by looking in the bit field in
1838  // the map. The object has already been smi checked.
1839  Register scratch = ToRegister(instr->temp());
1840  __ movq(scratch, FieldOperand(reg, HeapObject::kMapOffset));
1841  __ testb(FieldOperand(scratch, Map::kBitFieldOffset),
1842  Immediate(1 << Map::kIsUndetectable));
1843  EmitBranch(true_block, false_block, not_zero);
1844  }
1845 }
1846 
1847 
1848 Condition LCodeGen::EmitIsObject(Register input,
1849  Label* is_not_object,
1850  Label* is_object) {
1851  ASSERT(!input.is(kScratchRegister));
1852 
1853  __ JumpIfSmi(input, is_not_object);
1854 
1855  __ CompareRoot(input, Heap::kNullValueRootIndex);
1856  __ j(equal, is_object);
1857 
1858  __ movq(kScratchRegister, FieldOperand(input, HeapObject::kMapOffset));
1859  // Undetectable objects behave like undefined.
1860  __ testb(FieldOperand(kScratchRegister, Map::kBitFieldOffset),
1861  Immediate(1 << Map::kIsUndetectable));
1862  __ j(not_zero, is_not_object);
1863 
1864  __ movzxbl(kScratchRegister,
1865  FieldOperand(kScratchRegister, Map::kInstanceTypeOffset));
1866  __ cmpb(kScratchRegister, Immediate(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
1867  __ j(below, is_not_object);
1868  __ cmpb(kScratchRegister, Immediate(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
1869  return below_equal;
1870 }
1871 
1872 
1873 void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
1874  Register reg = ToRegister(instr->value());
1875 
1876  int true_block = chunk_->LookupDestination(instr->true_block_id());
1877  int false_block = chunk_->LookupDestination(instr->false_block_id());
1878  Label* true_label = chunk_->GetAssemblyLabel(true_block);
1879  Label* false_label = chunk_->GetAssemblyLabel(false_block);
1880 
1881  Condition true_cond = EmitIsObject(reg, false_label, true_label);
1882 
1883  EmitBranch(true_block, false_block, true_cond);
1884 }
1885 
1886 
1887 Condition LCodeGen::EmitIsString(Register input,
1888  Register temp1,
1889  Label* is_not_string) {
1890  __ JumpIfSmi(input, is_not_string);
1891  Condition cond = masm_->IsObjectStringType(input, temp1, temp1);
1892 
1893  return cond;
1894 }
1895 
1896 
1897 void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
1898  Register reg = ToRegister(instr->value());
1899  Register temp = ToRegister(instr->temp());
1900 
1901  int true_block = chunk_->LookupDestination(instr->true_block_id());
1902  int false_block = chunk_->LookupDestination(instr->false_block_id());
1903  Label* false_label = chunk_->GetAssemblyLabel(false_block);
1904 
1905  Condition true_cond = EmitIsString(reg, temp, false_label);
1906 
1907  EmitBranch(true_block, false_block, true_cond);
1908 }
1909 
1910 
1911 void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
1912  int true_block = chunk_->LookupDestination(instr->true_block_id());
1913  int false_block = chunk_->LookupDestination(instr->false_block_id());
1914 
1915  Condition is_smi;
1916  if (instr->value()->IsRegister()) {
1917  Register input = ToRegister(instr->value());
1918  is_smi = masm()->CheckSmi(input);
1919  } else {
1920  Operand input = ToOperand(instr->value());
1921  is_smi = masm()->CheckSmi(input);
1922  }
1923  EmitBranch(true_block, false_block, is_smi);
1924 }
1925 
1926 
1927 void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
1928  Register input = ToRegister(instr->value());
1929  Register temp = ToRegister(instr->temp());
1930 
1931  int true_block = chunk_->LookupDestination(instr->true_block_id());
1932  int false_block = chunk_->LookupDestination(instr->false_block_id());
1933 
1934  __ JumpIfSmi(input, chunk_->GetAssemblyLabel(false_block));
1935  __ movq(temp, FieldOperand(input, HeapObject::kMapOffset));
1936  __ testb(FieldOperand(temp, Map::kBitFieldOffset),
1937  Immediate(1 << Map::kIsUndetectable));
1938  EmitBranch(true_block, false_block, not_zero);
1939 }
1940 
1941 
1942 void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
1943  Token::Value op = instr->op();
1944  int true_block = chunk_->LookupDestination(instr->true_block_id());
1945  int false_block = chunk_->LookupDestination(instr->false_block_id());
1946 
1947  Handle<Code> ic = CompareIC::GetUninitialized(op);
1948  CallCode(ic, RelocInfo::CODE_TARGET, instr);
1949 
1950  Condition condition = TokenToCondition(op, false);
1951  __ testq(rax, rax);
1952 
1953  EmitBranch(true_block, false_block, condition);
1954 }
1955 
1956 
1957 static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
1958  InstanceType from = instr->from();
1959  InstanceType to = instr->to();
1960  if (from == FIRST_TYPE) return to;
1961  ASSERT(from == to || to == LAST_TYPE);
1962  return from;
1963 }
1964 
1965 
1966 static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
1967  InstanceType from = instr->from();
1968  InstanceType to = instr->to();
1969  if (from == to) return equal;
1970  if (to == LAST_TYPE) return above_equal;
1971  if (from == FIRST_TYPE) return below_equal;
1972  UNREACHABLE();
1973  return equal;
1974 }
1975 
1976 
1977 void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
1978  Register input = ToRegister(instr->value());
1979 
1980  int true_block = chunk_->LookupDestination(instr->true_block_id());
1981  int false_block = chunk_->LookupDestination(instr->false_block_id());
1982 
1983  Label* false_label = chunk_->GetAssemblyLabel(false_block);
1984 
1985  __ JumpIfSmi(input, false_label);
1986 
1987  __ CmpObjectType(input, TestType(instr->hydrogen()), kScratchRegister);
1988  EmitBranch(true_block, false_block, BranchCondition(instr->hydrogen()));
1989 }
1990 
1991 
1992 void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
1993  Register input = ToRegister(instr->value());
1994  Register result = ToRegister(instr->result());
1995 
1996  __ AssertString(input);
1997 
1998  __ movl(result, FieldOperand(input, String::kHashFieldOffset));
1999  ASSERT(String::kHashShift >= kSmiTagSize);
2000  __ IndexFromHash(result, result);
2001 }
2002 
2003 
2004 void LCodeGen::DoHasCachedArrayIndexAndBranch(
2005  LHasCachedArrayIndexAndBranch* instr) {
2006  Register input = ToRegister(instr->value());
2007 
2008  int true_block = chunk_->LookupDestination(instr->true_block_id());
2009  int false_block = chunk_->LookupDestination(instr->false_block_id());
2010 
2011  __ testl(FieldOperand(input, String::kHashFieldOffset),
2012  Immediate(String::kContainsCachedArrayIndexMask));
2013  EmitBranch(true_block, false_block, equal);
2014 }
2015 
2016 
2017 // Branches to a label or falls through with the answer in the z flag.
2018 // Trashes the temp register.
2019 void LCodeGen::EmitClassOfTest(Label* is_true,
2020  Label* is_false,
2021  Handle<String> class_name,
2022  Register input,
2023  Register temp,
2024  Register temp2) {
2025  ASSERT(!input.is(temp));
2026  ASSERT(!input.is(temp2));
2027  ASSERT(!temp.is(temp2));
2028 
2029  __ JumpIfSmi(input, is_false);
2030 
2031  if (class_name->IsEqualTo(CStrVector("Function"))) {
2032  // Assuming the following assertions, we can use the same compares to test
2033  // for both being a function type and being in the object type range.
2034  STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
2035  STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
2036  FIRST_SPEC_OBJECT_TYPE + 1);
2037  STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
2038  LAST_SPEC_OBJECT_TYPE - 1);
2039  STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
2040  __ CmpObjectType(input, FIRST_SPEC_OBJECT_TYPE, temp);
2041  __ j(below, is_false);
2042  __ j(equal, is_true);
2043  __ CmpInstanceType(temp, LAST_SPEC_OBJECT_TYPE);
2044  __ j(equal, is_true);
2045  } else {
2046  // Faster code path to avoid two compares: subtract lower bound from the
2047  // actual type and do a signed compare with the width of the type range.
2048  __ movq(temp, FieldOperand(input, HeapObject::kMapOffset));
2049  __ movzxbl(temp2, FieldOperand(temp, Map::kInstanceTypeOffset));
2050  __ subq(temp2, Immediate(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
2051  __ cmpq(temp2, Immediate(LAST_NONCALLABLE_SPEC_OBJECT_TYPE -
2052  FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
2053  __ j(above, is_false);
2054  }
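 // NOTE (illustrative annotation, not part of the original source): the
 // subtract-then-unsigned-compare above is the usual single-branch range
 // check. If the instance type is below FIRST_NONCALLABLE_SPEC_OBJECT_TYPE,
 // the subtraction wraps to a large unsigned value, so the single 'above'
 // test rejects types on both sides of the [FIRST, LAST] range.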
2055 
2056  // Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range.
2057  // Check if the constructor in the map is a function.
2058  __ movq(temp, FieldOperand(temp, Map::kConstructorOffset));
2059 
2060  // Objects with a non-function constructor have class 'Object'.
2061  __ CmpObjectType(temp, JS_FUNCTION_TYPE, kScratchRegister);
2062  if (class_name->IsEqualTo(CStrVector("Object"))) {
2063  __ j(not_equal, is_true);
2064  } else {
2065  __ j(not_equal, is_false);
2066  }
2067 
2068  // temp now contains the constructor function. Grab the
2069  // instance class name from there.
2070  __ movq(temp, FieldOperand(temp, JSFunction::kSharedFunctionInfoOffset));
2071  __ movq(temp, FieldOperand(temp,
2072  SharedFunctionInfo::kInstanceClassNameOffset));
2073  // The class name we are testing against is a symbol because it's a literal.
2074  // The name in the constructor is a symbol because of the way the context is
2075  // booted. This routine isn't expected to work for random API-created
2076  // classes and it doesn't have to because you can't access it with natives
2077  // syntax. Since both sides are symbols it is sufficient to use an identity
2078  // comparison.
2079  ASSERT(class_name->IsSymbol());
2080  __ Cmp(temp, class_name);
2081  // End with the answer in the z flag.
2082 }
2083 
2084 
2085 void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
2086  Register input = ToRegister(instr->value());
2087  Register temp = ToRegister(instr->temp());
2088  Register temp2 = ToRegister(instr->temp2());
2089  Handle<String> class_name = instr->hydrogen()->class_name();
2090 
2091  int true_block = chunk_->LookupDestination(instr->true_block_id());
2092  int false_block = chunk_->LookupDestination(instr->false_block_id());
2093 
2094  Label* true_label = chunk_->GetAssemblyLabel(true_block);
2095  Label* false_label = chunk_->GetAssemblyLabel(false_block);
2096 
2097  EmitClassOfTest(true_label, false_label, class_name, input, temp, temp2);
2098 
2099  EmitBranch(true_block, false_block, equal);
2100 }
2101 
2102 
2103 void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
2104  Register reg = ToRegister(instr->value());
2105  int true_block = instr->true_block_id();
2106  int false_block = instr->false_block_id();
2107 
2108  __ Cmp(FieldOperand(reg, HeapObject::kMapOffset), instr->map());
2109  EmitBranch(true_block, false_block, equal);
2110 }
2111 
2112 
2113 void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
2114  InstanceofStub stub(InstanceofStub::kNoFlags);
2115  __ push(ToRegister(instr->left()));
2116  __ push(ToRegister(instr->right()));
2117  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
2118  Label true_value, done;
2119  __ testq(rax, rax);
2120  __ j(zero, &true_value, Label::kNear);
2121  __ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex);
2122  __ jmp(&done, Label::kNear);
2123  __ bind(&true_value);
2124  __ LoadRoot(ToRegister(instr->result()), Heap::kTrueValueRootIndex);
2125  __ bind(&done);
2126 }
2127 
2128 
2129 void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
2130  class DeferredInstanceOfKnownGlobal: public LDeferredCode {
2131  public:
2132  DeferredInstanceOfKnownGlobal(LCodeGen* codegen,
2133  LInstanceOfKnownGlobal* instr)
2134  : LDeferredCode(codegen), instr_(instr) { }
2135  virtual void Generate() {
2136  codegen()->DoDeferredInstanceOfKnownGlobal(instr_, &map_check_);
2137  }
2138  virtual LInstruction* instr() { return instr_; }
2139  Label* map_check() { return &map_check_; }
2140  private:
2141  LInstanceOfKnownGlobal* instr_;
2142  Label map_check_;
2143  };
2144 
2145 
2146  DeferredInstanceOfKnownGlobal* deferred;
2147  deferred = new(zone()) DeferredInstanceOfKnownGlobal(this, instr);
2148 
2149  Label done, false_result;
2150  Register object = ToRegister(instr->value());
2151 
2152  // A Smi is not an instance of anything.
2153  __ JumpIfSmi(object, &false_result);
2154 
2155  // This is the inlined call site instanceof cache. The two occurrences of the
2156  // hole value will be patched to the last map/result pair generated by the
2157  // instanceof stub.
2158  Label cache_miss;
2159  // Use a temp register to avoid memory operands with variable lengths.
2160  Register map = ToRegister(instr->temp());
2161  __ movq(map, FieldOperand(object, HeapObject::kMapOffset));
2162  __ bind(deferred->map_check()); // Label for calculating code patching.
2163  Handle<JSGlobalPropertyCell> cache_cell =
2164  factory()->NewJSGlobalPropertyCell(factory()->the_hole_value());
2165  __ movq(kScratchRegister, cache_cell, RelocInfo::GLOBAL_PROPERTY_CELL);
2166  __ cmpq(map, Operand(kScratchRegister, 0));
2167  __ j(not_equal, &cache_miss, Label::kNear);
2168  // Patched to load either true or false.
2169  __ LoadRoot(ToRegister(instr->result()), Heap::kTheHoleValueRootIndex);
2170 #ifdef DEBUG
2171  // Check that the code size between patch label and patch sites is invariant.
2172  Label end_of_patched_code;
2173  __ bind(&end_of_patched_code);
2174  ASSERT(true);
2175 #endif
2176  __ jmp(&done);
2177 
2178  // The inlined call site cache did not match. Check for null and string
2179  // before calling the deferred code.
2180  __ bind(&cache_miss); // Null is not an instance of anything.
2181  __ CompareRoot(object, Heap::kNullValueRootIndex);
2182  __ j(equal, &false_result, Label::kNear);
2183 
2184  // String values are not instances of anything.
2185  __ JumpIfNotString(object, kScratchRegister, deferred->entry());
2186 
2187  __ bind(&false_result);
2188  __ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex);
2189 
2190  __ bind(deferred->exit());
2191  __ bind(&done);
2192 }
2193 
2194 
2195 void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
2196  Label* map_check) {
2197  {
2198  PushSafepointRegistersScope scope(this);
2199  InstanceofStub::Flags flags = static_cast<InstanceofStub::Flags>(
2200  InstanceofStub::kNoFlags | InstanceofStub::kCallSiteInlineCheck);
2201  InstanceofStub stub(flags);
2202 
2203  __ push(ToRegister(instr->value()));
2204  __ PushHeapObject(instr->function());
2205 
2206  static const int kAdditionalDelta = 10;
2207  int delta =
2208  masm_->SizeOfCodeGeneratedSince(map_check) + kAdditionalDelta;
2209  ASSERT(delta >= 0);
2210  __ push_imm32(delta);
2211 
2212  // We are pushing three values on the stack but recording a
2213  // safepoint with two arguments because stub is going to
2214  // remove the third argument from the stack before jumping
2215  // to instanceof builtin on the slow path.
2216  CallCodeGeneric(stub.GetCode(),
2217  RelocInfo::CODE_TARGET,
2218  instr,
2219  RECORD_SAFEPOINT_WITH_REGISTERS,
2220  2);
2221  ASSERT(delta == masm_->SizeOfCodeGeneratedSince(map_check));
2222  LEnvironment* env = instr->GetDeferredLazyDeoptimizationEnvironment();
2223  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
2224  // Move result to a register that survives the end of the
2225  // PushSafepointRegisterScope.
2226  __ movq(kScratchRegister, rax);
2227  }
2228  __ testq(kScratchRegister, kScratchRegister);
2229  Label load_false;
2230  Label done;
2231  __ j(not_zero, &load_false);
2232  __ LoadRoot(rax, Heap::kTrueValueRootIndex);
2233  __ jmp(&done);
2234  __ bind(&load_false);
2235  __ LoadRoot(rax, Heap::kFalseValueRootIndex);
2236  __ bind(&done);
2237 }
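 // NOTE (illustrative annotation, not part of the original source): the
 // delta pushed above is the byte distance from the inlined map-check site
 // (deferred->map_check()) to the stub call, letting the InstanceofStub find
 // and patch the inlined cache with the map/result pair it computes;
 // kAdditionalDelta accounts for the instructions emitted between the
 // push_imm32 and the call, which the trailing ASSERT re-checks.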
2238 
2239 
2240 void LCodeGen::DoCmpT(LCmpT* instr) {
2241  Token::Value op = instr->op();
2242 
2243  Handle<Code> ic = CompareIC::GetUninitialized(op);
2244  CallCode(ic, RelocInfo::CODE_TARGET, instr);
2245 
2246  Condition condition = TokenToCondition(op, false);
2247  Label true_value, done;
2248  __ testq(rax, rax);
2249  __ j(condition, &true_value, Label::kNear);
2250  __ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex);
2251  __ jmp(&done, Label::kNear);
2252  __ bind(&true_value);
2253  __ LoadRoot(ToRegister(instr->result()), Heap::kTrueValueRootIndex);
2254  __ bind(&done);
2255 }
2256 
2257 
2258 void LCodeGen::DoReturn(LReturn* instr) {
2259  if (FLAG_trace) {
2260  // Preserve the return value on the stack and rely on the runtime
2261  // call to return the value in the same register.
2262  __ push(rax);
2263  __ CallRuntime(Runtime::kTraceExit, 1);
2264  }
2265  __ movq(rsp, rbp);
2266  __ pop(rbp);
2267  __ Ret((GetParameterCount() + 1) * kPointerSize, rcx);
2268 }
2269 
2270 
2271 void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
2272  Register result = ToRegister(instr->result());
2273  __ LoadGlobalCell(result, instr->hydrogen()->cell());
2274  if (instr->hydrogen()->RequiresHoleCheck()) {
2275  __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
2276  DeoptimizeIf(equal, instr->environment());
2277  }
2278 }
2279 
2280 
2281 void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
2282  ASSERT(ToRegister(instr->global_object()).is(rax));
2283  ASSERT(ToRegister(instr->result()).is(rax));
2284 
2285  __ Move(rcx, instr->name());
2286  RelocInfo::Mode mode = instr->for_typeof() ? RelocInfo::CODE_TARGET :
2287  RelocInfo::CODE_TARGET_CONTEXT;
2288  Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
2289  CallCode(ic, mode, instr);
2290 }
2291 
2292 
2293 void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
2294  Register value = ToRegister(instr->value());
2295  Handle<JSGlobalPropertyCell> cell_handle = instr->hydrogen()->cell();
2296 
2297  // If the cell we are storing to contains the hole it could have
2298  // been deleted from the property dictionary. In that case, we need
2299  // to update the property details in the property dictionary to mark
2300  // it as no longer deleted. We deoptimize in that case.
2301  if (instr->hydrogen()->RequiresHoleCheck()) {
2302  // We have a temp because CompareRoot might clobber kScratchRegister.
2303  Register cell = ToRegister(instr->temp());
2304  ASSERT(!value.is(cell));
2305  __ movq(cell, cell_handle, RelocInfo::GLOBAL_PROPERTY_CELL);
2306  __ CompareRoot(Operand(cell, 0), Heap::kTheHoleValueRootIndex);
2307  DeoptimizeIf(equal, instr->environment());
2308  // Store the value.
2309  __ movq(Operand(cell, 0), value);
2310  } else {
2311  // Store the value.
2312  __ movq(kScratchRegister, cell_handle, RelocInfo::GLOBAL_PROPERTY_CELL);
2313  __ movq(Operand(kScratchRegister, 0), value);
2314  }
2315  // Cells are always rescanned, so no write barrier here.
2316 }
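 // NOTE (illustrative annotation, not part of the original source): the hole
 // check above matters for globals created by plain assignment, e.g.
 //   x = 1; delete x;
 // Deleting the property leaves the old cell holding the hole, so an
 // optimized store through that stale cell must deoptimize rather than
 // silently resurrect the property.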
2317 
2318 
2319 void LCodeGen::DoStoreGlobalGeneric(LStoreGlobalGeneric* instr) {
2320  ASSERT(ToRegister(instr->global_object()).is(rdx));
2321  ASSERT(ToRegister(instr->value()).is(rax));
2322 
2323  __ Move(rcx, instr->name());
2324  Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
2325  ? isolate()->builtins()->StoreIC_Initialize_Strict()
2326  : isolate()->builtins()->StoreIC_Initialize();
2327  CallCode(ic, RelocInfo::CODE_TARGET_CONTEXT, instr);
2328 }
2329 
2330 
2331 void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
2332  Register context = ToRegister(instr->context());
2333  Register result = ToRegister(instr->result());
2334  __ movq(result, ContextOperand(context, instr->slot_index()));
2335  if (instr->hydrogen()->RequiresHoleCheck()) {
2336  __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
2337  if (instr->hydrogen()->DeoptimizesOnHole()) {
2338  DeoptimizeIf(equal, instr->environment());
2339  } else {
2340  Label is_not_hole;
2341  __ j(not_equal, &is_not_hole, Label::kNear);
2342  __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
2343  __ bind(&is_not_hole);
2344  }
2345  }
2346 }
2347 
2348 
2349 void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
2350  Register context = ToRegister(instr->context());
2351  Register value = ToRegister(instr->value());
2352 
2353  Operand target = ContextOperand(context, instr->slot_index());
2354 
2355  Label skip_assignment;
2356  if (instr->hydrogen()->RequiresHoleCheck()) {
2357  __ CompareRoot(target, Heap::kTheHoleValueRootIndex);
2358  if (instr->hydrogen()->DeoptimizesOnHole()) {
2359  DeoptimizeIf(equal, instr->environment());
2360  } else {
2361  __ j(not_equal, &skip_assignment);
2362  }
2363  }
2364  __ movq(target, value);
2365 
2366  if (instr->hydrogen()->NeedsWriteBarrier()) {
2367  HType type = instr->hydrogen()->value()->type();
2368  SmiCheck check_needed =
2369  type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
2370  int offset = Context::SlotOffset(instr->slot_index());
2371  Register scratch = ToRegister(instr->temp());
2372  __ RecordWriteContextSlot(context,
2373  offset,
2374  value,
2375  scratch,
2376  kSaveFPRegs,
2377  EMIT_REMEMBERED_SET,
2378  check_needed);
2379  }
2380 
2381  __ bind(&skip_assignment);
2382 }
2383 
2384 
2385 void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
2386  Register object = ToRegister(instr->object());
2387  Register result = ToRegister(instr->result());
2388  if (instr->hydrogen()->is_in_object()) {
2389  __ movq(result, FieldOperand(object, instr->hydrogen()->offset()));
2390  } else {
2391  __ movq(result, FieldOperand(object, JSObject::kPropertiesOffset));
2392  __ movq(result, FieldOperand(result, instr->hydrogen()->offset()));
2393  }
2394 }
2395 
2396 
2397 void LCodeGen::EmitLoadFieldOrConstantFunction(Register result,
2398  Register object,
2399  Handle<Map> type,
2400  Handle<String> name,
2401  LEnvironment* env) {
2402  LookupResult lookup(isolate());
2403  type->LookupDescriptor(NULL, *name, &lookup);
2404  ASSERT(lookup.IsFound() || lookup.IsCacheable());
2405  if (lookup.IsField()) {
2406  int index = lookup.GetLocalFieldIndexFromMap(*type);
2407  int offset = index * kPointerSize;
2408  if (index < 0) {
2409  // Negative property indices are in-object properties, indexed
2410  // from the end of the fixed part of the object.
2411  __ movq(result, FieldOperand(object, offset + type->instance_size()));
2412  } else {
2413  // Non-negative property indices are in the properties array.
2414  __ movq(result, FieldOperand(object, JSObject::kPropertiesOffset));
2415  __ movq(result, FieldOperand(result, offset + FixedArray::kHeaderSize));
2416  }
2417  } else if (lookup.IsConstantFunction()) {
2418  Handle<JSFunction> function(lookup.GetConstantFunctionFromMap(*type));
2419  __ LoadHeapObject(result, function);
2420  } else {
2421  // Negative lookup.
2422  // Check prototypes.
2423  Handle<HeapObject> current(HeapObject::cast((*type)->prototype()));
2424  Heap* heap = type->GetHeap();
2425  while (*current != heap->null_value()) {
2426  __ LoadHeapObject(result, current);
2427  __ Cmp(FieldOperand(result, HeapObject::kMapOffset),
2428  Handle<Map>(current->map()));
2429  DeoptimizeIf(not_equal, env);
2430  current =
2431  Handle<HeapObject>(HeapObject::cast(current->map()->prototype()));
2432  }
2433  __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
2434  }
2435 }
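 // NOTE (illustrative annotation, not part of the original source): the
 // lookup's local field index follows V8's convention that negative indices
 // denote in-object fields, addressed backwards from the instance size, while
 // indices >= 0 live in the out-of-line properties FixedArray; that is why
 // the two branches above build the field operand differently.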
2436 
2437 
2438 // Check for cases where EmitLoadFieldOrConstantFunction needs to walk the
2439 // prototype chain, which causes unbounded code generation.
2440 static bool CompactEmit(SmallMapList* list,
2441  Handle<String> name,
2442  int i,
2443  Isolate* isolate) {
2444  Handle<Map> map = list->at(i);
2445  // If the map has ElementsKind transitions, we will generate map checks
2446  // for each kind in __ CompareMap(..., ALLOW_ELEMENTS_TRANSITION_MAPS).
2447  if (map->HasElementsTransition()) return false;
2448  LookupResult lookup(isolate);
2449  map->LookupDescriptor(NULL, *name, &lookup);
2450  return lookup.IsField() || lookup.IsConstantFunction();
2451 }
2452 
2453 
2454 void LCodeGen::DoLoadNamedFieldPolymorphic(LLoadNamedFieldPolymorphic* instr) {
2455  Register object = ToRegister(instr->object());
2456  Register result = ToRegister(instr->result());
2457 
2458  int map_count = instr->hydrogen()->types()->length();
2459  bool need_generic = instr->hydrogen()->need_generic();
2460 
2461  if (map_count == 0 && !need_generic) {
2462  DeoptimizeIf(no_condition, instr->environment());
2463  return;
2464  }
2465  Handle<String> name = instr->hydrogen()->name();
2466  Label done;
2467  bool all_are_compact = true;
2468  for (int i = 0; i < map_count; ++i) {
2469  if (!CompactEmit(instr->hydrogen()->types(), name, i, isolate())) {
2470  all_are_compact = false;
2471  break;
2472  }
2473  }
2474  for (int i = 0; i < map_count; ++i) {
2475  bool last = (i == map_count - 1);
2476  Handle<Map> map = instr->hydrogen()->types()->at(i);
2477  Label check_passed;
2478  __ CompareMap(object, map, &check_passed, ALLOW_ELEMENT_TRANSITION_MAPS);
2479  if (last && !need_generic) {
2480  DeoptimizeIf(not_equal, instr->environment());
2481  __ bind(&check_passed);
2482  EmitLoadFieldOrConstantFunction(
2483  result, object, map, name, instr->environment());
2484  } else {
2485  Label next;
2486  bool compact = all_are_compact ? true :
2487  CompactEmit(instr->hydrogen()->types(), name, i, isolate());
2488  __ j(not_equal, &next, compact ? Label::kNear : Label::kFar);
2489  __ bind(&check_passed);
2490  EmitLoadFieldOrConstantFunction(
2491  result, object, map, name, instr->environment());
2492  __ jmp(&done, all_are_compact ? Label::kNear : Label::kFar);
2493  __ bind(&next);
2494  }
2495  }
2496  if (need_generic) {
2497  __ Move(rcx, name);
2498  Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
2499  CallCode(ic, RelocInfo::CODE_TARGET, instr);
2500  }
2501  __ bind(&done);
2502 }
2503 
2504 
2505 void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
2506  ASSERT(ToRegister(instr->object()).is(rax));
2507  ASSERT(ToRegister(instr->result()).is(rax));
2508 
2509  __ Move(rcx, instr->name());
2510  Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
2511  CallCode(ic, RelocInfo::CODE_TARGET, instr);
2512 }
2513 
2514 
2515 void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
2516  Register function = ToRegister(instr->function());
2517  Register result = ToRegister(instr->result());
2518 
2519  // Check that the function really is a function.
2520  __ CmpObjectType(function, JS_FUNCTION_TYPE, result);
2521  DeoptimizeIf(not_equal, instr->environment());
2522 
2523  // Check whether the function has an instance prototype.
2524  Label non_instance;
2525  __ testb(FieldOperand(result, Map::kBitFieldOffset),
2526  Immediate(1 << Map::kHasNonInstancePrototype));
2527  __ j(not_zero, &non_instance, Label::kNear);
2528 
2529  // Get the prototype or initial map from the function.
2530  __ movq(result,
2531  FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
2532 
2533  // Check that the function has a prototype or an initial map.
2534  __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
2535  DeoptimizeIf(equal, instr->environment());
2536 
2537  // If the function does not have an initial map, we're done.
2538  Label done;
2539  __ CmpObjectType(result, MAP_TYPE, kScratchRegister);
2540  __ j(not_equal, &done, Label::kNear);
2541 
2542  // Get the prototype from the initial map.
2543  __ movq(result, FieldOperand(result, Map::kPrototypeOffset));
2544  __ jmp(&done, Label::kNear);
2545 
2546  // Non-instance prototype: Fetch prototype from constructor field
2547  // in the function's map.
2548  __ bind(&non_instance);
2549  __ movq(result, FieldOperand(result, Map::kConstructorOffset));
2550 
2551  // All done.
2552  __ bind(&done);
2553 }
2554 
2555 
2556 void LCodeGen::DoLoadElements(LLoadElements* instr) {
2557  Register result = ToRegister(instr->result());
2558  Register input = ToRegister(instr->object());
2559  __ movq(result, FieldOperand(input, JSObject::kElementsOffset));
2560  if (FLAG_debug_code) {
2561  Label done, ok, fail;
2562  __ CompareRoot(FieldOperand(result, HeapObject::kMapOffset),
2563  Heap::kFixedArrayMapRootIndex);
2564  __ j(equal, &done, Label::kNear);
2565  __ CompareRoot(FieldOperand(result, HeapObject::kMapOffset),
2566  Heap::kFixedCOWArrayMapRootIndex);
2567  __ j(equal, &done, Label::kNear);
2568  Register temp((result.is(rax)) ? rbx : rax);
2569  __ push(temp);
2570  __ movq(temp, FieldOperand(result, HeapObject::kMapOffset));
2571  __ movzxbq(temp, FieldOperand(temp, Map::kBitField2Offset));
2572  __ and_(temp, Immediate(Map::kElementsKindMask));
2573  __ shr(temp, Immediate(Map::kElementsKindShift));
2574  __ cmpl(temp, Immediate(GetInitialFastElementsKind()));
2575  __ j(less, &fail, Label::kNear);
2576  __ cmpl(temp, Immediate(TERMINAL_FAST_ELEMENTS_KIND));
2577  __ j(less_equal, &ok, Label::kNear);
2578  __ cmpl(temp, Immediate(FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND));
2579  __ j(less, &fail, Label::kNear);
2580  __ cmpl(temp, Immediate(LAST_EXTERNAL_ARRAY_ELEMENTS_KIND));
2581  __ j(less_equal, &ok, Label::kNear);
2582  __ bind(&fail);
2583  __ Abort("Check for fast or external elements failed");
2584  __ bind(&ok);
2585  __ pop(temp);
2586  __ bind(&done);
2587  }
2588 }
2589 
2590 
2591 void LCodeGen::DoLoadExternalArrayPointer(
2592  LLoadExternalArrayPointer* instr) {
2593  Register result = ToRegister(instr->result());
2594  Register input = ToRegister(instr->object());
2595  __ movq(result, FieldOperand(input,
2596  ExternalPixelArray::kExternalPointerOffset));
2597 }
2598 
2599 
2600 void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
2601  Register arguments = ToRegister(instr->arguments());
2602  Register length = ToRegister(instr->length());
2603  Register result = ToRegister(instr->result());
2604  // There are two words between the frame pointer and the last argument.
2605  // Subtracting from length accounts for one of them; add one more.
2606  if (instr->index()->IsRegister()) {
2607  __ subl(length, ToRegister(instr->index()));
2608  } else {
2609  __ subl(length, ToOperand(instr->index()));
2610  }
2611  __ movq(result, Operand(arguments, length, times_pointer_size, kPointerSize));
2612 }
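 // NOTE (illustrative annotation, not part of the original source): after the
 // subtraction, 'length' holds (length - index), so the operand above reads
 //   arguments + (length - index) * kPointerSize + kPointerSize,
 // i.e. argument 0 sits furthest from 'arguments' and the extra kPointerSize
 // skips one of the two words mentioned in the comment at the top of the
 // function.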
2613 
2614 
2615 void LCodeGen::DoLoadKeyedFastElement(LLoadKeyedFastElement* instr) {
2616  Register result = ToRegister(instr->result());
2617  LOperand* key = instr->key();
2618  if (!key->IsConstantOperand()) {
2619  Register key_reg = ToRegister(key);
2620  // Even though the HLoad/StoreKeyedFastElement instructions force the input
2621  // representation for the key to be an integer, the input gets replaced
2622  // during bound check elimination with the index argument to the bounds
2623  // check, which can be tagged, so that case must be handled here, too.
2624  if (instr->hydrogen()->key()->representation().IsTagged()) {
2625  __ SmiToInteger64(key_reg, key_reg);
2626  } else if (instr->hydrogen()->IsDehoisted()) {
2627  // Sign extend key because it could be a 32-bit negative value
2628  // and the dehoisted address computation happens in 64 bits.
2629  __ movsxlq(key_reg, key_reg);
2630  }
2631  }
2632 
2633  // Load the result.
2634  __ movq(result,
2635  BuildFastArrayOperand(instr->elements(),
2636  key,
2637  FAST_ELEMENTS,
2638  FixedArray::kHeaderSize - kHeapObjectTag,
2639  instr->additional_index()));
2640 
2641  // Check for the hole value.
2642  if (instr->hydrogen()->RequiresHoleCheck()) {
2643  if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
2644  Condition smi = __ CheckSmi(result);
2645  DeoptimizeIf(NegateCondition(smi), instr->environment());
2646  } else {
2647  __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
2648  DeoptimizeIf(equal, instr->environment());
2649  }
2650  }
2651 }
2652 
2653 
2654 void LCodeGen::DoLoadKeyedFastDoubleElement(
2655  LLoadKeyedFastDoubleElement* instr) {
2656  XMMRegister result(ToDoubleRegister(instr->result()));
2657  LOperand* key = instr->key();
2658  if (!key->IsConstantOperand()) {
2659  Register key_reg = ToRegister(key);
2660  // Even though the HLoad/StoreKeyedFastElement instructions force the input
2661  // representation for the key to be an integer, the input gets replaced
2662  // during bound check elimination with the index argument to the bounds
2663  // check, which can be tagged, so that case must be handled here, too.
2664  if (instr->hydrogen()->key()->representation().IsTagged()) {
2665  __ SmiToInteger64(key_reg, key_reg);
2666  } else if (instr->hydrogen()->IsDehoisted()) {
2667  // Sign extend key because it could be a 32-bit negative value
2668  // and the dehoisted address computation happens in 64 bits.
2669  __ movsxlq(key_reg, key_reg);
2670  }
2671  }
2672 
2673  if (instr->hydrogen()->RequiresHoleCheck()) {
2674  int offset = FixedDoubleArray::kHeaderSize - kHeapObjectTag +
2675  sizeof(kHoleNanLower32);
2676  Operand hole_check_operand = BuildFastArrayOperand(
2677  instr->elements(),
2678  key,
2679  FAST_DOUBLE_ELEMENTS,
2680  offset,
2681  instr->additional_index());
2682  __ cmpl(hole_check_operand, Immediate(kHoleNanUpper32));
2683  DeoptimizeIf(equal, instr->environment());
2684  }
2685 
2686  Operand double_load_operand = BuildFastArrayOperand(
2687  instr->elements(),
2688  key,
2689  FAST_DOUBLE_ELEMENTS,
2690  FixedDoubleArray::kHeaderSize - kHeapObjectTag,
2691  instr->additional_index());
2692  __ movsd(result, double_load_operand);
2693 }
2694 
2695 
2696 Operand LCodeGen::BuildFastArrayOperand(
2697  LOperand* elements_pointer,
2698  LOperand* key,
2699  ElementsKind elements_kind,
2700  uint32_t offset,
2701  uint32_t additional_index) {
2702  Register elements_pointer_reg = ToRegister(elements_pointer);
2703  int shift_size = ElementsKindToShiftSize(elements_kind);
2704  if (key->IsConstantOperand()) {
2705  int constant_value = ToInteger32(LConstantOperand::cast(key));
2706  if (constant_value & 0xF0000000) {
2707  Abort("array index constant value too big");
2708  }
2709  return Operand(elements_pointer_reg,
2710  ((constant_value + additional_index) << shift_size)
2711  + offset);
2712  } else {
2713  ScaleFactor scale_factor = static_cast<ScaleFactor>(shift_size);
2714  return Operand(elements_pointer_reg,
2715  ToRegister(key),
2716  scale_factor,
2717  offset + (additional_index << shift_size));
2718  }
2719 }
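 // NOTE (illustrative annotation, not part of the original source): a worked
 // example, assuming FAST_ELEMENTS on x64 where ElementsKindToShiftSize
 // returns 3 (pointer-sized elements): a constant key of 5 with
 // additional_index 0 and offset FixedArray::kHeaderSize - kHeapObjectTag
 // yields [elements_pointer_reg + (5 << 3) + offset], while a key in a
 // register uses the scaled form [base + key * 8 + offset].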
2720 
2721 
2722 void LCodeGen::DoLoadKeyedSpecializedArrayElement(
2723  LLoadKeyedSpecializedArrayElement* instr) {
2724  ElementsKind elements_kind = instr->elements_kind();
2725  LOperand* key = instr->key();
2726  if (!key->IsConstantOperand()) {
2727  Register key_reg = ToRegister(key);
2728  // Even though the HLoad/StoreKeyedFastElement instructions force the input
2729  // representation for the key to be an integer, the input gets replaced
2730  // during bound check elimination with the index argument to the bounds
2731  // check, which can be tagged, so that case must be handled here, too.
2732  if (instr->hydrogen()->key()->representation().IsTagged()) {
2733  __ SmiToInteger64(key_reg, key_reg);
2734  } else if (instr->hydrogen()->IsDehoisted()) {
2735  // Sign extend key because it could be a 32-bit negative value
2736  // and the dehoisted address computation happens in 64 bits.
2737  __ movsxlq(key_reg, key_reg);
2738  }
2739  }
2740  Operand operand(BuildFastArrayOperand(
2741  instr->external_pointer(),
2742  key,
2743  elements_kind,
2744  0,
2745  instr->additional_index()));
2746 
2747  if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
2748  XMMRegister result(ToDoubleRegister(instr->result()));
2749  __ movss(result, operand);
2750  __ cvtss2sd(result, result);
2751  } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
2752  __ movsd(ToDoubleRegister(instr->result()), operand);
2753  } else {
2754  Register result(ToRegister(instr->result()));
2755  switch (elements_kind) {
2756  case EXTERNAL_BYTE_ELEMENTS:
2757  __ movsxbq(result, operand);
2758  break;
2759  case EXTERNAL_PIXEL_ELEMENTS:
2760  case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
2761  __ movzxbq(result, operand);
2762  break;
2763  case EXTERNAL_SHORT_ELEMENTS:
2764  __ movsxwq(result, operand);
2765  break;
2766  case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
2767  __ movzxwq(result, operand);
2768  break;
2769  case EXTERNAL_INT_ELEMENTS:
2770  __ movsxlq(result, operand);
2771  break;
2772  case EXTERNAL_UNSIGNED_INT_ELEMENTS:
2773  __ movl(result, operand);
2774  if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
2775  __ testl(result, result);
2776  DeoptimizeIf(negative, instr->environment());
2777  }
2778  break;
2779  case EXTERNAL_FLOAT_ELEMENTS:
2780  case EXTERNAL_DOUBLE_ELEMENTS:
2781  case FAST_ELEMENTS:
2782  case FAST_SMI_ELEMENTS:
2783  case FAST_DOUBLE_ELEMENTS:
2784  case FAST_HOLEY_ELEMENTS:
2785  case FAST_HOLEY_SMI_ELEMENTS:
2786  case FAST_HOLEY_DOUBLE_ELEMENTS:
2787  case DICTIONARY_ELEMENTS:
2788  case NON_EXISTENT_ELEMENTS_KIND:
2789  UNREACHABLE();
2790  break;
2791  }
2792  }
2793 }
2794 
2795 
2796 void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
2797  ASSERT(ToRegister(instr->object()).is(rdx));
2798  ASSERT(ToRegister(instr->key()).is(rax));
2799 
2800  Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
2801  CallCode(ic, RelocInfo::CODE_TARGET, instr);
2802 }
2803 
2804 
2805 void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
2806  Register result = ToRegister(instr->result());
2807 
2808  if (instr->hydrogen()->from_inlined()) {
2809  __ lea(result, Operand(rsp, -2 * kPointerSize));
2810  } else {
2811  // Check for arguments adapter frame.
2812  Label done, adapted;
2813  __ movq(result, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
2814  __ Cmp(Operand(result, StandardFrameConstants::kContextOffset),
2815  Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
2816  __ j(equal, &adapted, Label::kNear);
2817 
2818  // No arguments adaptor frame.
2819  __ movq(result, rbp);
2820  __ jmp(&done, Label::kNear);
2821 
2822  // Arguments adaptor frame present.
2823  __ bind(&adapted);
2824  __ movq(result, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
2825 
2826  // Result is the frame pointer for the frame if not adapted and for the real
2827  // frame below the adaptor frame if adapted.
2828  __ bind(&done);
2829  }
2830 }
2831 
2832 
2833 void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
2834  Register result = ToRegister(instr->result());
2835 
2836  Label done;
2837 
2838  // If no arguments adaptor frame the number of arguments is fixed.
2839  if (instr->elements()->IsRegister()) {
2840  __ cmpq(rbp, ToRegister(instr->elements()));
2841  } else {
2842  __ cmpq(rbp, ToOperand(instr->elements()));
2843  }
2844  __ movl(result, Immediate(scope()->num_parameters()));
2845  __ j(equal, &done, Label::kNear);
2846 
2847  // Arguments adaptor frame present. Get argument length from there.
2848  __ movq(result, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
2849  __ SmiToInteger32(result,
2850  Operand(result,
2851  ArgumentsAdaptorFrameConstants::kLengthOffset));
2852 
2853  // Argument length is in result register.
2854  __ bind(&done);
2855 }
2856 
2857 
2858 void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
2859  Register receiver = ToRegister(instr->receiver());
2860  Register function = ToRegister(instr->function());
2861 
2862  // If the receiver is null or undefined, we have to pass the global
2863  // object as a receiver to normal functions. Values have to be
2864  // passed unchanged to builtins and strict-mode functions.
2865  Label global_object, receiver_ok;
2866 
2867  // Do not transform the receiver to object for strict mode
2868  // functions.
2869  __ movq(kScratchRegister,
2870  FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
2871  __ testb(FieldOperand(kScratchRegister,
2872  SharedFunctionInfo::kStrictModeByteOffset),
2873  Immediate(1 << SharedFunctionInfo::kStrictModeBitWithinByte));
2874  __ j(not_equal, &receiver_ok, Label::kNear);
2875 
2876  // Do not transform the receiver to object for builtins.
2877  __ testb(FieldOperand(kScratchRegister,
2878  SharedFunctionInfo::kNativeByteOffset),
2879  Immediate(1 << SharedFunctionInfo::kNativeBitWithinByte));
2880  __ j(not_equal, &receiver_ok, Label::kNear);
2881 
2882  // Normal function. Replace undefined or null with global receiver.
2883  __ CompareRoot(receiver, Heap::kNullValueRootIndex);
2884  __ j(equal, &global_object, Label::kNear);
2885  __ CompareRoot(receiver, Heap::kUndefinedValueRootIndex);
2886  __ j(equal, &global_object, Label::kNear);
2887 
2888  // The receiver should be a JS object.
2889  Condition is_smi = __ CheckSmi(receiver);
2890  DeoptimizeIf(is_smi, instr->environment());
2891  __ CmpObjectType(receiver, FIRST_SPEC_OBJECT_TYPE, kScratchRegister);
2892  DeoptimizeIf(below, instr->environment());
2893  __ jmp(&receiver_ok, Label::kNear);
2894 
2895  __ bind(&global_object);
2896  // TODO(kmillikin): We have a hydrogen value for the global object. See
2897  // if it's better to use it than to explicitly fetch it from the context
2898  // here.
2899  __ movq(receiver, ContextOperand(rsi, Context::GLOBAL_OBJECT_INDEX));
2900  __ movq(receiver,
2901  FieldOperand(receiver, JSGlobalObject::kGlobalReceiverOffset));
2902  __ bind(&receiver_ok);
2903 }
2904 
2905 
2906 void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
2907  Register receiver = ToRegister(instr->receiver());
2908  Register function = ToRegister(instr->function());
2909  Register length = ToRegister(instr->length());
2910  Register elements = ToRegister(instr->elements());
2911  ASSERT(receiver.is(rax)); // Used for parameter count.
2912  ASSERT(function.is(rdi)); // Required by InvokeFunction.
2913  ASSERT(ToRegister(instr->result()).is(rax));
2914 
2915  // Copy the arguments to this function possibly from the
2916  // adaptor frame below it.
2917  const uint32_t kArgumentsLimit = 1 * KB;
2918  __ cmpq(length, Immediate(kArgumentsLimit));
2919  DeoptimizeIf(above, instr->environment());
2920 
2921  __ push(receiver);
2922  __ movq(receiver, length);
2923 
2924  // Loop through the arguments pushing them onto the execution
2925  // stack.
2926  Label invoke, loop;
2927  // length is a small non-negative integer, due to the test above.
2928  __ testl(length, length);
2929  __ j(zero, &invoke, Label::kNear);
2930  __ bind(&loop);
2931  __ push(Operand(elements, length, times_pointer_size, 1 * kPointerSize));
2932  __ decl(length);
2933  __ j(not_zero, &loop);
2934 
2935  // Invoke the function.
2936  __ bind(&invoke);
2937  ASSERT(instr->HasPointerMap());
2938  LPointerMap* pointers = instr->pointer_map();
2939  RecordPosition(pointers->position());
2940  SafepointGenerator safepoint_generator(
2941  this, pointers, Safepoint::kLazyDeopt);
2942  ParameterCount actual(rax);
2943  __ InvokeFunction(function, actual, CALL_FUNCTION,
2944  safepoint_generator, CALL_AS_METHOD);
2945  __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
2946 }
2947 
2948 
2949 void LCodeGen::DoPushArgument(LPushArgument* instr) {
2950  LOperand* argument = instr->value();
2951  EmitPushTaggedOperand(argument);
2952 }
2953 
2954 
2955 void LCodeGen::DoDrop(LDrop* instr) {
2956  __ Drop(instr->count());
2957 }
2958 
2959 
2960 void LCodeGen::DoThisFunction(LThisFunction* instr) {
2961  Register result = ToRegister(instr->result());
2962  __ movq(result, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
2963 }
2964 
2965 
2966 void LCodeGen::DoContext(LContext* instr) {
2967  Register result = ToRegister(instr->result());
2968  __ movq(result, rsi);
2969 }
2970 
2971 
2972 void LCodeGen::DoOuterContext(LOuterContext* instr) {
2973  Register context = ToRegister(instr->context());
2974  Register result = ToRegister(instr->result());
2975  __ movq(result,
2976  Operand(context, Context::SlotOffset(Context::PREVIOUS_INDEX)));
2977 }
2978 
2979 
2980 void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
2981  __ push(rsi); // The context is the first argument.
2982  __ PushHeapObject(instr->hydrogen()->pairs());
2983  __ Push(Smi::FromInt(instr->hydrogen()->flags()));
2984  CallRuntime(Runtime::kDeclareGlobals, 3, instr);
2985 }
2986 
2987 
2988 void LCodeGen::DoGlobalObject(LGlobalObject* instr) {
2989  Register result = ToRegister(instr->result());
2990  __ movq(result, GlobalObjectOperand());
2991 }
2992 
2993 
2994 void LCodeGen::DoGlobalReceiver(LGlobalReceiver* instr) {
2995  Register global = ToRegister(instr->global());
2996  Register result = ToRegister(instr->result());
2997  __ movq(result, FieldOperand(global, GlobalObject::kGlobalReceiverOffset));
2998 }
2999 
3000 
3001 void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
3002  int arity,
3003  LInstruction* instr,
3004  CallKind call_kind,
3005  RDIState rdi_state) {
3006  bool can_invoke_directly = !function->NeedsArgumentsAdaption() ||
3007  function->shared()->formal_parameter_count() == arity;
3008 
3009  LPointerMap* pointers = instr->pointer_map();
3010  RecordPosition(pointers->position());
3011 
3012  if (can_invoke_directly) {
3013  if (rdi_state == RDI_UNINITIALIZED) {
3014  __ LoadHeapObject(rdi, function);
3015  }
3016 
3017  // Change context.
3018  __ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
3019 
3020  // Set rax to arguments count if adaption is not needed. Assumes that rax
3021  // is available to write to at this point.
3022  if (!function->NeedsArgumentsAdaption()) {
3023  __ Set(rax, arity);
3024  }
3025 
3026  // Invoke function.
3027  __ SetCallKind(rcx, call_kind);
3028  if (*function == *info()->closure()) {
3029  __ CallSelf();
3030  } else {
3031  __ call(FieldOperand(rdi, JSFunction::kCodeEntryOffset));
3032  }
3033 
3034  // Set up deoptimization.
3035  RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT, 0);
3036  } else {
3037  // We need to adapt arguments.
3038  SafepointGenerator generator(
3039  this, pointers, Safepoint::kLazyDeopt);
3040  ParameterCount count(arity);
3041  __ InvokeFunction(function, count, CALL_FUNCTION, generator, call_kind);
3042  }
3043 
3044  // Restore context.
3045  __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
3046 }
3047 
3048 
3049 void LCodeGen::DoCallConstantFunction(LCallConstantFunction* instr) {
3050  ASSERT(ToRegister(instr->result()).is(rax));
3051  CallKnownFunction(instr->function(),
3052  instr->arity(),
3053  instr,
3054  CALL_AS_METHOD,
3055  RDI_UNINITIALIZED);
3056 }
3057 
3058 
3059 void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr) {
3060  Register input_reg = ToRegister(instr->value());
3061  __ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset),
3062  Heap::kHeapNumberMapRootIndex);
3063  DeoptimizeIf(not_equal, instr->environment());
3064 
3065  Label done;
3066  Register tmp = input_reg.is(rax) ? rcx : rax;
3067  Register tmp2 = tmp.is(rcx) ? rdx : input_reg.is(rcx) ? rdx : rcx;
3068 
3069  // Preserve the value of all registers.
3070  PushSafepointRegistersScope scope(this);
3071 
3072  Label negative;
3073  __ movl(tmp, FieldOperand(input_reg, HeapNumber::kExponentOffset));
3074  // Check the sign of the argument. If the argument is positive, just
3075  // return it. We do not need to patch the stack since |input| and
3076  // |result| are the same register and |input| will be restored
3077  // unchanged by popping safepoint registers.
3078  __ testl(tmp, Immediate(HeapNumber::kSignMask));
3079  __ j(not_zero, &negative);
3080  __ jmp(&done);
3081 
3082  __ bind(&negative);
3083 
3084  Label allocated, slow;
3085  __ AllocateHeapNumber(tmp, tmp2, &slow);
3086  __ jmp(&allocated);
3087 
3088  // Slow case: Call the runtime system to do the number allocation.
3089  __ bind(&slow);
3090 
3091  CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
3092  // Set the pointer to the new heap number in tmp.
3093  if (!tmp.is(rax)) {
3094  __ movq(tmp, rax);
3095  }
3096 
3097  // Restore input_reg after call to runtime.
3098  __ LoadFromSafepointRegisterSlot(input_reg, input_reg);
3099 
3100  __ bind(&allocated);
3101  __ movq(tmp2, FieldOperand(input_reg, HeapNumber::kValueOffset));
3102  __ shl(tmp2, Immediate(1));
3103  __ shr(tmp2, Immediate(1));
3104  __ movq(FieldOperand(tmp, HeapNumber::kValueOffset), tmp2);
3105  __ StoreToSafepointRegisterSlot(input_reg, tmp);
3106 
3107  __ bind(&done);
3108 }
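 // NOTE (illustrative annotation, not part of the original source): the
 // shl/shr-by-1 pair above clears the top bit of the raw 64-bit value, i.e.
 // the IEEE-754 sign bit, turning the heap number into its absolute value
 // without touching exponent or mantissa (e.g. -2.5 becomes 2.5).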
3109 
3110 
3111 void LCodeGen::EmitIntegerMathAbs(LUnaryMathOperation* instr) {
3112  Register input_reg = ToRegister(instr->value());
3113  __ testl(input_reg, input_reg);
3114  Label is_positive;
3115  __ j(not_sign, &is_positive);
3116  __ negl(input_reg); // Sets flags.
3117  DeoptimizeIf(negative, instr->environment());
3118  __ bind(&is_positive);
3119 }
3120 
3121 
3122 void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) {
3123  // Class for deferred case.
3124  class DeferredMathAbsTaggedHeapNumber: public LDeferredCode {
3125  public:
3126  DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen,
3127  LUnaryMathOperation* instr)
3128  : LDeferredCode(codegen), instr_(instr) { }
3129  virtual void Generate() {
3130  codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
3131  }
3132  virtual LInstruction* instr() { return instr_; }
3133  private:
3134  LUnaryMathOperation* instr_;
3135  };
3136 
3137  ASSERT(instr->value()->Equals(instr->result()));
3138  Representation r = instr->hydrogen()->value()->representation();
3139 
3140  if (r.IsDouble()) {
3141  XMMRegister scratch = xmm0;
3142  XMMRegister input_reg = ToDoubleRegister(instr->value());
3143  __ xorps(scratch, scratch);
3144  __ subsd(scratch, input_reg);
3145  __ andpd(input_reg, scratch);
3146  } else if (r.IsInteger32()) {
3147  EmitIntegerMathAbs(instr);
3148  } else { // Tagged case.
3149  DeferredMathAbsTaggedHeapNumber* deferred =
3150  new(zone()) DeferredMathAbsTaggedHeapNumber(this, instr);
3151  Register input_reg = ToRegister(instr->value());
3152  // Smi check.
3153  __ JumpIfNotSmi(input_reg, deferred->entry());
3154  __ SmiToInteger32(input_reg, input_reg);
3155  EmitIntegerMathAbs(instr);
3156  __ Integer32ToSmi(input_reg, input_reg);
3157  __ bind(deferred->exit());
3158  }
3159 }
3160 
3161 
3162 void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) {
3163  XMMRegister xmm_scratch = xmm0;
3164  Register output_reg = ToRegister(instr->result());
3165  XMMRegister input_reg = ToDoubleRegister(instr->value());
3166 
3167  if (CpuFeatures::IsSupported(SSE4_1)) {
3168  CpuFeatures::Scope scope(SSE4_1);
3169  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3170  // Deoptimize if minus zero.
3171  __ movq(output_reg, input_reg);
3172  __ subq(output_reg, Immediate(1));
3173  DeoptimizeIf(overflow, instr->environment());
3174  }
3175  __ roundsd(xmm_scratch, input_reg, Assembler::kRoundDown);
3176  __ cvttsd2si(output_reg, xmm_scratch);
3177  __ cmpl(output_reg, Immediate(0x80000000));
3178  DeoptimizeIf(equal, instr->environment());
3179  } else {
3180  Label negative_sign, done;
3181  // Deoptimize on negative inputs.
3182  __ xorps(xmm_scratch, xmm_scratch); // Zero the register.
3183  __ ucomisd(input_reg, xmm_scratch);
3184  DeoptimizeIf(parity_even, instr->environment());
3185  __ j(below, &negative_sign, Label::kNear);
3186 
3187  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3188  // Check for negative zero.
3189  Label positive_sign;
3190  __ j(above, &positive_sign, Label::kNear);
3191  __ movmskpd(output_reg, input_reg);
3192  __ testq(output_reg, Immediate(1));
3193  DeoptimizeIf(not_zero, instr->environment());
3194  __ Set(output_reg, 0);
3195  __ jmp(&done);
3196  __ bind(&positive_sign);
3197  }
3198 
3199  // Use truncating instruction (OK because input is positive).
3200  __ cvttsd2si(output_reg, input_reg);
3201  // Overflow is signalled with minint.
3202  __ cmpl(output_reg, Immediate(0x80000000));
3203  DeoptimizeIf(equal, instr->environment());
3204  __ jmp(&done, Label::kNear);
3205 
3206  // Non-zero negative reaches here.
3207  __ bind(&negative_sign);
3208  // Truncate, then compare and compensate.
3209  __ cvttsd2si(output_reg, input_reg);
3210  __ cvtlsi2sd(xmm_scratch, output_reg);
3211  __ ucomisd(input_reg, xmm_scratch);
3212  __ j(equal, &done, Label::kNear);
3213  __ subl(output_reg, Immediate(1));
3214  DeoptimizeIf(overflow, instr->environment());
3215 
3216  __ bind(&done);
3217  }
3218 }
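 // NOTE (illustrative annotation, not part of the original source): cvttsd2si
 // produces 0x80000000 (INT32_MIN) when the truncated result does not fit in
 // a signed 32-bit integer or the input is NaN, which is why both paths above
 // compare against that sentinel and deoptimize on a match.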
3219 
3220 
3221 void LCodeGen::DoMathRound(LUnaryMathOperation* instr) {
3222  const XMMRegister xmm_scratch = xmm0;
3223  Register output_reg = ToRegister(instr->result());
3224  XMMRegister input_reg = ToDoubleRegister(instr->value());
3225 
3226  Label done;
3227  // xmm_scratch = 0.5
3228  __ movq(kScratchRegister, V8_INT64_C(0x3FE0000000000000), RelocInfo::NONE);
3229  __ movq(xmm_scratch, kScratchRegister);
3230  Label below_half;
3231  __ ucomisd(xmm_scratch, input_reg);
3232  // If input_reg is NaN, this doesn't jump.
3233  __ j(above, &below_half, Label::kNear);
3234  // input = input + 0.5
3235  // This addition might give a result that isn't correct for
3236  // rounding, due to loss of precision, but only for a number that's
3237  // so big that the conversion below will overflow anyway.
3238  __ addsd(xmm_scratch, input_reg);
3239  // Compute Math.floor(input).
3240  // Use truncating instruction (OK because input is positive).
3241  __ cvttsd2si(output_reg, xmm_scratch);
3242  // Overflow is signalled with minint.
3243  __ cmpl(output_reg, Immediate(0x80000000));
3244  DeoptimizeIf(equal, instr->environment());
3245  __ jmp(&done);
3246 
3247  __ bind(&below_half);
3248  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3249  // Bailout if negative (including -0).
3250  __ movq(output_reg, input_reg);
3251  __ testq(output_reg, output_reg);
3252  DeoptimizeIf(negative, instr->environment());
3253  } else {
3254  // Bailout if below -0.5, otherwise round to (positive) zero, even
3255  // if negative.
3256  // xmm_scratch = -0.5
3257  __ movq(kScratchRegister, V8_INT64_C(0xBFE0000000000000), RelocInfo::NONE);
3258  __ movq(xmm_scratch, kScratchRegister);
3259  __ ucomisd(input_reg, xmm_scratch);
3260  DeoptimizeIf(below, instr->environment());
3261  }
3262  __ xorl(output_reg, output_reg);
3263 
3264  __ bind(&done);
3265 }
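 // NOTE (illustrative annotation, not part of the original source):
 // 0x3FE0000000000000 and 0xBFE0000000000000 are the IEEE-754 bit patterns of
 // +0.5 and -0.5. For inputs >= 0.5 the code rounds via trunc(input + 0.5),
 // e.g. round(2.3) -> trunc(2.8) = 2 and round(2.5) -> trunc(3.0) = 3; smaller
 // inputs take the below_half path and produce 0 or deoptimize.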
3266 
3267 
3268 void LCodeGen::DoMathSqrt(LUnaryMathOperation* instr) {
3269  XMMRegister input_reg = ToDoubleRegister(instr->value());
3270  ASSERT(ToDoubleRegister(instr->result()).is(input_reg));
3271  __ sqrtsd(input_reg, input_reg);
3272 }
3273 
3274 
3275 void LCodeGen::DoMathPowHalf(LUnaryMathOperation* instr) {
3276  XMMRegister xmm_scratch = xmm0;
3277  XMMRegister input_reg = ToDoubleRegister(instr->value());
3278  ASSERT(ToDoubleRegister(instr->result()).is(input_reg));
3279 
3280  // Note that according to ECMA-262 15.8.2.13:
3281  // Math.pow(-Infinity, 0.5) == Infinity
3282  // Math.sqrt(-Infinity) == NaN
3283  Label done, sqrt;
3284  // Check base for -Infinity. According to IEEE-754, double-precision
3285  // -Infinity has the highest 12 bits set and the lowest 52 bits cleared.
3286  __ movq(kScratchRegister, V8_INT64_C(0xFFF0000000000000), RelocInfo::NONE);
3287  __ movq(xmm_scratch, kScratchRegister);
3288  __ ucomisd(xmm_scratch, input_reg);
3289  // Comparing -Infinity with NaN results in "unordered", which sets the
3290  // zero flag as if both were equal. However, it also sets the carry flag.
3291  __ j(not_equal, &sqrt, Label::kNear);
3292  __ j(carry, &sqrt, Label::kNear);
3293  // If input is -Infinity, return Infinity.
3294  __ xorps(input_reg, input_reg);
3295  __ subsd(input_reg, xmm_scratch);
3296  __ jmp(&done, Label::kNear);
3297 
3298  // Square root.
3299  __ bind(&sqrt);
3300  __ xorps(xmm_scratch, xmm_scratch);
3301  __ addsd(input_reg, xmm_scratch); // Convert -0 to +0.
3302  __ sqrtsd(input_reg, input_reg);
3303  __ bind(&done);
3304 }
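// Equivalent scalar logic, as an illustrative sketch (SketchPowHalf is not a
// V8 helper): Math.pow(x, 0.5) agrees with sqrt(x) except at -Infinity, and
// adding +0 first turns a -0 input into +0, exactly as the addsd above does.
#include <cmath>
#include <limits>
static double SketchPowHalf(double x) {
  if (x == -std::numeric_limits<double>::infinity()) {
    return std::numeric_limits<double>::infinity();  // ES 15.8.2.13 special case
  }
  return std::sqrt(x + 0.0);  // -0 + 0 == +0, so sqrt returns +0 rather than -0
}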
3305 
3306 
3307 void LCodeGen::DoPower(LPower* instr) {
3308  Representation exponent_type = instr->hydrogen()->right()->representation();
3309  // Having marked this as a call, we can use any registers.
3310  // Just make sure that the input/output registers are the expected ones.
3311 
3312  // Choose register conforming to calling convention (when bailing out).
3313 #ifdef _WIN64
3314  Register exponent = rdx;
3315 #else
3316  Register exponent = rdi;
3317 #endif
3318  ASSERT(!instr->right()->IsRegister() ||
3319  ToRegister(instr->right()).is(exponent));
3320  ASSERT(!instr->right()->IsDoubleRegister() ||
3321  ToDoubleRegister(instr->right()).is(xmm1));
3322  ASSERT(ToDoubleRegister(instr->left()).is(xmm2));
3323  ASSERT(ToDoubleRegister(instr->result()).is(xmm3));
3324 
3325  if (exponent_type.IsTagged()) {
3326  Label no_deopt;
3327  __ JumpIfSmi(exponent, &no_deopt);
3328  __ CmpObjectType(exponent, HEAP_NUMBER_TYPE, rcx);
3329  DeoptimizeIf(not_equal, instr->environment());
3330  __ bind(&no_deopt);
3331  MathPowStub stub(MathPowStub::TAGGED);
3332  __ CallStub(&stub);
3333  } else if (exponent_type.IsInteger32()) {
3334  MathPowStub stub(MathPowStub::INTEGER);
3335  __ CallStub(&stub);
3336  } else {
3337  ASSERT(exponent_type.IsDouble());
3338  MathPowStub stub(MathPowStub::DOUBLE);
3339  __ CallStub(&stub);
3340  }
3341 }
3342 
3343 
3344 void LCodeGen::DoRandom(LRandom* instr) {
3345  class DeferredDoRandom: public LDeferredCode {
3346  public:
3347  DeferredDoRandom(LCodeGen* codegen, LRandom* instr)
3348  : LDeferredCode(codegen), instr_(instr) { }
3349  virtual void Generate() { codegen()->DoDeferredRandom(instr_); }
3350  virtual LInstruction* instr() { return instr_; }
3351  private:
3352  LRandom* instr_;
3353  };
3354 
3355  DeferredDoRandom* deferred = new(zone()) DeferredDoRandom(this, instr);
3356 
3357  // Having marked this instruction as a call we can use any
3358  // registers.
3359  ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
3360 
3361  // Choose the right register for the first argument depending on
3362  // calling convention.
3363 #ifdef _WIN64
3364  ASSERT(ToRegister(instr->global_object()).is(rcx));
3365  Register global_object = rcx;
3366 #else
3367  ASSERT(ToRegister(instr->global_object()).is(rdi));
3368  Register global_object = rdi;
3369 #endif
3370 
3371  static const int kSeedSize = sizeof(uint32_t);
3372  STATIC_ASSERT(kPointerSize == 2 * kSeedSize);
3373 
3374  __ movq(global_object,
3375  FieldOperand(global_object, GlobalObject::kNativeContextOffset));
3376  static const int kRandomSeedOffset =
3377  FixedArray::kHeaderSize + Context::RANDOM_SEED_INDEX * kPointerSize;
3378  __ movq(rbx, FieldOperand(global_object, kRandomSeedOffset));
3379  // rbx: FixedArray of the native context's random seeds
3380 
3381  // Load state[0].
3382  __ movl(rax, FieldOperand(rbx, ByteArray::kHeaderSize));
3383  // If state[0] == 0, call runtime to initialize seeds.
3384  __ testl(rax, rax);
3385  __ j(zero, deferred->entry());
3386  // Load state[1].
3387  __ movl(rcx, FieldOperand(rbx, ByteArray::kHeaderSize + kSeedSize));
3388 
3389  // state[0] = 18273 * (state[0] & 0xFFFF) + (state[0] >> 16)
3390  // Only operate on the lower 32 bit of rax.
3391  __ movl(rdx, rax);
3392  __ andl(rdx, Immediate(0xFFFF));
3393  __ imull(rdx, rdx, Immediate(18273));
3394  __ shrl(rax, Immediate(16));
3395  __ addl(rax, rdx);
3396  // Save state[0].
3397  __ movl(FieldOperand(rbx, ByteArray::kHeaderSize), rax);
3398 
3399  // state[1] = 36969 * (state[1] & 0xFFFF) + (state[1] >> 16)
3400  __ movl(rdx, rcx);
3401  __ andl(rdx, Immediate(0xFFFF));
3402  __ imull(rdx, rdx, Immediate(36969));
3403  __ shrl(rcx, Immediate(16));
3404  __ addl(rcx, rdx);
3405  // Save state[1].
3406  __ movl(FieldOperand(rbx, ByteArray::kHeaderSize + kSeedSize), rcx);
3407 
3408  // Random bit pattern = (state[0] << 14) + (state[1] & 0x3FFFF)
3409  __ shll(rax, Immediate(14));
3410  __ andl(rcx, Immediate(0x3FFFF));
3411  __ addl(rax, rcx);
3412 
3413  __ bind(deferred->exit());
3414  // Convert 32 random bits in rax to 0.(32 random bits) in a double
3415  // by computing:
3416  // ( 1.(20 0s)(32 random bits) x 2^20 ) - (1.0 x 2^20).
3417  __ movl(rcx, Immediate(0x49800000)); // 1.0 x 2^20 as single.
3418  __ movd(xmm2, rcx);
3419  __ movd(xmm1, rax);
3420  __ cvtss2sd(xmm2, xmm2);
3421  __ xorps(xmm1, xmm2);
3422  __ subsd(xmm1, xmm2);
3423 }
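// For reference, the same pseudo-random update written as plain C++ (an
// illustrative sketch; SketchRandom and its seed array are not part of V8).
// The xorps/subsd sequence above is the usual bit trick for turning 32 random
// bits into the double bits * 2^-32 in [0, 1).
#include <cstdint>
static double SketchRandom(uint32_t state[2]) {
  state[0] = 18273 * (state[0] & 0xFFFF) + (state[0] >> 16);
  state[1] = 36969 * (state[1] & 0xFFFF) + (state[1] >> 16);
  uint32_t bits = (state[0] << 14) + (state[1] & 0x3FFFF);
  return static_cast<double>(bits) * (1.0 / 4294967296.0);  // == bits * 2^-32
}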
3424 
3425 
3426 void LCodeGen::DoDeferredRandom(LRandom* instr) {
3427  __ PrepareCallCFunction(1);
3428  __ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1);
3430  // Return value is in rax.
3431 }
3432 
3433 
3434 void LCodeGen::DoMathLog(LUnaryMathOperation* instr) {
3435  ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
3436  TranscendentalCacheStub stub(TranscendentalCache::LOG,
3437  TranscendentalCacheStub::UNTAGGED);
3438  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
3439 }
3440 
3441 
3442 void LCodeGen::DoMathTan(LUnaryMathOperation* instr) {
3443  ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
3444  TranscendentalCacheStub stub(TranscendentalCache::TAN,
3445  TranscendentalCacheStub::UNTAGGED);
3446  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
3447 }
3448 
3449 
3450 void LCodeGen::DoMathCos(LUnaryMathOperation* instr) {
3451  ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
3452  TranscendentalCacheStub stub(TranscendentalCache::COS,
3453  TranscendentalCacheStub::UNTAGGED);
3454  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
3455 }
3456 
3457 
3458 void LCodeGen::DoMathSin(LUnaryMathOperation* instr) {
3459  ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
3460  TranscendentalCacheStub stub(TranscendentalCache::SIN,
3461  TranscendentalCacheStub::UNTAGGED);
3462  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
3463 }
3464 
3465 
3466 void LCodeGen::DoUnaryMathOperation(LUnaryMathOperation* instr) {
3467  switch (instr->op()) {
3468  case kMathAbs:
3469  DoMathAbs(instr);
3470  break;
3471  case kMathFloor:
3472  DoMathFloor(instr);
3473  break;
3474  case kMathRound:
3475  DoMathRound(instr);
3476  break;
3477  case kMathSqrt:
3478  DoMathSqrt(instr);
3479  break;
3480  case kMathPowHalf:
3481  DoMathPowHalf(instr);
3482  break;
3483  case kMathCos:
3484  DoMathCos(instr);
3485  break;
3486  case kMathSin:
3487  DoMathSin(instr);
3488  break;
3489  case kMathTan:
3490  DoMathTan(instr);
3491  break;
3492  case kMathLog:
3493  DoMathLog(instr);
3494  break;
3495 
3496  default:
3497  UNREACHABLE();
3498  }
3499 }
3500 
3501 
3502 void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
3503  ASSERT(ToRegister(instr->function()).is(rdi));
3504  ASSERT(instr->HasPointerMap());
3505 
3506  if (instr->known_function().is_null()) {
3507  LPointerMap* pointers = instr->pointer_map();
3508  RecordPosition(pointers->position());
3509  SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
3510  ParameterCount count(instr->arity());
3511  __ InvokeFunction(rdi, count, CALL_FUNCTION, generator, CALL_AS_METHOD);
3512  __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
3513  } else {
3514  CallKnownFunction(instr->known_function(),
3515  instr->arity(),
3516  instr,
3517  CALL_AS_METHOD,
3518  RDI_CONTAINS_TARGET);
3519  }
3520 }
3521 
3522 
3523 void LCodeGen::DoCallKeyed(LCallKeyed* instr) {
3524  ASSERT(ToRegister(instr->key()).is(rcx));
3525  ASSERT(ToRegister(instr->result()).is(rax));
3526 
3527  int arity = instr->arity();
3528  Handle<Code> ic =
3529  isolate()->stub_cache()->ComputeKeyedCallInitialize(arity);
3530  CallCode(ic, RelocInfo::CODE_TARGET, instr);
3531  __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
3532 }
3533 
3534 
3535 void LCodeGen::DoCallNamed(LCallNamed* instr) {
3536  ASSERT(ToRegister(instr->result()).is(rax));
3537 
3538  int arity = instr->arity();
3539  RelocInfo::Mode mode = RelocInfo::CODE_TARGET;
3540  Handle<Code> ic =
3541  isolate()->stub_cache()->ComputeCallInitialize(arity, mode);
3542  __ Move(rcx, instr->name());
3543  CallCode(ic, mode, instr);
3544  __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
3545 }
3546 
3547 
3548 void LCodeGen::DoCallFunction(LCallFunction* instr) {
3549  ASSERT(ToRegister(instr->function()).is(rdi));
3550  ASSERT(ToRegister(instr->result()).is(rax));
3551 
3552  int arity = instr->arity();
3553  CallFunctionStub stub(arity, NO_CALL_FUNCTION_FLAGS);
3554  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
3555  __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
3556 }
3557 
3558 
3559 void LCodeGen::DoCallGlobal(LCallGlobal* instr) {
3560  ASSERT(ToRegister(instr->result()).is(rax));
3561  int arity = instr->arity();
3562  RelocInfo::Mode mode = RelocInfo::CODE_TARGET_CONTEXT;
3563  Handle<Code> ic =
3564  isolate()->stub_cache()->ComputeCallInitialize(arity, mode);
3565  __ Move(rcx, instr->name());
3566  CallCode(ic, mode, instr);
3567  __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
3568 }
3569 
3570 
3571 void LCodeGen::DoCallKnownGlobal(LCallKnownGlobal* instr) {
3572  ASSERT(ToRegister(instr->result()).is(rax));
3573  CallKnownFunction(instr->target(),
3574  instr->arity(),
3575  instr,
3576  CALL_AS_FUNCTION,
3577  RDI_UNINITIALIZED);
3578 }
3579 
3580 
3581 void LCodeGen::DoCallNew(LCallNew* instr) {
3582  ASSERT(ToRegister(instr->constructor()).is(rdi));
3583  ASSERT(ToRegister(instr->result()).is(rax));
3584 
3585  CallConstructStub stub(NO_CALL_FUNCTION_FLAGS);
3586  __ Set(rax, instr->arity());
3587  CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
3588 }
3589 
3590 
3591 void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
3592  CallRuntime(instr->function(), instr->arity(), instr);
3593 }
3594 
3595 
3596 void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
3597  Register object = ToRegister(instr->object());
3598  Register value = ToRegister(instr->value());
3599  int offset = instr->offset();
3600 
3601  if (!instr->transition().is_null()) {
3602  if (!instr->hydrogen()->NeedsWriteBarrierForMap()) {
3603  __ Move(FieldOperand(object, HeapObject::kMapOffset),
3604  instr->transition());
3605  } else {
3606  Register temp = ToRegister(instr->temp());
3607  __ Move(kScratchRegister, instr->transition());
3608  __ movq(FieldOperand(object, HeapObject::kMapOffset), kScratchRegister);
3609  // Update the write barrier for the map field.
3610  __ RecordWriteField(object,
3611  HeapObject::kMapOffset,
3612  kScratchRegister,
3613  temp,
3614  kSaveFPRegs,
3615  OMIT_REMEMBERED_SET,
3616  OMIT_SMI_CHECK);
3617  }
3618  }
3619 
3620  // Do the store.
3621  HType type = instr->hydrogen()->value()->type();
3622  SmiCheck check_needed =
3623  type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
3624  if (instr->is_in_object()) {
3625  __ movq(FieldOperand(object, offset), value);
3626  if (instr->hydrogen()->NeedsWriteBarrier()) {
3627  Register temp = ToRegister(instr->temp());
3628  // Update the write barrier for the object for in-object properties.
3629  __ RecordWriteField(object,
3630  offset,
3631  value,
3632  temp,
3633  kSaveFPRegs,
3634  EMIT_REMEMBERED_SET,
3635  check_needed);
3636  }
3637  } else {
3638  Register temp = ToRegister(instr->temp());
3639  __ movq(temp, FieldOperand(object, JSObject::kPropertiesOffset));
3640  __ movq(FieldOperand(temp, offset), value);
3641  if (instr->hydrogen()->NeedsWriteBarrier()) {
3642  // Update the write barrier for the properties array.
3643  // object is used as a scratch register.
3644  __ RecordWriteField(temp,
3645  offset,
3646  value,
3647  object,
3648  kSaveFPRegs,
3649  EMIT_REMEMBERED_SET,
3650  check_needed);
3651  }
3652  }
3653 }
3654 
3655 
3656 void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
3657  ASSERT(ToRegister(instr->object()).is(rdx));
3658  ASSERT(ToRegister(instr->value()).is(rax));
3659 
3660  __ Move(rcx, instr->hydrogen()->name());
3661  Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
3662  ? isolate()->builtins()->StoreIC_Initialize_Strict()
3663  : isolate()->builtins()->StoreIC_Initialize();
3664  CallCode(ic, RelocInfo::CODE_TARGET, instr);
3665 }
3666 
3667 
3668 void LCodeGen::DoStoreKeyedSpecializedArrayElement(
3669  LStoreKeyedSpecializedArrayElement* instr) {
3670  ElementsKind elements_kind = instr->elements_kind();
3671  LOperand* key = instr->key();
3672  if (!key->IsConstantOperand()) {
3673  Register key_reg = ToRegister(key);
3674  // Even though the HLoad/StoreKeyedFastElement instructions force the input
3675  // representation for the key to be an integer, the input gets replaced
3676  // during bound check elimination with the index argument to the bounds
3677  // check, which can be tagged, so that case must be handled here, too.
3678  if (instr->hydrogen()->key()->representation().IsTagged()) {
3679  __ SmiToInteger64(key_reg, key_reg);
3680  } else if (instr->hydrogen()->IsDehoisted()) {
3681  // Sign extend key because it could be a 32 bit negative value
3682  // and the dehoisted address computation happens in 64 bits
3683  __ movsxlq(key_reg, key_reg);
3684  }
3685  }
3686  Operand operand(BuildFastArrayOperand(
3687  instr->external_pointer(),
3688  key,
3689  elements_kind,
3690  0,
3691  instr->additional_index()));
3692 
3693  if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
3694  XMMRegister value(ToDoubleRegister(instr->value()));
3695  __ cvtsd2ss(value, value);
3696  __ movss(operand, value);
3697  } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
3698  __ movsd(operand, ToDoubleRegister(instr->value()));
3699  } else {
3700  Register value(ToRegister(instr->value()));
3701  switch (elements_kind) {
3702  case EXTERNAL_PIXEL_ELEMENTS:
3703  case EXTERNAL_BYTE_ELEMENTS:
3704  case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
3705  __ movb(operand, value);
3706  break;
3707  case EXTERNAL_SHORT_ELEMENTS:
3708  case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
3709  __ movw(operand, value);
3710  break;
3711  case EXTERNAL_INT_ELEMENTS:
3712  case EXTERNAL_UNSIGNED_INT_ELEMENTS:
3713  __ movl(operand, value);
3714  break;
3715  case EXTERNAL_FLOAT_ELEMENTS:
3716  case EXTERNAL_DOUBLE_ELEMENTS:
3717  case FAST_ELEMENTS:
3718  case FAST_SMI_ELEMENTS:
3719  case FAST_DOUBLE_ELEMENTS:
3720  case FAST_HOLEY_ELEMENTS:
3721  case FAST_HOLEY_SMI_ELEMENTS:
3722  case FAST_HOLEY_DOUBLE_ELEMENTS:
3723  case DICTIONARY_ELEMENTS:
3724  case NON_STRICT_ARGUMENTS_ELEMENTS:
3725  UNREACHABLE();
3726  break;
3727  }
3728  }
3729 }
3730 
3731 
3732 void LCodeGen::DeoptIfTaggedButNotSmi(LEnvironment* environment,
3733  HValue* value,
3734  LOperand* operand) {
3735  if (value->representation().IsTagged() && !value->type().IsSmi()) {
3736  Condition cc;
3737  if (operand->IsRegister()) {
3738  cc = masm()->CheckSmi(ToRegister(operand));
3739  } else {
3740  cc = masm()->CheckSmi(ToOperand(operand));
3741  }
3742  DeoptimizeIf(NegateCondition(cc), environment);
3743  }
3744 }
3745 
3746 
3747 void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
3748  DeoptIfTaggedButNotSmi(instr->environment(),
3749  instr->hydrogen()->length(),
3750  instr->length());
3751  DeoptIfTaggedButNotSmi(instr->environment(),
3752  instr->hydrogen()->index(),
3753  instr->index());
3754  if (instr->length()->IsRegister()) {
3755  Register reg = ToRegister(instr->length());
3756  if (!instr->hydrogen()->length()->representation().IsTagged()) {
3757  __ AssertZeroExtended(reg);
3758  }
3759  if (instr->index()->IsConstantOperand()) {
3760  int constant_index =
3761  ToInteger32(LConstantOperand::cast(instr->index()));
3762  if (instr->hydrogen()->length()->representation().IsTagged()) {
3763  __ Cmp(reg, Smi::FromInt(constant_index));
3764  } else {
3765  __ cmpq(reg, Immediate(constant_index));
3766  }
3767  } else {
3768  Register reg2 = ToRegister(instr->index());
3769  if (!instr->hydrogen()->index()->representation().IsTagged()) {
3770  __ AssertZeroExtended(reg2);
3771  }
3772  __ cmpq(reg, reg2);
3773  }
3774  } else {
3775  Operand length = ToOperand(instr->length());
3776  if (instr->index()->IsConstantOperand()) {
3777  int constant_index =
3778  ToInteger32(LConstantOperand::cast(instr->index()));
3779  if (instr->hydrogen()->length()->representation().IsTagged()) {
3780  __ Cmp(length, Smi::FromInt(constant_index));
3781  } else {
3782  __ cmpq(length, Immediate(constant_index));
3783  }
3784  } else {
3785  __ cmpq(length, ToRegister(instr->index()));
3786  }
3787  }
3788  DeoptimizeIf(below_equal, instr->environment());
3789 }
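// Note: because the length/index comparison above is unsigned, the single
// below_equal deopt covers both failure modes at once: an index that is too
// large, and a negative index, which shows up as a value far above any valid
// length.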
3790 
3791 
3792 void LCodeGen::DoStoreKeyedFastElement(LStoreKeyedFastElement* instr) {
3793  Register value = ToRegister(instr->value());
3794  Register elements = ToRegister(instr->object());
3795  LOperand* key = instr->key();
3796  if (!key->IsConstantOperand()) {
3797  Register key_reg = ToRegister(key);
3798  // Even though the HLoad/StoreKeyedFastElement instructions force the input
3799  // representation for the key to be an integer, the input gets replaced
3800  // during bound check elimination with the index argument to the bounds
3801  // check, which can be tagged, so that case must be handled here, too.
3802  if (instr->hydrogen()->key()->representation().IsTagged()) {
3803  __ SmiToInteger64(key_reg, key_reg);
3804  } else if (instr->hydrogen()->IsDehoisted()) {
3805  // Sign extend key because it could be a 32 bit negative value
3806  // and the dehoisted address computation happens in 64 bits
3807  __ movsxlq(key_reg, key_reg);
3808  }
3809  }
3810 
3811  Operand operand =
3812  BuildFastArrayOperand(instr->object(),
3813  key,
3814  FAST_ELEMENTS,
3815  FixedArray::kHeaderSize - kHeapObjectTag,
3816  instr->additional_index());
3817 
3818  if (instr->hydrogen()->NeedsWriteBarrier()) {
3819  ASSERT(!instr->key()->IsConstantOperand());
3820  HType type = instr->hydrogen()->value()->type();
3821  SmiCheck check_needed =
3822  type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
3823  // Compute address of modified element and store it into key register.
3824  Register key_reg(ToRegister(key));
3825  __ lea(key_reg, operand);
3826  __ movq(Operand(key_reg, 0), value);
3827  __ RecordWrite(elements,
3828  key_reg,
3829  value,
3830  kSaveFPRegs,
3831  EMIT_REMEMBERED_SET,
3832  check_needed);
3833  } else {
3834  __ movq(operand, value);
3835  }
3836 }
3837 
3838 
3839 void LCodeGen::DoStoreKeyedFastDoubleElement(
3840  LStoreKeyedFastDoubleElement* instr) {
3841  XMMRegister value = ToDoubleRegister(instr->value());
3842  LOperand* key = instr->key();
3843  if (!key->IsConstantOperand()) {
3844  Register key_reg = ToRegister(key);
3845  // Even though the HLoad/StoreKeyedFastElement instructions force the input
3846  // representation for the key to be an integer, the input gets replaced
3847  // during bound check elimination with the index argument to the bounds
3848  // check, which can be tagged, so that case must be handled here, too.
3849  if (instr->hydrogen()->key()->representation().IsTagged()) {
3850  __ SmiToInteger64(key_reg, key_reg);
3851  } else if (instr->hydrogen()->IsDehoisted()) {
3852  // Sign extend key because it could be a 32 bit negative value
3853  // and the dehoisted address computation happens in 64 bits
3854  __ movsxlq(key_reg, key_reg);
3855  }
3856  }
3857 
3858  if (instr->NeedsCanonicalization()) {
3859  Label have_value;
3860 
3861  __ ucomisd(value, value);
3862  __ j(parity_odd, &have_value); // NaN.
3863 
3864  __ Set(kScratchRegister, BitCast<uint64_t>(
3865  FixedDoubleArray::canonical_not_the_hole_nan_as_double()));
3866  __ movq(value, kScratchRegister);
3867 
3868  __ bind(&have_value);
3869  }
3870 
3871  Operand double_store_operand = BuildFastArrayOperand(
3872  instr->elements(),
3873  key,
3874  FAST_DOUBLE_ELEMENTS,
3875  FixedDoubleArray::kHeaderSize - kHeapObjectTag,
3876  instr->additional_index());
3877 
3878  __ movsd(double_store_operand, value);
3879 }
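// The canonicalization above matters because a FixedDoubleArray reserves one
// particular NaN bit pattern to mark "the hole"; replacing an arbitrary NaN
// with the canonical NaN before the store keeps real values distinguishable
// from holes.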
3880 
3881 void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
3882  ASSERT(ToRegister(instr->object()).is(rdx));
3883  ASSERT(ToRegister(instr->key()).is(rcx));
3884  ASSERT(ToRegister(instr->value()).is(rax));
3885 
3886  Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
3887  ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
3888  : isolate()->builtins()->KeyedStoreIC_Initialize();
3889  CallCode(ic, RelocInfo::CODE_TARGET, instr);
3890 }
3891 
3892 
3893 void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
3894  Register object_reg = ToRegister(instr->object());
3895  Register new_map_reg = ToRegister(instr->new_map_temp());
3896 
3897  Handle<Map> from_map = instr->original_map();
3898  Handle<Map> to_map = instr->transitioned_map();
3899  ElementsKind from_kind = from_map->elements_kind();
3900  ElementsKind to_kind = to_map->elements_kind();
3901 
3902  Label not_applicable;
3903  __ Cmp(FieldOperand(object_reg, HeapObject::kMapOffset), from_map);
3904  __ j(not_equal, &not_applicable);
3905  __ movq(new_map_reg, to_map, RelocInfo::EMBEDDED_OBJECT);
3906  if (IsSimpleMapChangeTransition(from_kind, to_kind)) {
3907  __ movq(FieldOperand(object_reg, HeapObject::kMapOffset), new_map_reg);
3908  // Write barrier.
3909  ASSERT_NE(instr->temp(), NULL);
3910  __ RecordWriteField(object_reg, HeapObject::kMapOffset, new_map_reg,
3911  ToRegister(instr->temp()), kDontSaveFPRegs);
3912  } else if (IsFastSmiElementsKind(from_kind) &&
3913  IsFastDoubleElementsKind(to_kind)) {
3914  Register fixed_object_reg = ToRegister(instr->temp());
3915  ASSERT(fixed_object_reg.is(rdx));
3916  ASSERT(new_map_reg.is(rbx));
3917  __ movq(fixed_object_reg, object_reg);
3918  CallCode(isolate()->builtins()->TransitionElementsSmiToDouble(),
3919  RelocInfo::CODE_TARGET, instr);
3920  } else if (IsFastDoubleElementsKind(from_kind) &&
3921  IsFastObjectElementsKind(to_kind)) {
3922  Register fixed_object_reg = ToRegister(instr->temp());
3923  ASSERT(fixed_object_reg.is(rdx));
3924  ASSERT(new_map_reg.is(rbx));
3925  __ movq(fixed_object_reg, object_reg);
3926  CallCode(isolate()->builtins()->TransitionElementsDoubleToObject(),
3927  RelocInfo::CODE_TARGET, instr);
3928  } else {
3929  UNREACHABLE();
3930  }
3931  __ bind(&not_applicable);
3932 }
3933 
3934 
3935 void LCodeGen::DoStringAdd(LStringAdd* instr) {
3936  EmitPushTaggedOperand(instr->left());
3937  EmitPushTaggedOperand(instr->right());
3938  StringAddStub stub(NO_STRING_CHECK_IN_STUB);
3939  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
3940 }
3941 
3942 
3943 void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
3944  class DeferredStringCharCodeAt: public LDeferredCode {
3945  public:
3946  DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
3947  : LDeferredCode(codegen), instr_(instr) { }
3948  virtual void Generate() { codegen()->DoDeferredStringCharCodeAt(instr_); }
3949  virtual LInstruction* instr() { return instr_; }
3950  private:
3951  LStringCharCodeAt* instr_;
3952  };
3953 
3954  DeferredStringCharCodeAt* deferred =
3955  new(zone()) DeferredStringCharCodeAt(this, instr);
3956 
3957  StringCharLoadGenerator::Generate(masm(),
3958  ToRegister(instr->string()),
3959  ToRegister(instr->index()),
3960  ToRegister(instr->result()),
3961  deferred->entry());
3962  __ bind(deferred->exit());
3963 }
3964 
3965 
3966 void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
3967  Register string = ToRegister(instr->string());
3968  Register result = ToRegister(instr->result());
3969 
3970  // TODO(3095996): Get rid of this. For now, we need to make the
3971  // result register contain a valid pointer because it is already
3972  // contained in the register pointer map.
3973  __ Set(result, 0);
3974 
3975  PushSafepointRegistersScope scope(this);
3976  __ push(string);
3977  // Push the index as a smi. This is safe because of the checks in
3978  // DoStringCharCodeAt above.
3979  STATIC_ASSERT(String::kMaxLength <= Smi::kMaxValue);
3980  if (instr->index()->IsConstantOperand()) {
3981  int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
3982  __ Push(Smi::FromInt(const_index));
3983  } else {
3984  Register index = ToRegister(instr->index());
3985  __ Integer32ToSmi(index, index);
3986  __ push(index);
3987  }
3988  CallRuntimeFromDeferred(Runtime::kStringCharCodeAt, 2, instr);
3989  __ AssertSmi(rax);
3990  __ SmiToInteger32(rax, rax);
3991  __ StoreToSafepointRegisterSlot(result, rax);
3992 }
3993 
3994 
3995 void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
3996  class DeferredStringCharFromCode: public LDeferredCode {
3997  public:
3998  DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
3999  : LDeferredCode(codegen), instr_(instr) { }
4000  virtual void Generate() { codegen()->DoDeferredStringCharFromCode(instr_); }
4001  virtual LInstruction* instr() { return instr_; }
4002  private:
4003  LStringCharFromCode* instr_;
4004  };
4005 
4006  DeferredStringCharFromCode* deferred =
4007  new(zone()) DeferredStringCharFromCode(this, instr);
4008 
4009  ASSERT(instr->hydrogen()->value()->representation().IsInteger32());
4010  Register char_code = ToRegister(instr->char_code());
4011  Register result = ToRegister(instr->result());
4012  ASSERT(!char_code.is(result));
4013 
4014  __ cmpl(char_code, Immediate(String::kMaxAsciiCharCode));
4015  __ j(above, deferred->entry());
4016  __ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex);
4017  __ movq(result, FieldOperand(result,
4018  char_code, times_pointer_size,
4019  FixedArray::kHeaderSize));
4020  __ CompareRoot(result, Heap::kUndefinedValueRootIndex);
4021  __ j(equal, deferred->entry());
4022  __ bind(deferred->exit());
4023 }
4024 
4025 
4026 void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
4027  Register char_code = ToRegister(instr->char_code());
4028  Register result = ToRegister(instr->result());
4029 
4030  // TODO(3095996): Get rid of this. For now, we need to make the
4031  // result register contain a valid pointer because it is already
4032  // contained in the register pointer map.
4033  __ Set(result, 0);
4034 
4035  PushSafepointRegistersScope scope(this);
4036  __ Integer32ToSmi(char_code, char_code);
4037  __ push(char_code);
4038  CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr);
4039  __ StoreToSafepointRegisterSlot(result, rax);
4040 }
4041 
4042 
4043 void LCodeGen::DoStringLength(LStringLength* instr) {
4044  Register string = ToRegister(instr->string());
4045  Register result = ToRegister(instr->result());
4046  __ movq(result, FieldOperand(string, String::kLengthOffset));
4047 }
4048 
4049 
4050 void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
4051  LOperand* input = instr->value();
4052  ASSERT(input->IsRegister() || input->IsStackSlot());
4053  LOperand* output = instr->result();
4054  ASSERT(output->IsDoubleRegister());
4055  if (input->IsRegister()) {
4056  __ cvtlsi2sd(ToDoubleRegister(output), ToRegister(input));
4057  } else {
4058  __ cvtlsi2sd(ToDoubleRegister(output), ToOperand(input));
4059  }
4060 }
4061 
4062 
4063 void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
4064  LOperand* input = instr->value();
4065  LOperand* output = instr->result();
4066  LOperand* temp = instr->temp();
4067 
4068  __ LoadUint32(ToDoubleRegister(output),
4069  ToRegister(input),
4070  ToDoubleRegister(temp));
4071 }
4072 
4073 
4074 void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
4075  LOperand* input = instr->value();
4076  ASSERT(input->IsRegister() && input->Equals(instr->result()));
4077  Register reg = ToRegister(input);
4078 
4079  __ Integer32ToSmi(reg, reg);
4080 }
4081 
4082 
4083 void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
4084  class DeferredNumberTagU: public LDeferredCode {
4085  public:
4086  DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr)
4087  : LDeferredCode(codegen), instr_(instr) { }
4088  virtual void Generate() {
4089  codegen()->DoDeferredNumberTagU(instr_);
4090  }
4091  virtual LInstruction* instr() { return instr_; }
4092  private:
4093  LNumberTagU* instr_;
4094  };
4095 
4096  LOperand* input = instr->value();
4097  ASSERT(input->IsRegister() && input->Equals(instr->result()));
4098  Register reg = ToRegister(input);
4099 
4100  DeferredNumberTagU* deferred = new(zone()) DeferredNumberTagU(this, instr);
4101  __ cmpl(reg, Immediate(Smi::kMaxValue));
4102  __ j(above, deferred->entry());
4103  __ Integer32ToSmi(reg, reg);
4104  __ bind(deferred->exit());
4105 }
4106 
4107 
4108 void LCodeGen::DoDeferredNumberTagU(LNumberTagU* instr) {
4109  Label slow;
4110  Register reg = ToRegister(instr->value());
4111  Register tmp = reg.is(rax) ? rcx : rax;
4112 
4113  // Preserve the value of all registers.
4114  PushSafepointRegistersScope scope(this);
4115 
4116  Label done;
4117  // Load value into xmm1 which will be preserved across potential call to
4118  // runtime (MacroAssembler::EnterExitFrameEpilogue preserves only allocatable
4119  // XMM registers on x64).
4120  __ LoadUint32(xmm1, reg, xmm0);
4121 
4122  if (FLAG_inline_new) {
4123  __ AllocateHeapNumber(reg, tmp, &slow);
4124  __ jmp(&done, Label::kNear);
4125  }
4126 
4127  // Slow case: Call the runtime system to do the number allocation.
4128  __ bind(&slow);
4129 
4130  // Put a valid pointer value in the stack slot where the result
4131  // register is stored, as this register is in the pointer map, but contains an
4132  // integer value.
4133  __ StoreToSafepointRegisterSlot(reg, Immediate(0));
4134 
4135  CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
4136  if (!reg.is(rax)) __ movq(reg, rax);
4137 
4138  // Done. Put the value in xmm1 into the value of the allocated heap
4139  // number.
4140  __ bind(&done);
4141  __ movsd(FieldOperand(reg, HeapNumber::kValueOffset), xmm1);
4142  __ StoreToSafepointRegisterSlot(reg, reg);
4143 }
4144 
4145 
4146 void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
4147  class DeferredNumberTagD: public LDeferredCode {
4148  public:
4149  DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
4150  : LDeferredCode(codegen), instr_(instr) { }
4151  virtual void Generate() { codegen()->DoDeferredNumberTagD(instr_); }
4152  virtual LInstruction* instr() { return instr_; }
4153  private:
4154  LNumberTagD* instr_;
4155  };
4156 
4157  XMMRegister input_reg = ToDoubleRegister(instr->value());
4158  Register reg = ToRegister(instr->result());
4159  Register tmp = ToRegister(instr->temp());
4160 
4161  DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr);
4162  if (FLAG_inline_new) {
4163  __ AllocateHeapNumber(reg, tmp, deferred->entry());
4164  } else {
4165  __ jmp(deferred->entry());
4166  }
4167  __ bind(deferred->exit());
4168  __ movsd(FieldOperand(reg, HeapNumber::kValueOffset), input_reg);
4169 }
4170 
4171 
4172 void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
4173  // TODO(3095996): Get rid of this. For now, we need to make the
4174  // result register contain a valid pointer because it is already
4175  // contained in the register pointer map.
4176  Register reg = ToRegister(instr->result());
4177  __ Move(reg, Smi::FromInt(0));
4178 
4179  {
4180  PushSafepointRegistersScope scope(this);
4181  CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
4182  // Ensure that value in rax survives popping registers.
4183  __ movq(kScratchRegister, rax);
4184  }
4185  __ movq(reg, kScratchRegister);
4186 }
4187 
4188 
4189 void LCodeGen::DoSmiTag(LSmiTag* instr) {
4190  ASSERT(instr->value()->Equals(instr->result()));
4191  Register input = ToRegister(instr->value());
4192  ASSERT(!instr->hydrogen_value()->CheckFlag(HValue::kCanOverflow));
4193  __ Integer32ToSmi(input, input);
4194 }
4195 
4196 
4197 void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
4198  ASSERT(instr->value()->Equals(instr->result()));
4199  Register input = ToRegister(instr->value());
4200  if (instr->needs_check()) {
4201  Condition is_smi = __ CheckSmi(input);
4202  DeoptimizeIf(NegateCondition(is_smi), instr->environment());
4203  } else {
4204  __ AssertSmi(input);
4205  }
4206  __ SmiToInteger32(input, input);
4207 }
4208 
4209 
4210 void LCodeGen::EmitNumberUntagD(Register input_reg,
4211  XMMRegister result_reg,
4212  bool deoptimize_on_undefined,
4213  bool deoptimize_on_minus_zero,
4214  LEnvironment* env) {
4215  Label load_smi, done;
4216 
4217  // Smi check.
4218  __ JumpIfSmi(input_reg, &load_smi, Label::kNear);
4219 
4220  // Heap number map check.
4221  __ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset),
4222  Heap::kHeapNumberMapRootIndex);
4223  if (deoptimize_on_undefined) {
4224  DeoptimizeIf(not_equal, env);
4225  } else {
4226  Label heap_number;
4227  __ j(equal, &heap_number, Label::kNear);
4228 
4229  __ CompareRoot(input_reg, Heap::kUndefinedValueRootIndex);
4230  DeoptimizeIf(not_equal, env);
4231 
4232  // Convert undefined to NaN. Compute NaN as 0/0.
4233  __ xorps(result_reg, result_reg);
4234  __ divsd(result_reg, result_reg);
4235  __ jmp(&done, Label::kNear);
4236 
4237  __ bind(&heap_number);
4238  }
4239  // Heap number to XMM conversion.
4240  __ movsd(result_reg, FieldOperand(input_reg, HeapNumber::kValueOffset));
4241  if (deoptimize_on_minus_zero) {
4242  XMMRegister xmm_scratch = xmm0;
4243  __ xorps(xmm_scratch, xmm_scratch);
4244  __ ucomisd(xmm_scratch, result_reg);
4245  __ j(not_equal, &done, Label::kNear);
4246  __ movmskpd(kScratchRegister, result_reg);
4247  __ testq(kScratchRegister, Immediate(1));
4248  DeoptimizeIf(not_zero, env);
4249  }
4250  __ jmp(&done, Label::kNear);
4251 
4252  // Smi to XMM conversion
4253  __ bind(&load_smi);
4254  __ SmiToInteger32(kScratchRegister, input_reg);
4255  __ cvtlsi2sd(result_reg, kScratchRegister);
4256  __ bind(&done);
4257 }
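// The minus-zero deopt above relies on the fact that -0 compares equal to +0,
// so only the sign bit (exposed in bit 0 by movmskpd) tells them apart. As a
// standalone sketch (SketchIsMinusZero is illustrative, not a V8 helper):
#include <cstdint>
#include <cstring>
static bool SketchIsMinusZero(double value) {
  uint64_t bits;
  std::memcpy(&bits, &value, sizeof(bits));
  return value == 0.0 && (bits >> 63) != 0;  // compares equal to zero, sign bit set
}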
4258 
4259 
4260 void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
4261  Label done, heap_number;
4262  Register input_reg = ToRegister(instr->value());
4263 
4264  // Heap number map check.
4265  __ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset),
4266  Heap::kHeapNumberMapRootIndex);
4267 
4268  if (instr->truncating()) {
4269  __ j(equal, &heap_number, Label::kNear);
4270  // Check for undefined. Undefined is converted to zero for truncating
4271  // conversions.
4272  __ CompareRoot(input_reg, Heap::kUndefinedValueRootIndex);
4273  DeoptimizeIf(not_equal, instr->environment());
4274  __ Set(input_reg, 0);
4275  __ jmp(&done, Label::kNear);
4276 
4277  __ bind(&heap_number);
4278 
4279  __ movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
4280  __ cvttsd2siq(input_reg, xmm0);
4281  __ Set(kScratchRegister, V8_UINT64_C(0x8000000000000000));
4282  __ cmpq(input_reg, kScratchRegister);
4283  DeoptimizeIf(equal, instr->environment());
4284  } else {
4285  // Deoptimize if we don't have a heap number.
4286  DeoptimizeIf(not_equal, instr->environment());
4287 
4288  XMMRegister xmm_temp = ToDoubleRegister(instr->temp());
4289  __ movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
4290  __ cvttsd2si(input_reg, xmm0);
4291  __ cvtlsi2sd(xmm_temp, input_reg);
4292  __ ucomisd(xmm0, xmm_temp);
4293  DeoptimizeIf(not_equal, instr->environment());
4294  DeoptimizeIf(parity_even, instr->environment()); // NaN.
4295  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
4296  __ testl(input_reg, input_reg);
4297  __ j(not_zero, &done);
4298  __ movmskpd(input_reg, xmm0);
4299  __ andl(input_reg, Immediate(1));
4300  DeoptimizeIf(not_zero, instr->environment());
4301  }
4302  }
4303  __ bind(&done);
4304 }
4305 
4306 
4307 void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
4308  class DeferredTaggedToI: public LDeferredCode {
4309  public:
4310  DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
4311  : LDeferredCode(codegen), instr_(instr) { }
4312  virtual void Generate() { codegen()->DoDeferredTaggedToI(instr_); }
4313  virtual LInstruction* instr() { return instr_; }
4314  private:
4315  LTaggedToI* instr_;
4316  };
4317 
4318  LOperand* input = instr->value();
4319  ASSERT(input->IsRegister());
4320  ASSERT(input->Equals(instr->result()));
4321 
4322  Register input_reg = ToRegister(input);
4323  DeferredTaggedToI* deferred = new(zone()) DeferredTaggedToI(this, instr);
4324  __ JumpIfNotSmi(input_reg, deferred->entry());
4325  __ SmiToInteger32(input_reg, input_reg);
4326  __ bind(deferred->exit());
4327 }
4328 
4329 
4330 void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
4331  LOperand* input = instr->value();
4332  ASSERT(input->IsRegister());
4333  LOperand* result = instr->result();
4334  ASSERT(result->IsDoubleRegister());
4335 
4336  Register input_reg = ToRegister(input);
4337  XMMRegister result_reg = ToDoubleRegister(result);
4338 
4339  EmitNumberUntagD(input_reg, result_reg,
4340  instr->hydrogen()->deoptimize_on_undefined(),
4341  instr->hydrogen()->deoptimize_on_minus_zero(),
4342  instr->environment());
4343 }
4344 
4345 
4346 void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
4347  LOperand* input = instr->value();
4348  ASSERT(input->IsDoubleRegister());
4349  LOperand* result = instr->result();
4350  ASSERT(result->IsRegister());
4351 
4352  XMMRegister input_reg = ToDoubleRegister(input);
4353  Register result_reg = ToRegister(result);
4354 
4355  if (instr->truncating()) {
4356  // Performs a truncating conversion of a floating point number as used by
4357  // the JS bitwise operations.
4358  __ cvttsd2siq(result_reg, input_reg);
4359  __ movq(kScratchRegister, V8_INT64_C(0x8000000000000000), RelocInfo::NONE);
4360  __ cmpq(result_reg, kScratchRegister);
4361  DeoptimizeIf(equal, instr->environment());
4362  } else {
4363  __ cvttsd2si(result_reg, input_reg);
4364  __ cvtlsi2sd(xmm0, result_reg);
4365  __ ucomisd(xmm0, input_reg);
4366  DeoptimizeIf(not_equal, instr->environment());
4367  DeoptimizeIf(parity_even, instr->environment()); // NaN.
4368  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
4369  Label done;
4370  // The integer converted back is equal to the original. We
4371  // only have to test if we got -0 as an input.
4372  __ testl(result_reg, result_reg);
4373  __ j(not_zero, &done, Label::kNear);
4374  __ movmskpd(result_reg, input_reg);
4375  // Bit 0 contains the sign of the double in input_reg.
4376  // If input was positive, we are ok and return 0, otherwise
4377  // deoptimize.
4378  __ andl(result_reg, Immediate(1));
4379  DeoptimizeIf(not_zero, instr->environment());
4380  __ bind(&done);
4381  }
4382  }
4383 }
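// In the truncating branch above, cvttsd2siq yields 0x8000000000000000 (the
// "integer indefinite" value) for NaN and out-of-range inputs, so one compare
// catches every failure. A rough scalar equivalent (illustrative sketch only,
// not a V8 helper):
#include <cstdint>
static bool SketchTruncateToInt32(double input, int32_t* result) {
  if (!(input > -9223372036854775808.0 && input < 9223372036854775808.0)) {
    return false;  // NaN or outside the int64 range: the generated code deoptimizes
  }
  int64_t full = static_cast<int64_t>(input);                   // truncate toward zero
  *result = static_cast<int32_t>(static_cast<uint32_t>(full));  // keep the low 32 bits, as the register does
  return true;
}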
4384 
4385 
4386 void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
4387  LOperand* input = instr->value();
4388  Condition cc = masm()->CheckSmi(ToRegister(input));
4389  DeoptimizeIf(NegateCondition(cc), instr->environment());
4390 }
4391 
4392 
4393 void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
4394  LOperand* input = instr->value();
4395  Condition cc = masm()->CheckSmi(ToRegister(input));
4396  DeoptimizeIf(cc, instr->environment());
4397 }
4398 
4399 
4400 void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
4401  Register input = ToRegister(instr->value());
4402 
4403  __ movq(kScratchRegister, FieldOperand(input, HeapObject::kMapOffset));
4404 
4405  if (instr->hydrogen()->is_interval_check()) {
4406  InstanceType first;
4407  InstanceType last;
4408  instr->hydrogen()->GetCheckInterval(&first, &last);
4409 
4410  __ cmpb(FieldOperand(kScratchRegister, Map::kInstanceTypeOffset),
4411  Immediate(static_cast<int8_t>(first)));
4412 
4413  // If there is only one type in the interval check for equality.
4414  if (first == last) {
4415  DeoptimizeIf(not_equal, instr->environment());
4416  } else {
4417  DeoptimizeIf(below, instr->environment());
4418  // Omit check for the last type.
4419  if (last != LAST_TYPE) {
4420  __ cmpb(FieldOperand(kScratchRegister, Map::kInstanceTypeOffset),
4421  Immediate(static_cast<int8_t>(last)));
4422  DeoptimizeIf(above, instr->environment());
4423  }
4424  }
4425  } else {
4426  uint8_t mask;
4427  uint8_t tag;
4428  instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);
4429 
4430  if (IsPowerOf2(mask)) {
4431  ASSERT(tag == 0 || IsPowerOf2(tag));
4432  __ testb(FieldOperand(kScratchRegister, Map::kInstanceTypeOffset),
4433  Immediate(mask));
4434  DeoptimizeIf(tag == 0 ? not_zero : zero, instr->environment());
4435  } else {
4436  __ movzxbl(kScratchRegister,
4437  FieldOperand(kScratchRegister, Map::kInstanceTypeOffset));
4438  __ andb(kScratchRegister, Immediate(mask));
4439  __ cmpb(kScratchRegister, Immediate(tag));
4440  DeoptimizeIf(not_equal, instr->environment());
4441  }
4442  }
4443 }
4444 
4445 
4446 void LCodeGen::DoCheckFunction(LCheckFunction* instr) {
4447  Register reg = ToRegister(instr->value());
4448  Handle<JSFunction> target = instr->hydrogen()->target();
4449  if (isolate()->heap()->InNewSpace(*target)) {
4450  Handle<JSGlobalPropertyCell> cell =
4451  isolate()->factory()->NewJSGlobalPropertyCell(target);
4452  __ movq(kScratchRegister, cell, RelocInfo::GLOBAL_PROPERTY_CELL);
4453  __ cmpq(reg, Operand(kScratchRegister, 0));
4454  } else {
4455  __ Cmp(reg, target);
4456  }
4457  DeoptimizeIf(not_equal, instr->environment());
4458 }
4459 
4460 
4461 void LCodeGen::DoCheckMapCommon(Register reg,
4462  Handle<Map> map,
4463  CompareMapMode mode,
4464  LEnvironment* env) {
4465  Label success;
4466  __ CompareMap(reg, map, &success, mode);
4467  DeoptimizeIf(not_equal, env);
4468  __ bind(&success);
4469 }
4470 
4471 
4472 void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
4473  LOperand* input = instr->value();
4474  ASSERT(input->IsRegister());
4475  Register reg = ToRegister(input);
4476 
4477  Label success;
4478  SmallMapList* map_set = instr->hydrogen()->map_set();
4479  for (int i = 0; i < map_set->length() - 1; i++) {
4480  Handle<Map> map = map_set->at(i);
4481  __ CompareMap(reg, map, &success, REQUIRE_EXACT_MAP);
4482  __ j(equal, &success);
4483  }
4484  Handle<Map> map = map_set->last();
4485  DoCheckMapCommon(reg, map, REQUIRE_EXACT_MAP, instr->environment());
4486  __ bind(&success);
4487 }
4488 
4489 
4490 void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
4491  XMMRegister value_reg = ToDoubleRegister(instr->unclamped());
4492  Register result_reg = ToRegister(instr->result());
4493  __ ClampDoubleToUint8(value_reg, xmm0, result_reg);
4494 }
4495 
4496 
4497 void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
4498  ASSERT(instr->unclamped()->Equals(instr->result()));
4499  Register value_reg = ToRegister(instr->result());
4500  __ ClampUint8(value_reg);
4501 }
4502 
4503 
4504 void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
4505  ASSERT(instr->unclamped()->Equals(instr->result()));
4506  Register input_reg = ToRegister(instr->unclamped());
4507  XMMRegister temp_xmm_reg = ToDoubleRegister(instr->temp_xmm());
4508  Label is_smi, done, heap_number;
4509 
4510  __ JumpIfSmi(input_reg, &is_smi);
4511 
4512  // Check for heap number
4513  __ Cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
4514  factory()->heap_number_map());
4515  __ j(equal, &heap_number, Label::kNear);
4516 
4517  // Check for undefined. Undefined is converted to zero for clamping
4518  // conversions.
4519  __ Cmp(input_reg, factory()->undefined_value());
4520  DeoptimizeIf(not_equal, instr->environment());
4521  __ movq(input_reg, Immediate(0));
4522  __ jmp(&done, Label::kNear);
4523 
4524  // Heap number
4525  __ bind(&heap_number);
4526  __ movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
4527  __ ClampDoubleToUint8(xmm0, temp_xmm_reg, input_reg);
4528  __ jmp(&done, Label::kNear);
4529 
4530  // smi
4531  __ bind(&is_smi);
4532  __ SmiToInteger32(input_reg, input_reg);
4533  __ ClampUint8(input_reg);
4534 
4535  __ bind(&done);
4536 }
4537 
4538 
4539 void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
4540  Register reg = ToRegister(instr->temp());
4541 
4542  Handle<JSObject> holder = instr->holder();
4543  Handle<JSObject> current_prototype = instr->prototype();
4544 
4545  // Load prototype object.
4546  __ LoadHeapObject(reg, current_prototype);
4547 
4548  // Check prototype maps up to the holder.
4549  while (!current_prototype.is_identical_to(holder)) {
4550  DoCheckMapCommon(reg, Handle<Map>(current_prototype->map()),
4551  ALLOW_ELEMENT_TRANSITION_MAPS, instr->environment());
4552  current_prototype =
4553  Handle<JSObject>(JSObject::cast(current_prototype->GetPrototype()));
4554  // Load next prototype object.
4555  __ LoadHeapObject(reg, current_prototype);
4556  }
4557 
4558  // Check the holder map.
4559  DoCheckMapCommon(reg, Handle<Map>(current_prototype->map()),
4560  ALLOW_ELEMENT_TRANSITION_MAPS, instr->environment());
4561 }
4562 
4563 
4564 void LCodeGen::DoAllocateObject(LAllocateObject* instr) {
4565  class DeferredAllocateObject: public LDeferredCode {
4566  public:
4567  DeferredAllocateObject(LCodeGen* codegen, LAllocateObject* instr)
4568  : LDeferredCode(codegen), instr_(instr) { }
4569  virtual void Generate() { codegen()->DoDeferredAllocateObject(instr_); }
4570  virtual LInstruction* instr() { return instr_; }
4571  private:
4572  LAllocateObject* instr_;
4573  };
4574 
4575  DeferredAllocateObject* deferred =
4576  new(zone()) DeferredAllocateObject(this, instr);
4577 
4578  Register result = ToRegister(instr->result());
4579  Register scratch = ToRegister(instr->temp());
4580  Handle<JSFunction> constructor = instr->hydrogen()->constructor();
4581  Handle<Map> initial_map(constructor->initial_map());
4582  int instance_size = initial_map->instance_size();
4583  ASSERT(initial_map->pre_allocated_property_fields() +
4584  initial_map->unused_property_fields() -
4585  initial_map->inobject_properties() == 0);
4586 
4587  // Allocate memory for the object. The initial map might change when
4588  // the constructor's prototype changes, but instance size and property
4589  // counts remain unchanged (if slack tracking finished).
4590  ASSERT(!constructor->shared()->IsInobjectSlackTrackingInProgress());
4591  __ AllocateInNewSpace(instance_size,
4592  result,
4593  no_reg,
4594  scratch,
4595  deferred->entry(),
4596  TAG_OBJECT);
4597 
4598  __ bind(deferred->exit());
4599  if (FLAG_debug_code) {
4600  Label is_in_new_space;
4601  __ JumpIfInNewSpace(result, scratch, &is_in_new_space);
4602  __ Abort("Allocated object is not in new-space");
4603  __ bind(&is_in_new_space);
4604  }
4605 
4606  // Load the initial map.
4607  Register map = scratch;
4608  __ LoadHeapObject(scratch, constructor);
4609  __ movq(map, FieldOperand(scratch, JSFunction::kPrototypeOrInitialMapOffset));
4610 
4611  if (FLAG_debug_code) {
4612  __ AssertNotSmi(map);
4613  __ cmpb(FieldOperand(map, Map::kInstanceSizeOffset),
4614  Immediate(instance_size >> kPointerSizeLog2));
4615  __ Assert(equal, "Unexpected instance size");
4616  __ cmpb(FieldOperand(map, Map::kPreAllocatedPropertyFieldsOffset),
4617  Immediate(initial_map->pre_allocated_property_fields()));
4618  __ Assert(equal, "Unexpected pre-allocated property fields count");
4619  __ cmpb(FieldOperand(map, Map::kUnusedPropertyFieldsOffset),
4620  Immediate(initial_map->unused_property_fields()));
4621  __ Assert(equal, "Unexpected unused property fields count");
4622  __ cmpb(FieldOperand(map, Map::kInObjectPropertiesOffset),
4623  Immediate(initial_map->inobject_properties()));
4624  __ Assert(equal, "Unexpected in-object property fields count");
4625  }
4626 
4627  // Initialize map and fields of the newly allocated object.
4628  ASSERT(initial_map->instance_type() == JS_OBJECT_TYPE);
4629  __ movq(FieldOperand(result, JSObject::kMapOffset), map);
4630  __ LoadRoot(scratch, Heap::kEmptyFixedArrayRootIndex);
4631  __ movq(FieldOperand(result, JSObject::kElementsOffset), scratch);
4632  __ movq(FieldOperand(result, JSObject::kPropertiesOffset), scratch);
4633  if (initial_map->inobject_properties() != 0) {
4634  __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
4635  for (int i = 0; i < initial_map->inobject_properties(); i++) {
4636  int property_offset = JSObject::kHeaderSize + i * kPointerSize;
4637  __ movq(FieldOperand(result, property_offset), scratch);
4638  }
4639  }
4640 }
4641 
4642 
4643 void LCodeGen::DoDeferredAllocateObject(LAllocateObject* instr) {
4644  Register result = ToRegister(instr->result());
4645  Handle<JSFunction> constructor = instr->hydrogen()->constructor();
4646  Handle<Map> initial_map(constructor->initial_map());
4647  int instance_size = initial_map->instance_size();
4648 
4649  // TODO(3095996): Get rid of this. For now, we need to make the
4650  // result register contain a valid pointer because it is already
4651  // contained in the register pointer map.
4652  __ Set(result, 0);
4653 
4654  PushSafepointRegistersScope scope(this);
4655  __ Push(Smi::FromInt(instance_size));
4656  CallRuntimeFromDeferred(Runtime::kAllocateInNewSpace, 1, instr);
4657  __ StoreToSafepointRegisterSlot(result, rax);
4658 }
4659 
4660 
4661 void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
4662  Handle<FixedArray> literals(instr->environment()->closure()->literals());
4663  ElementsKind boilerplate_elements_kind =
4664  instr->hydrogen()->boilerplate_elements_kind();
4665 
4666  // Deopt if the array literal boilerplate ElementsKind is of a type different
4667  // than the expected one. The check isn't necessary if the boilerplate has
4668  // already been converted to TERMINAL_FAST_ELEMENTS_KIND.
4669  if (CanTransitionToMoreGeneralFastElementsKind(
4670  boilerplate_elements_kind, true)) {
4671  __ LoadHeapObject(rax, instr->hydrogen()->boilerplate_object());
4672  __ movq(rbx, FieldOperand(rax, HeapObject::kMapOffset));
4673  // Load the map's "bit field 2".
4674  __ movzxbq(rbx, FieldOperand(rbx, Map::kBitField2Offset));
4675  // Retrieve elements_kind from bit field 2.
4676  __ and_(rbx, Immediate(Map::kElementsKindMask));
4677  __ cmpb(rbx, Immediate(boilerplate_elements_kind <<
4678  Map::kElementsKindShift));
4679  DeoptimizeIf(not_equal, instr->environment());
4680  }
4681 
4682  // Set up the parameters to the stub/runtime call.
4683  __ PushHeapObject(literals);
4684  __ Push(Smi::FromInt(instr->hydrogen()->literal_index()));
4685  // Boilerplate already exists, constant elements are never accessed.
4686  // Pass an empty fixed array.
4687  __ Push(isolate()->factory()->empty_fixed_array());
4688 
4689  // Pick the right runtime function or stub to call.
4690  int length = instr->hydrogen()->length();
4691  if (instr->hydrogen()->IsCopyOnWrite()) {
4692  ASSERT(instr->hydrogen()->depth() == 1);
4693  FastCloneShallowArrayStub::Mode mode =
4694  FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS;
4695  FastCloneShallowArrayStub stub(mode, length);
4696  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
4697  } else if (instr->hydrogen()->depth() > 1) {
4698  CallRuntime(Runtime::kCreateArrayLiteral, 3, instr);
4699  } else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) {
4700  CallRuntime(Runtime::kCreateArrayLiteralShallow, 3, instr);
4701  } else {
4702  FastCloneShallowArrayStub::Mode mode =
4703  boilerplate_elements_kind == FAST_DOUBLE_ELEMENTS
4704  ? FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS
4705  : FastCloneShallowArrayStub::CLONE_ELEMENTS;
4706  FastCloneShallowArrayStub stub(mode, length);
4707  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
4708  }
4709 }
4710 
4711 
4712 void LCodeGen::EmitDeepCopy(Handle<JSObject> object,
4713  Register result,
4714  Register source,
4715  int* offset) {
4716  ASSERT(!source.is(rcx));
4717  ASSERT(!result.is(rcx));
4718 
4719  // Only elements backing stores for non-COW arrays need to be copied.
4720  Handle<FixedArrayBase> elements(object->elements());
4721  bool has_elements = elements->length() > 0 &&
4722  elements->map() != isolate()->heap()->fixed_cow_array_map();
4723 
4724  // Increase the offset so that subsequent objects end up right after
4725  // this object and its backing store.
4726  int object_offset = *offset;
4727  int object_size = object->map()->instance_size();
4728  int elements_offset = *offset + object_size;
4729  int elements_size = has_elements ? elements->Size() : 0;
4730  *offset += object_size + elements_size;
4731 
4732  // Copy object header.
4733  ASSERT(object->properties()->length() == 0);
4734  int inobject_properties = object->map()->inobject_properties();
4735  int header_size = object_size - inobject_properties * kPointerSize;
4736  for (int i = 0; i < header_size; i += kPointerSize) {
4737  if (has_elements && i == JSObject::kElementsOffset) {
4738  __ lea(rcx, Operand(result, elements_offset));
4739  } else {
4740  __ movq(rcx, FieldOperand(source, i));
4741  }
4742  __ movq(FieldOperand(result, object_offset + i), rcx);
4743  }
4744 
4745  // Copy in-object properties.
4746  for (int i = 0; i < inobject_properties; i++) {
4747  int total_offset = object_offset + object->GetInObjectPropertyOffset(i);
4748  Handle<Object> value = Handle<Object>(object->InObjectPropertyAt(i));
4749  if (value->IsJSObject()) {
4750  Handle<JSObject> value_object = Handle<JSObject>::cast(value);
4751  __ lea(rcx, Operand(result, *offset));
4752  __ movq(FieldOperand(result, total_offset), rcx);
4753  __ LoadHeapObject(source, value_object);
4754  EmitDeepCopy(value_object, result, source, offset);
4755  } else if (value->IsHeapObject()) {
4756  __ LoadHeapObject(rcx, Handle<HeapObject>::cast(value));
4757  __ movq(FieldOperand(result, total_offset), rcx);
4758  } else {
4759  __ movq(rcx, value, RelocInfo::NONE);
4760  __ movq(FieldOperand(result, total_offset), rcx);
4761  }
4762  }
4763 
4764  if (has_elements) {
4765  // Copy elements backing store header.
4766  __ LoadHeapObject(source, elements);
4767  for (int i = 0; i < FixedArray::kHeaderSize; i += kPointerSize) {
4768  __ movq(rcx, FieldOperand(source, i));
4769  __ movq(FieldOperand(result, elements_offset + i), rcx);
4770  }
4771 
4772  // Copy elements backing store content.
4773  int elements_length = elements->length();
4774  if (elements->IsFixedDoubleArray()) {
4775  Handle<FixedDoubleArray> double_array =
4776  Handle<FixedDoubleArray>::cast(elements);
4777  for (int i = 0; i < elements_length; i++) {
4778  int64_t value = double_array->get_representation(i);
4779  int total_offset =
4780  elements_offset + FixedDoubleArray::OffsetOfElementAt(i);
4781  __ movq(rcx, value, RelocInfo::NONE);
4782  __ movq(FieldOperand(result, total_offset), rcx);
4783  }
4784  } else if (elements->IsFixedArray()) {
4785  Handle<FixedArray> fast_elements = Handle<FixedArray>::cast(elements);
4786  for (int i = 0; i < elements_length; i++) {
4787  int total_offset = elements_offset + FixedArray::OffsetOfElementAt(i);
4788  Handle<Object> value(fast_elements->get(i));
4789  if (value->IsJSObject()) {
4790  Handle<JSObject> value_object = Handle<JSObject>::cast(value);
4791  __ lea(rcx, Operand(result, *offset));
4792  __ movq(FieldOperand(result, total_offset), rcx);
4793  __ LoadHeapObject(source, value_object);
4794  EmitDeepCopy(value_object, result, source, offset);
4795  } else if (value->IsHeapObject()) {
4796  __ LoadHeapObject(rcx, Handle<HeapObject>::cast(value));
4797  __ movq(FieldOperand(result, total_offset), rcx);
4798  } else {
4799  __ movq(rcx, value, RelocInfo::NONE);
4800  __ movq(FieldOperand(result, total_offset), rcx);
4801  }
4802  }
4803  } else {
4804  UNREACHABLE();
4805  }
4806  }
4807 }
4808 
4809 
4810 void LCodeGen::DoFastLiteral(LFastLiteral* instr) {
4811  int size = instr->hydrogen()->total_size();
4812  ElementsKind boilerplate_elements_kind =
4813  instr->hydrogen()->boilerplate()->GetElementsKind();
4814 
4815  // Deopt if the array literal boilerplate ElementsKind is of a type different
4816  // than the expected one. The check isn't necessary if the boilerplate has
4817  // already been converted to TERMINAL_FAST_ELEMENTS_KIND.
4818  if (CanTransitionToMoreGeneralFastElementsKind(
4819  boilerplate_elements_kind, true)) {
4820  __ LoadHeapObject(rbx, instr->hydrogen()->boilerplate());
4821  __ movq(rcx, FieldOperand(rbx, HeapObject::kMapOffset));
4822  // Load the map's "bit field 2".
4823  __ movzxbq(rcx, FieldOperand(rcx, Map::kBitField2Offset));
4824  // Retrieve elements_kind from bit field 2.
4825  __ and_(rcx, Immediate(Map::kElementsKindMask));
4826  __ cmpb(rcx, Immediate(boilerplate_elements_kind <<
4827  Map::kElementsKindShift));
4828  DeoptimizeIf(not_equal, instr->environment());
4829  }
4830 
4831  // Allocate all objects that are part of the literal in one big
4832  // allocation. This avoids multiple limit checks.
4833  Label allocated, runtime_allocate;
4834  __ AllocateInNewSpace(size, rax, rcx, rdx, &runtime_allocate, TAG_OBJECT);
4835  __ jmp(&allocated);
4836 
4837  __ bind(&runtime_allocate);
4838  __ Push(Smi::FromInt(size));
4839  CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
4840 
4841  __ bind(&allocated);
4842  int offset = 0;
4843  __ LoadHeapObject(rbx, instr->hydrogen()->boilerplate());
4844  EmitDeepCopy(instr->hydrogen()->boilerplate(), rax, rbx, &offset);
4845  ASSERT_EQ(size, offset);
4846 }
4847 
4848 
4849 void LCodeGen::DoObjectLiteral(LObjectLiteral* instr) {
4850  Handle<FixedArray> literals(instr->environment()->closure()->literals());
4851  Handle<FixedArray> constant_properties =
4852  instr->hydrogen()->constant_properties();
4853 
4854  // Set up the parameters to the stub/runtime call.
4855  __ PushHeapObject(literals);
4856  __ Push(Smi::FromInt(instr->hydrogen()->literal_index()));
4857  __ Push(constant_properties);
4858  int flags = instr->hydrogen()->fast_elements()
4859  ? ObjectLiteral::kFastElements
4860  : ObjectLiteral::kNoFlags;
4861  flags |= instr->hydrogen()->has_function()
4862  ? ObjectLiteral::kHasFunction
4863  : ObjectLiteral::kNoFlags;
4864  __ Push(Smi::FromInt(flags));
4865 
4866  // Pick the right runtime function or stub to call.
4867  int properties_count = constant_properties->length() / 2;
4868  if (instr->hydrogen()->depth() > 1) {
4869  CallRuntime(Runtime::kCreateObjectLiteral, 4, instr);
4870  } else if (flags != ObjectLiteral::kFastElements ||
4871  properties_count > FastCloneShallowObjectStub::kMaximumClonedProperties) {
4872  CallRuntime(Runtime::kCreateObjectLiteralShallow, 4, instr);
4873  } else {
4874  FastCloneShallowObjectStub stub(properties_count);
4875  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
4876  }
4877 }
4878 
4879 
4880 void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
4881  ASSERT(ToRegister(instr->value()).is(rax));
4882  __ push(rax);
4883  CallRuntime(Runtime::kToFastProperties, 1, instr);
4884 }
4885 
4886 
4887 void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
4888  Label materialized;
4889  // Registers will be used as follows:
4890  // rcx = literals array.
4891  // rbx = regexp literal.
4892  // rax = regexp literal clone.
4893  int literal_offset =
4894  FixedArray::OffsetOfElementAt(instr->hydrogen()->literal_index());
4895  __ LoadHeapObject(rcx, instr->hydrogen()->literals());
4896  __ movq(rbx, FieldOperand(rcx, literal_offset));
4897  __ CompareRoot(rbx, Heap::kUndefinedValueRootIndex);
4898  __ j(not_equal, &materialized, Label::kNear);
4899 
4900  // Create regexp literal using runtime function
4901  // Result will be in rax.
4902  __ push(rcx);
4903  __ Push(Smi::FromInt(instr->hydrogen()->literal_index()));
4904  __ Push(instr->hydrogen()->pattern());
4905  __ Push(instr->hydrogen()->flags());
4906  CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr);
4907  __ movq(rbx, rax);
4908 
4909  __ bind(&materialized);
4910  int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
4911  Label allocated, runtime_allocate;
4912  __ AllocateInNewSpace(size, rax, rcx, rdx, &runtime_allocate, TAG_OBJECT);
4913  __ jmp(&allocated);
4914 
4915  __ bind(&runtime_allocate);
4916  __ push(rbx);
4917  __ Push(Smi::FromInt(size));
4918  CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
4919  __ pop(rbx);
4920 
4921  __ bind(&allocated);
4922  // Copy the content into the newly allocated memory.
4923  // (Unroll copy loop once for better throughput).
4924  for (int i = 0; i < size - kPointerSize; i += 2 * kPointerSize) {
4925  __ movq(rdx, FieldOperand(rbx, i));
4926  __ movq(rcx, FieldOperand(rbx, i + kPointerSize));
4927  __ movq(FieldOperand(rax, i), rdx);
4928  __ movq(FieldOperand(rax, i + kPointerSize), rcx);
4929  }
4930  if ((size % (2 * kPointerSize)) != 0) {
4931  __ movq(rdx, FieldOperand(rbx, size - kPointerSize));
4932  __ movq(FieldOperand(rax, size - kPointerSize), rdx);
4933  }
4934 }
4935 
4936 
4937 void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
4938  // Use the fast case closure allocation code that allocates in new
4939  // space for nested functions that don't need literals cloning.
4940  Handle<SharedFunctionInfo> shared_info = instr->shared_info();
4941  bool pretenure = instr->hydrogen()->pretenure();
4942  if (!pretenure && shared_info->num_literals() == 0) {
4943  FastNewClosureStub stub(shared_info->language_mode());
4944  __ Push(shared_info);
4945  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
4946  } else {
4947  __ push(rsi);
4948  __ Push(shared_info);
4949  __ PushRoot(pretenure ?
4950  Heap::kTrueValueRootIndex :
4951  Heap::kFalseValueRootIndex);
4952  CallRuntime(Runtime::kNewClosure, 3, instr);
4953  }
4954 }
4955 
4956 
4957 void LCodeGen::DoTypeof(LTypeof* instr) {
4958  LOperand* input = instr->value();
4959  EmitPushTaggedOperand(input);
4960  CallRuntime(Runtime::kTypeof, 1, instr);
4961 }
4962 
4963 
4964 void LCodeGen::EmitPushTaggedOperand(LOperand* operand) {
4965  ASSERT(!operand->IsDoubleRegister());
4966  if (operand->IsConstantOperand()) {
4967  Handle<Object> object = ToHandle(LConstantOperand::cast(operand));
4968  if (object->IsSmi()) {
4969  __ Push(Handle<Smi>::cast(object));
4970  } else {
4971  __ PushHeapObject(Handle<HeapObject>::cast(object));
4972  }
4973  } else if (operand->IsRegister()) {
4974  __ push(ToRegister(operand));
4975  } else {
4976  __ push(ToOperand(operand));
4977  }
4978 }
4979 
4980 
4981 void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
4982  Register input = ToRegister(instr->value());
4983  int true_block = chunk_->LookupDestination(instr->true_block_id());
4984  int false_block = chunk_->LookupDestination(instr->false_block_id());
4985  Label* true_label = chunk_->GetAssemblyLabel(true_block);
4986  Label* false_label = chunk_->GetAssemblyLabel(false_block);
4987 
4988  Condition final_branch_condition =
4989  EmitTypeofIs(true_label, false_label, input, instr->type_literal());
4990  if (final_branch_condition != no_condition) {
4991  EmitBranch(true_block, false_block, final_branch_condition);
4992  }
4993 }
4994 
4995 
4996 Condition LCodeGen::EmitTypeofIs(Label* true_label,
4997  Label* false_label,
4998  Register input,
4999  Handle<String> type_name) {
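  // This helper either returns a condition for the caller to branch on, or
  // jumps directly to true_label/false_label itself. It returns no_condition
  // for unknown type names, where an unconditional jump to false_label has
  // already been emitted.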
5000  Condition final_branch_condition = no_condition;
5001  if (type_name->Equals(heap()->number_symbol())) {
5002  __ JumpIfSmi(input, true_label);
5003  __ CompareRoot(FieldOperand(input, HeapObject::kMapOffset),
5004  Heap::kHeapNumberMapRootIndex);
5005 
5006  final_branch_condition = equal;
5007 
5008  } else if (type_name->Equals(heap()->string_symbol())) {
5009  __ JumpIfSmi(input, false_label);
5010  __ CmpObjectType(input, FIRST_NONSTRING_TYPE, input);
5011  __ j(above_equal, false_label);
5012  __ testb(FieldOperand(input, Map::kBitFieldOffset),
5013  Immediate(1 << Map::kIsUndetectable));
5014  final_branch_condition = zero;
5015 
5016  } else if (type_name->Equals(heap()->boolean_symbol())) {
5017  __ CompareRoot(input, Heap::kTrueValueRootIndex);
5018  __ j(equal, true_label);
5019  __ CompareRoot(input, Heap::kFalseValueRootIndex);
5020  final_branch_condition = equal;
5021 
5022  } else if (FLAG_harmony_typeof && type_name->Equals(heap()->null_symbol())) {
5023  __ CompareRoot(input, Heap::kNullValueRootIndex);
5024  final_branch_condition = equal;
5025 
5026  } else if (type_name->Equals(heap()->undefined_symbol())) {
5027  __ CompareRoot(input, Heap::kUndefinedValueRootIndex);
5028  __ j(equal, true_label);
5029  __ JumpIfSmi(input, false_label);
5030  // Check for undetectable objects => true.
5031  __ movq(input, FieldOperand(input, HeapObject::kMapOffset));
5032  __ testb(FieldOperand(input, Map::kBitFieldOffset),
5033  Immediate(1 << Map::kIsUndetectable));
5034  final_branch_condition = not_zero;
5035 
5036  } else if (type_name->Equals(heap()->function_symbol())) {
5037  STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
5038  __ JumpIfSmi(input, false_label);
5039  __ CmpObjectType(input, JS_FUNCTION_TYPE, input);
5040  __ j(equal, true_label);
5041  __ CmpInstanceType(input, JS_FUNCTION_PROXY_TYPE);
5042  final_branch_condition = equal;
5043 
5044  } else if (type_name->Equals(heap()->object_symbol())) {
5045  __ JumpIfSmi(input, false_label);
5046  if (!FLAG_harmony_typeof) {
5047  __ CompareRoot(input, Heap::kNullValueRootIndex);
5048  __ j(equal, true_label);
5049  }
5050  __ CmpObjectType(input, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE, input);
5051  __ j(below, false_label);
5052  __ CmpInstanceType(input, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
5053  __ j(above, false_label);
5054  // Check for undetectable objects => false.
5055  __ testb(FieldOperand(input, Map::kBitFieldOffset),
5056  Immediate(1 << Map::kIsUndetectable));
5057  final_branch_condition = zero;
5058 
5059  } else {
5060  __ jmp(false_label);
5061  }
5062 
5063  return final_branch_condition;
5064 }
5065 
5066 
5067 void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
5068  Register temp = ToRegister(instr->temp());
5069  int true_block = chunk_->LookupDestination(instr->true_block_id());
5070  int false_block = chunk_->LookupDestination(instr->false_block_id());
5071 
5072  EmitIsConstructCall(temp);
5073  EmitBranch(true_block, false_block, equal);
5074 }
5075 
5076 
5077 void LCodeGen::EmitIsConstructCall(Register temp) {
5078  // Get the frame pointer for the calling frame.
5079  __ movq(temp, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
5080 
5081  // Skip the arguments adaptor frame if it exists.
5082  Label check_frame_marker;
5083  __ Cmp(Operand(temp, StandardFrameConstants::kContextOffset),
5084         Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
5085  __ j(not_equal, &check_frame_marker, Label::kNear);
5086  __ movq(temp, Operand(temp, StandardFrameConstants::kCallerFPOffset));
5087 
5088  // Check the marker in the calling frame.
5089  __ bind(&check_frame_marker);
5090  __ Cmp(Operand(temp, StandardFrameConstants::kMarkerOffset),
5091  Smi::FromInt(StackFrame::CONSTRUCT));
5092 }
5093 
5094 
5095 void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
5096  // Ensure that we have enough space after the previous lazy-bailout
5097  // instruction for patching the code here.
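  // If the gap since the previous lazy-deopt point is smaller than the patch
  // size, emit Nops so the deoptimizer can later overwrite this region with a
  // call sequence without clobbering the following instructions.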
5098  int current_pc = masm()->pc_offset();
5099  if (current_pc < last_lazy_deopt_pc_ + space_needed) {
5100  int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
5101  __ Nop(padding_size);
5102  }
5103 }
5104 
5105 
5106 void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
5107  EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
5108  last_lazy_deopt_pc_ = masm()->pc_offset();
5109  ASSERT(instr->HasEnvironment());
5110  LEnvironment* env = instr->environment();
5111  RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
5112  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
5113 }
5114 
5115 
5116 void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
5117  DeoptimizeIf(no_condition, instr->environment());
5118 }
5119 
5120 
5121 void LCodeGen::DoDeleteProperty(LDeleteProperty* instr) {
5122  LOperand* obj = instr->object();
5123  LOperand* key = instr->key();
5124  EmitPushTaggedOperand(obj);
5125  EmitPushTaggedOperand(key);
5126  ASSERT(instr->HasPointerMap());
5127  LPointerMap* pointers = instr->pointer_map();
5128  RecordPosition(pointers->position());
5129  // Create safepoint generator that will also ensure enough space in the
5130  // reloc info for patching in deoptimization (since this is invoking a
5131  // builtin)
5132  SafepointGenerator safepoint_generator(
5133  this, pointers, Safepoint::kLazyDeopt);
5134  __ Push(Smi::FromInt(strict_mode_flag()));
5135  __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION, safepoint_generator);
5136 }
5137 
5138 
5139 void LCodeGen::DoIn(LIn* instr) {
5140  LOperand* obj = instr->object();
5141  LOperand* key = instr->key();
5142  EmitPushTaggedOperand(key);
5143  EmitPushTaggedOperand(obj);
5144  ASSERT(instr->HasPointerMap());
5145  LPointerMap* pointers = instr->pointer_map();
5146  RecordPosition(pointers->position());
5147  SafepointGenerator safepoint_generator(
5148  this, pointers, Safepoint::kLazyDeopt);
5149  __ InvokeBuiltin(Builtins::IN, CALL_FUNCTION, safepoint_generator);
5150 }
5151 
5152 
5153 void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
5154  PushSafepointRegistersScope scope(this);
5156  __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
5157  RecordSafepointWithLazyDeopt(instr, RECORD_SAFEPOINT_WITH_REGISTERS, 0);
5158  ASSERT(instr->HasEnvironment());
5159  LEnvironment* env = instr->environment();
5160  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
5161 }
5162 
5163 
5164 void LCodeGen::DoStackCheck(LStackCheck* instr) {
5165  class DeferredStackCheck: public LDeferredCode {
5166  public:
5167  DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
5168  : LDeferredCode(codegen), instr_(instr) { }
5169  virtual void Generate() { codegen()->DoDeferredStackCheck(instr_); }
5170  virtual LInstruction* instr() { return instr_; }
5171  private:
5172  LStackCheck* instr_;
5173  };
5174 
5175  ASSERT(instr->HasEnvironment());
5176  LEnvironment* env = instr->environment();
5177  // There is no LLazyBailout instruction for stack-checks. We have to
5178  // prepare for lazy deoptimization explicitly here.
5179  if (instr->hydrogen()->is_function_entry()) {
5180  // Perform stack overflow check.
5181  Label done;
5182  __ CompareRoot(rsp, Heap::kStackLimitRootIndex);
5183  __ j(above_equal, &done, Label::kNear);
5184  StackCheckStub stub;
5185  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
5186  EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
5187  last_lazy_deopt_pc_ = masm()->pc_offset();
5188  __ bind(&done);
5189  RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
5190  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
5191  } else {
5192  ASSERT(instr->hydrogen()->is_backwards_branch());
5193  // Perform stack overflow check if this goto needs it before jumping.
5194  DeferredStackCheck* deferred_stack_check =
5195  new(zone()) DeferredStackCheck(this, instr);
5196  __ CompareRoot(rsp, Heap::kStackLimitRootIndex);
5197  __ j(below, deferred_stack_check->entry());
5198  EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
5199  last_lazy_deopt_pc_ = masm()->pc_offset();
5200  __ bind(instr->done_label());
5201  deferred_stack_check->SetExit(instr->done_label());
5202  RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
5203  // Don't record a deoptimization index for the safepoint here.
5204  // This will be done explicitly when emitting call and the safepoint in
5205  // the deferred code.
5206  }
5207 }
5208 
5209 
5210 void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
5211  // This is a pseudo-instruction that ensures that the environment here is
5212  // properly registered for deoptimization and records the assembler's PC
5213  // offset.
5214  LEnvironment* environment = instr->environment();
5215  environment->SetSpilledRegisters(instr->SpilledRegisterArray(),
5216  instr->SpilledDoubleRegisterArray());
5217 
5218  // If the environment were already registered, we would have no way of
5219  // backpatching it with the spill slot operands.
5220  ASSERT(!environment->HasBeenRegistered());
5221  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
5222  ASSERT(osr_pc_offset_ == -1);
5223  osr_pc_offset_ = masm()->pc_offset();
5224 }
5225 
5226 
5227 void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
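  // The object to iterate over is expected in rax. Undefined, null, Smi and
  // proxy receivers cannot use the enum-cache fast path, so deoptimize for
  // each of those cases.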
5228  __ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
5229  DeoptimizeIf(equal, instr->environment());
5230 
5231  Register null_value = rdi;
5232  __ LoadRoot(null_value, Heap::kNullValueRootIndex);
5233  __ cmpq(rax, null_value);
5234  DeoptimizeIf(equal, instr->environment());
5235 
5236  Condition cc = masm()->CheckSmi(rax);
5237  DeoptimizeIf(cc, instr->environment());
5238 
5239  STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
5240  __ CmpObjectType(rax, LAST_JS_PROXY_TYPE, rcx);
5241  DeoptimizeIf(below_equal, instr->environment());
5242 
5243  Label use_cache, call_runtime;
5244  __ CheckEnumCache(null_value, &call_runtime);
5245 
5246  __ movq(rax, FieldOperand(rax, HeapObject::kMapOffset));
5247  __ jmp(&use_cache, Label::kNear);
5248 
5249  // Get the set of properties to enumerate.
5250  __ bind(&call_runtime);
5251  __ push(rax);
5252  CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr);
5253 
5254  __ CompareRoot(FieldOperand(rax, HeapObject::kMapOffset),
5255  Heap::kMetaMapRootIndex);
5256  DeoptimizeIf(not_equal, instr->environment());
5257  __ bind(&use_cache);
5258 }
5259 
5260 
5261 void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
5262  Register map = ToRegister(instr->map());
5263  Register result = ToRegister(instr->result());
5264  Label load_cache, done;
5265  __ EnumLength(result, map);
5266  __ Cmp(result, Smi::FromInt(0));
5267  __ j(not_equal, &load_cache);
5268  __ LoadRoot(result, Heap::kEmptyFixedArrayRootIndex);
5269  __ jmp(&done);
5270  __ bind(&load_cache);
5271  __ LoadInstanceDescriptors(map, result);
5272  __ movq(result,
5273         FieldOperand(result, DescriptorArray::kEnumCacheOffset));
5274  __ movq(result,
5275  FieldOperand(result, FixedArray::SizeFor(instr->idx())));
5276  __ bind(&done);
5277  Condition cc = masm()->CheckSmi(result);
5278  DeoptimizeIf(cc, instr->environment());
5279 }
5280 
5281 
5282 void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
5283  Register object = ToRegister(instr->value());
5284  __ cmpq(ToRegister(instr->map()),
5285          FieldOperand(object, HeapObject::kMapOffset));
5286  DeoptimizeIf(not_equal, instr->environment());
5287 }
5288 
5289 
5290 void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
5291  Register object = ToRegister(instr->object());
5292  Register index = ToRegister(instr->index());
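  // The index encodes both the field location and which backing store to use:
  // non-negative indices address in-object fields, negative indices address
  // the out-of-object properties array.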
5293 
5294  Label out_of_object, done;
5295  __ SmiToInteger32(index, index);
5296  __ cmpl(index, Immediate(0));
5297  __ j(less, &out_of_object);
5298  __ movq(object, FieldOperand(object,
5299  index,
5300  times_pointer_size,
5301  JSObject::kHeaderSize));
5302  __ jmp(&done, Label::kNear);
5303 
5304  __ bind(&out_of_object);
5305  __ movq(object, FieldOperand(object, JSObject::kPropertiesOffset));
5306  __ negl(index);
5307  // Index is now equal to out of object property index plus 1.
5308  __ movq(object, FieldOperand(object,
5309  index,
5310  times_pointer_size,
5311  FixedArray::kHeaderSize - kPointerSize));
5312  __ bind(&done);
5313 }
5314 
5315 
5316 #undef __
5317 
5318 } } // namespace v8::internal
5319 
5320 #endif // V8_TARGET_ARCH_X64