v8 3.11.10 (node 0.8.26)
V8 is Google's open source JavaScript engine.
lithium-codegen-x64.cc
1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
4 // met:
5 //
6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided
11 // with the distribution.
12 // * Neither the name of Google Inc. nor the names of its
13 // contributors may be used to endorse or promote products derived
14 // from this software without specific prior written permission.
15 //
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 
28 #include "v8.h"
29 
30 #if defined(V8_TARGET_ARCH_X64)
31 
31 
32 #include "lithium-codegen-x64.h"
33 #include "code-stubs.h"
34 #include "stub-cache.h"
35 
36 namespace v8 {
37 namespace internal {
38 
39 
40 // When invoking builtins, we need to record the safepoint in the middle of
41 // the invoke instruction sequence generated by the macro assembler.
42 class SafepointGenerator : public CallWrapper {
43  public:
44  SafepointGenerator(LCodeGen* codegen,
45  LPointerMap* pointers,
46  Safepoint::DeoptMode mode)
47  : codegen_(codegen),
48  pointers_(pointers),
49  deopt_mode_(mode) { }
50  virtual ~SafepointGenerator() { }
51 
52  virtual void BeforeCall(int call_size) const {
53  codegen_->EnsureSpaceForLazyDeopt(Deoptimizer::patch_size() - call_size);
54  }
55 
56  virtual void AfterCall() const {
57  codegen_->RecordSafepoint(pointers_, deopt_mode_);
58  }
59 
60  private:
61  LCodeGen* codegen_;
62  LPointerMap* pointers_;
63  Safepoint::DeoptMode deopt_mode_;
64 };
65 
66 
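// [Editorial sketch, not part of the original file] SafepointGenerator above
// follows a simple "call wrapper" hook pattern: the macro assembler invokes
// BeforeCall()/AfterCall() around the emitted call so the client can first pad
// the code for a later lazy-deopt patch and then record the safepoint. A
// minimal standalone illustration of the pattern (names are made up for
// illustration, not V8 API):
struct CallHooks {
  virtual ~CallHooks() {}
  virtual void BeforeCall(int call_size) const = 0;
  virtual void AfterCall() const = 0;
};

template <typename EmitCallFn>
void EmitCallWithHooks(const CallHooks& hooks, int call_size, EmitCallFn emit) {
  hooks.BeforeCall(call_size);  // e.g. ensure room for the lazy-deopt patch
  emit();                       // emit the actual call instruction(s)
  hooks.AfterCall();            // e.g. record the safepoint for this call
}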
67 #define __ masm()->
68 
69 bool LCodeGen::GenerateCode() {
70  HPhase phase("Z_Code generation", chunk());
71  ASSERT(is_unused());
72  status_ = GENERATING;
73 
74  // Open a frame scope to indicate that there is a frame on the stack. The
75  // MANUAL indicates that the scope shouldn't actually generate code to set up
76  // the frame (that is done in GeneratePrologue).
77  FrameScope frame_scope(masm_, StackFrame::MANUAL);
78 
79  return GeneratePrologue() &&
80  GenerateBody() &&
81  GenerateDeferredCode() &&
82  GenerateJumpTable() &&
83  GenerateSafepointTable();
84 }
85 
86 
87 void LCodeGen::FinishCode(Handle<Code> code) {
88  ASSERT(is_done());
89  code->set_stack_slots(GetStackSlotCount());
90  code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
91  PopulateDeoptimizationData(code);
92 }
93 
94 
95 void LCodeGen::Abort(const char* format, ...) {
96  if (FLAG_trace_bailout) {
97  SmartArrayPointer<char> name(
98  info()->shared_info()->DebugName()->ToCString());
99  PrintF("Aborting LCodeGen in @\"%s\": ", *name);
100  va_list arguments;
101  va_start(arguments, format);
102  OS::VPrint(format, arguments);
103  va_end(arguments);
104  PrintF("\n");
105  }
106  status_ = ABORTED;
107 }
108 
109 
110 void LCodeGen::Comment(const char* format, ...) {
111  if (!FLAG_code_comments) return;
112  char buffer[4 * KB];
113  StringBuilder builder(buffer, ARRAY_SIZE(buffer));
114  va_list arguments;
115  va_start(arguments, format);
116  builder.AddFormattedList(format, arguments);
117  va_end(arguments);
118 
119  // Copy the string before recording it in the assembler to avoid
120  // issues when the stack allocated buffer goes out of scope.
121  int length = builder.position();
122  Vector<char> copy = Vector<char>::New(length + 1);
123  memcpy(copy.start(), builder.Finalize(), copy.length());
124  masm()->RecordComment(copy.start());
125 }
126 
127 
128 bool LCodeGen::GeneratePrologue() {
129  ASSERT(is_generating());
130 
131 #ifdef DEBUG
132  if (strlen(FLAG_stop_at) > 0 &&
133  info_->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
134  __ int3();
135  }
136 #endif
137 
138  // Strict mode functions need to replace the receiver with undefined
139  // when called as functions (without an explicit receiver
140  // object). rcx is zero for method calls and non-zero for function
141  // calls.
142  if (!info_->is_classic_mode() || info_->is_native()) {
143  Label ok;
144  __ testq(rcx, rcx);
145  __ j(zero, &ok, Label::kNear);
146  // +1 for return address.
147  int receiver_offset = (scope()->num_parameters() + 1) * kPointerSize;
148  __ LoadRoot(kScratchRegister, Heap::kUndefinedValueRootIndex);
149  __ movq(Operand(rsp, receiver_offset), kScratchRegister);
150  __ bind(&ok);
151  }
152 
153  __ push(rbp); // Caller's frame pointer.
154  __ movq(rbp, rsp);
155  __ push(rsi); // Callee's context.
156  __ push(rdi); // Callee's JS function.
157 
158  // Reserve space for the stack slots needed by the code.
159  int slots = GetStackSlotCount();
160  if (slots > 0) {
161  if (FLAG_debug_code) {
162  __ Set(rax, slots);
163  __ movq(kScratchRegister, kSlotsZapValue, RelocInfo::NONE);
164  Label loop;
165  __ bind(&loop);
166  __ push(kScratchRegister);
167  __ decl(rax);
168  __ j(not_zero, &loop);
169  } else {
170  __ subq(rsp, Immediate(slots * kPointerSize));
171 #ifdef _MSC_VER
172  // On Windows, you may not access the stack more than one page below
173  // the most recently mapped page. To make the allocated area randomly
174  // accessible, we write to each page in turn (the value is irrelevant).
175  const int kPageSize = 4 * KB;
176  for (int offset = slots * kPointerSize - kPageSize;
177  offset > 0;
178  offset -= kPageSize) {
179  __ movq(Operand(rsp, offset), rax);
180  }
181 #endif
182  }
183  }
184 
185  // Possibly allocate a local context.
186  int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
187  if (heap_slots > 0) {
188  Comment(";;; Allocate local context");
189  // Argument to NewContext is the function, which is still in rdi.
190  __ push(rdi);
191  if (heap_slots <= FastNewContextStub::kMaximumSlots) {
192  FastNewContextStub stub(heap_slots);
193  __ CallStub(&stub);
194  } else {
195  __ CallRuntime(Runtime::kNewFunctionContext, 1);
196  }
197  RecordSafepoint(Safepoint::kNoLazyDeopt);
198  // Context is returned in both rax and rsi. It replaces the context
199  // passed to us. It's saved in the stack and kept live in rsi.
200  __ movq(Operand(rbp, StandardFrameConstants::kContextOffset), rsi);
201 
202  // Copy any necessary parameters into the context.
203  int num_parameters = scope()->num_parameters();
204  for (int i = 0; i < num_parameters; i++) {
205  Variable* var = scope()->parameter(i);
206  if (var->IsContextSlot()) {
207  int parameter_offset = StandardFrameConstants::kCallerSPOffset +
208  (num_parameters - 1 - i) * kPointerSize;
209  // Load parameter from stack.
210  __ movq(rax, Operand(rbp, parameter_offset));
211  // Store it in the context.
212  int context_offset = Context::SlotOffset(var->index());
213  __ movq(Operand(rsi, context_offset), rax);
214  // Update the write barrier. This clobbers rax and rbx.
215  __ RecordWriteContextSlot(rsi, context_offset, rax, rbx, kSaveFPRegs);
216  }
217  }
218  Comment(";;; End allocate local context");
219  }
220 
221  // Trace the call.
222  if (FLAG_trace) {
223  __ CallRuntime(Runtime::kTraceEnter, 0);
224  }
225  return !is_aborted();
226 }
227 
228 
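// [Editorial sketch, not part of the original file] The _MSC_VER block in
// GeneratePrologue touches one word in every 4 KB page of the newly reserved
// stack area because Windows commits stack memory one guard page at a time, so
// code may not access the stack more than a page below the most recently
// touched page. The same idea as an ordinary C++ helper (illustrative only;
// the value written is irrelevant):
void TouchStackPages(char* base, int size_in_bytes) {
  const int kPageSize = 4 * 1024;
  for (int offset = size_in_bytes - kPageSize; offset > 0; offset -= kPageSize) {
    base[offset] = 0;  // touch the page so the OS commits it
  }
}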
229 bool LCodeGen::GenerateBody() {
230  ASSERT(is_generating());
231  bool emit_instructions = true;
232  for (current_instruction_ = 0;
233  !is_aborted() && current_instruction_ < instructions_->length();
234  current_instruction_++) {
235  LInstruction* instr = instructions_->at(current_instruction_);
236  if (instr->IsLabel()) {
237  LLabel* label = LLabel::cast(instr);
238  emit_instructions = !label->HasReplacement();
239  }
240 
241  if (emit_instructions) {
242  Comment(";;; @%d: %s.", current_instruction_, instr->Mnemonic());
243  instr->CompileToNative(this);
244  }
245  }
246  EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
247  return !is_aborted();
248 }
249 
250 
251 bool LCodeGen::GenerateJumpTable() {
252  for (int i = 0; i < jump_table_.length(); i++) {
253  __ bind(&jump_table_[i].label);
254  __ Jump(jump_table_[i].address, RelocInfo::RUNTIME_ENTRY);
255  }
256  return !is_aborted();
257 }
258 
259 
260 bool LCodeGen::GenerateDeferredCode() {
261  ASSERT(is_generating());
262  if (deferred_.length() > 0) {
263  for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
264  LDeferredCode* code = deferred_[i];
265  __ bind(code->entry());
266  Comment(";;; Deferred code @%d: %s.",
267  code->instruction_index(),
268  code->instr()->Mnemonic());
269  code->Generate();
270  __ jmp(code->exit());
271  }
272  }
273 
274  // Deferred code is the last part of the instruction sequence. Mark
275  // the generated code as done unless we bailed out.
276  if (!is_aborted()) status_ = DONE;
277  return !is_aborted();
278 }
279 
280 
281 bool LCodeGen::GenerateSafepointTable() {
282  ASSERT(is_done());
283  safepoints_.Emit(masm(), GetStackSlotCount());
284  return !is_aborted();
285 }
286 
287 
288 Register LCodeGen::ToRegister(int index) const {
289  return Register::FromAllocationIndex(index);
290 }
291 
292 
293 XMMRegister LCodeGen::ToDoubleRegister(int index) const {
294  return XMMRegister::FromAllocationIndex(index);
295 }
296 
297 
298 Register LCodeGen::ToRegister(LOperand* op) const {
299  ASSERT(op->IsRegister());
300  return ToRegister(op->index());
301 }
302 
303 
304 XMMRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
305  ASSERT(op->IsDoubleRegister());
306  return ToDoubleRegister(op->index());
307 }
308 
309 
310 bool LCodeGen::IsInteger32Constant(LConstantOperand* op) const {
311  return op->IsConstantOperand() &&
312  chunk_->LookupLiteralRepresentation(op).IsInteger32();
313 }
314 
315 
316 bool LCodeGen::IsTaggedConstant(LConstantOperand* op) const {
317  return op->IsConstantOperand() &&
318  chunk_->LookupLiteralRepresentation(op).IsTagged();
319 }
320 
321 
322 int LCodeGen::ToInteger32(LConstantOperand* op) const {
323  Handle<Object> value = chunk_->LookupLiteral(op);
324  ASSERT(chunk_->LookupLiteralRepresentation(op).IsInteger32());
325  ASSERT(static_cast<double>(static_cast<int32_t>(value->Number())) ==
326  value->Number());
327  return static_cast<int32_t>(value->Number());
328 }
329 
330 
331 double LCodeGen::ToDouble(LConstantOperand* op) const {
332  Handle<Object> value = chunk_->LookupLiteral(op);
333  return value->Number();
334 }
335 
336 
337 Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
338  Handle<Object> literal = chunk_->LookupLiteral(op);
339  ASSERT(chunk_->LookupLiteralRepresentation(op).IsTagged());
340  return literal;
341 }
342 
343 
344 Operand LCodeGen::ToOperand(LOperand* op) const {
345  // Does not handle registers. In X64 assembler, plain registers are not
346  // representable as an Operand.
347  ASSERT(op->IsStackSlot() || op->IsDoubleStackSlot());
348  int index = op->index();
349  if (index >= 0) {
350  // Local or spill slot. Skip the frame pointer, function, and
351  // context in the fixed part of the frame.
352  return Operand(rbp, -(index + 3) * kPointerSize);
353  } else {
354  // Incoming parameter. Skip the return address.
355  return Operand(rbp, -(index - 1) * kPointerSize);
356  }
357 }
358 
359 
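// [Editorial sketch, not part of the original file] ToOperand above turns an
// allocation index into an rbp-relative byte offset: non-negative indices are
// spill slots below the fixed part of the frame (the frame pointer, function
// and context are skipped), while negative indices address incoming parameters
// above the return address. The same arithmetic as a standalone helper
// (kPointerSize assumed to be 8 on x64):
int FrameOffsetForSlot(int index, int pointer_size = 8) {
  if (index >= 0) {
    // Local or spill slot: skip the frame pointer, function and context.
    return -(index + 3) * pointer_size;
  }
  // Incoming parameter: skip the return address.
  return -(index - 1) * pointer_size;
}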
360 void LCodeGen::WriteTranslation(LEnvironment* environment,
361  Translation* translation) {
362  if (environment == NULL) return;
363 
364  // The translation includes one command per value in the environment.
365  int translation_size = environment->values()->length();
366  // The output frame height does not include the parameters.
367  int height = translation_size - environment->parameter_count();
368 
369  WriteTranslation(environment->outer(), translation);
370  int closure_id = DefineDeoptimizationLiteral(environment->closure());
371  switch (environment->frame_type()) {
372  case JS_FUNCTION:
373  translation->BeginJSFrame(environment->ast_id(), closure_id, height);
374  break;
375  case JS_CONSTRUCT:
376  translation->BeginConstructStubFrame(closure_id, translation_size);
377  break;
378  case ARGUMENTS_ADAPTOR:
379  translation->BeginArgumentsAdaptorFrame(closure_id, translation_size);
380  break;
381  default:
382  UNREACHABLE();
383  }
384  for (int i = 0; i < translation_size; ++i) {
385  LOperand* value = environment->values()->at(i);
386  // spilled_registers_ and spilled_double_registers_ are either
387  // both NULL or both set.
388  if (environment->spilled_registers() != NULL && value != NULL) {
389  if (value->IsRegister() &&
390  environment->spilled_registers()[value->index()] != NULL) {
391  translation->MarkDuplicate();
392  AddToTranslation(translation,
393  environment->spilled_registers()[value->index()],
394  environment->HasTaggedValueAt(i));
395  } else if (
396  value->IsDoubleRegister() &&
397  environment->spilled_double_registers()[value->index()] != NULL) {
398  translation->MarkDuplicate();
399  AddToTranslation(
400  translation,
401  environment->spilled_double_registers()[value->index()],
402  false);
403  }
404  }
405 
406  AddToTranslation(translation, value, environment->HasTaggedValueAt(i));
407  }
408 }
409 
410 
411 void LCodeGen::AddToTranslation(Translation* translation,
412  LOperand* op,
413  bool is_tagged) {
414  if (op == NULL) {
415  // TODO(twuerthinger): Introduce marker operands to indicate that this value
416  // is not present and must be reconstructed from the deoptimizer. Currently
417  // this is only used for the arguments object.
418  translation->StoreArgumentsObject();
419  } else if (op->IsStackSlot()) {
420  if (is_tagged) {
421  translation->StoreStackSlot(op->index());
422  } else {
423  translation->StoreInt32StackSlot(op->index());
424  }
425  } else if (op->IsDoubleStackSlot()) {
426  translation->StoreDoubleStackSlot(op->index());
427  } else if (op->IsArgument()) {
428  ASSERT(is_tagged);
429  int src_index = GetStackSlotCount() + op->index();
430  translation->StoreStackSlot(src_index);
431  } else if (op->IsRegister()) {
432  Register reg = ToRegister(op);
433  if (is_tagged) {
434  translation->StoreRegister(reg);
435  } else {
436  translation->StoreInt32Register(reg);
437  }
438  } else if (op->IsDoubleRegister()) {
439  XMMRegister reg = ToDoubleRegister(op);
440  translation->StoreDoubleRegister(reg);
441  } else if (op->IsConstantOperand()) {
442  Handle<Object> literal = chunk()->LookupLiteral(LConstantOperand::cast(op));
443  int src_index = DefineDeoptimizationLiteral(literal);
444  translation->StoreLiteral(src_index);
445  } else {
446  UNREACHABLE();
447  }
448 }
449 
450 
451 void LCodeGen::CallCodeGeneric(Handle<Code> code,
452  RelocInfo::Mode mode,
453  LInstruction* instr,
454  SafepointMode safepoint_mode,
455  int argc) {
456  EnsureSpaceForLazyDeopt(Deoptimizer::patch_size() - masm()->CallSize(code));
457  ASSERT(instr != NULL);
458  LPointerMap* pointers = instr->pointer_map();
459  RecordPosition(pointers->position());
460  __ call(code, mode);
461  RecordSafepointWithLazyDeopt(instr, safepoint_mode, argc);
462 
463  // Signal that we don't inline smi code before these stubs in the
464  // optimizing code generator.
465  if (code->kind() == Code::BINARY_OP_IC ||
466  code->kind() == Code::COMPARE_IC) {
467  __ nop();
468  }
469 }
470 
471 
472 void LCodeGen::CallCode(Handle<Code> code,
473  RelocInfo::Mode mode,
474  LInstruction* instr) {
475  CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT, 0);
476 }
477 
478 
479 void LCodeGen::CallRuntime(const Runtime::Function* function,
480  int num_arguments,
481  LInstruction* instr) {
482  ASSERT(instr != NULL);
483  ASSERT(instr->HasPointerMap());
484  LPointerMap* pointers = instr->pointer_map();
485  RecordPosition(pointers->position());
486 
487  __ CallRuntime(function, num_arguments);
488  RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT, 0);
489 }
490 
491 
492 void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
493  int argc,
494  LInstruction* instr) {
496  __ CallRuntimeSaveDoubles(id);
497  RecordSafepointWithRegisters(
498  instr->pointer_map(), argc, Safepoint::kNoLazyDeopt);
499 }
500 
501 
502 void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
503  Safepoint::DeoptMode mode) {
504  if (!environment->HasBeenRegistered()) {
505  // Physical stack frame layout:
506  // -x ............. -4 0 ..................................... y
507  // [incoming arguments] [spill slots] [pushed outgoing arguments]
508 
509  // Layout of the environment:
510  // 0 ..................................................... size-1
511  // [parameters] [locals] [expression stack including arguments]
512 
513  // Layout of the translation:
514  // 0 ........................................................ size - 1 + 4
515  // [expression stack including arguments] [locals] [4 words] [parameters]
516  // |>------------ translation_size ------------<|
517 
518  int frame_count = 0;
519  int jsframe_count = 0;
520  for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
521  ++frame_count;
522  if (e->frame_type() == JS_FUNCTION) {
523  ++jsframe_count;
524  }
525  }
526  Translation translation(&translations_, frame_count, jsframe_count,
527  environment->zone());
528  WriteTranslation(environment, &translation);
529  int deoptimization_index = deoptimizations_.length();
530  int pc_offset = masm()->pc_offset();
531  environment->Register(deoptimization_index,
532  translation.index(),
533  (mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
534  deoptimizations_.Add(environment, environment->zone());
535  }
536 }
537 
538 
539 void LCodeGen::DeoptimizeIf(Condition cc, LEnvironment* environment) {
540  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
541  ASSERT(environment->HasBeenRegistered());
542  int id = environment->deoptimization_index();
543  Address entry = Deoptimizer::GetDeoptimizationEntry(id, Deoptimizer::EAGER);
544  if (entry == NULL) {
545  Abort("bailout was not prepared");
546  return;
547  }
548 
549  if (cc == no_condition) {
550  __ Jump(entry, RelocInfo::RUNTIME_ENTRY);
551  } else {
552  // We often have several deopts to the same entry, reuse the last
553  // jump entry if this is the case.
554  if (jump_table_.is_empty() ||
555  jump_table_.last().address != entry) {
556  jump_table_.Add(JumpTableEntry(entry), zone());
557  }
558  __ j(cc, &jump_table_.last().label);
559  }
560 }
561 
562 
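// [Editorial sketch, not part of the original file] DeoptimizeIf above reuses
// the last jump-table entry when several consecutive deopts target the same
// entry address, so they all share one jump target. The same "coalesce with
// the previous entry" idea, standalone:
#include <vector>

int JumpTableIndexFor(std::vector<const void*>& table, const void* entry) {
  if (table.empty() || table.back() != entry) table.push_back(entry);
  return static_cast<int>(table.size()) - 1;  // index of the (possibly shared) slot
}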
563 void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
564  int length = deoptimizations_.length();
565  if (length == 0) return;
566  Handle<DeoptimizationInputData> data =
567  factory()->NewDeoptimizationInputData(length, TENURED);
568 
569  Handle<ByteArray> translations = translations_.CreateByteArray();
570  data->SetTranslationByteArray(*translations);
571  data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));
572 
573  Handle<FixedArray> literals =
574  factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
575  for (int i = 0; i < deoptimization_literals_.length(); i++) {
576  literals->set(i, *deoptimization_literals_[i]);
577  }
578  data->SetLiteralArray(*literals);
579 
580  data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id()));
581  data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_));
582 
583  // Populate the deoptimization entries.
584  for (int i = 0; i < length; i++) {
585  LEnvironment* env = deoptimizations_[i];
586  data->SetAstId(i, Smi::FromInt(env->ast_id()));
587  data->SetTranslationIndex(i, Smi::FromInt(env->translation_index()));
588  data->SetArgumentsStackHeight(i,
589  Smi::FromInt(env->arguments_stack_height()));
590  data->SetPc(i, Smi::FromInt(env->pc_offset()));
591  }
592  code->set_deoptimization_data(*data);
593 }
594 
595 
596 int LCodeGen::DefineDeoptimizationLiteral(Handle<Object> literal) {
597  int result = deoptimization_literals_.length();
598  for (int i = 0; i < deoptimization_literals_.length(); ++i) {
599  if (deoptimization_literals_[i].is_identical_to(literal)) return i;
600  }
601  deoptimization_literals_.Add(literal, zone());
602  return result;
603 }
604 
605 
606 void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
607  ASSERT(deoptimization_literals_.length() == 0);
608 
609  const ZoneList<Handle<JSFunction> >* inlined_closures =
610  chunk()->inlined_closures();
611 
612  for (int i = 0, length = inlined_closures->length();
613  i < length;
614  i++) {
615  DefineDeoptimizationLiteral(inlined_closures->at(i));
616  }
617 
618  inlined_function_count_ = deoptimization_literals_.length();
619 }
620 
621 
622 void LCodeGen::RecordSafepointWithLazyDeopt(
623  LInstruction* instr, SafepointMode safepoint_mode, int argc) {
624  if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
625  RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt);
626  } else {
627  ASSERT(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS);
628  RecordSafepointWithRegisters(
629  instr->pointer_map(), argc, Safepoint::kLazyDeopt);
630  }
631 }
632 
633 
634 void LCodeGen::RecordSafepoint(
635  LPointerMap* pointers,
636  Safepoint::Kind kind,
637  int arguments,
638  Safepoint::DeoptMode deopt_mode) {
639  ASSERT(kind == expected_safepoint_kind_);
640 
641  const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();
642 
643  Safepoint safepoint = safepoints_.DefineSafepoint(masm(),
644  kind, arguments, deopt_mode);
645  for (int i = 0; i < operands->length(); i++) {
646  LOperand* pointer = operands->at(i);
647  if (pointer->IsStackSlot()) {
648  safepoint.DefinePointerSlot(pointer->index(), zone());
649  } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
650  safepoint.DefinePointerRegister(ToRegister(pointer), zone());
651  }
652  }
653  if (kind & Safepoint::kWithRegisters) {
654  // Register rsi always contains a pointer to the context.
655  safepoint.DefinePointerRegister(rsi, zone());
656  }
657 }
658 
659 
660 void LCodeGen::RecordSafepoint(LPointerMap* pointers,
661  Safepoint::DeoptMode deopt_mode) {
662  RecordSafepoint(pointers, Safepoint::kSimple, 0, deopt_mode);
663 }
664 
665 
666 void LCodeGen::RecordSafepoint(Safepoint::DeoptMode deopt_mode) {
667  LPointerMap empty_pointers(RelocInfo::kNoPosition, zone());
668  RecordSafepoint(&empty_pointers, deopt_mode);
669 }
670 
671 
672 void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
673  int arguments,
674  Safepoint::DeoptMode deopt_mode) {
675  RecordSafepoint(pointers, Safepoint::kWithRegisters, arguments, deopt_mode);
676 }
677 
678 
679 void LCodeGen::RecordPosition(int position) {
680  if (position == RelocInfo::kNoPosition) return;
681  masm()->positions_recorder()->RecordPosition(position);
682 }
683 
684 
685 void LCodeGen::DoLabel(LLabel* label) {
686  if (label->is_loop_header()) {
687  Comment(";;; B%d - LOOP entry", label->block_id());
688  } else {
689  Comment(";;; B%d", label->block_id());
690  }
691  __ bind(label->label());
692  current_block_ = label->block_id();
693  DoGap(label);
694 }
695 
696 
697 void LCodeGen::DoParallelMove(LParallelMove* move) {
698  resolver_.Resolve(move);
699 }
700 
701 
702 void LCodeGen::DoGap(LGap* gap) {
703  for (int i = LGap::FIRST_INNER_POSITION;
704  i <= LGap::LAST_INNER_POSITION;
705  i++) {
706  LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i);
707  LParallelMove* move = gap->GetParallelMove(inner_pos);
708  if (move != NULL) DoParallelMove(move);
709  }
710 }
711 
712 
713 void LCodeGen::DoInstructionGap(LInstructionGap* instr) {
714  DoGap(instr);
715 }
716 
717 
718 void LCodeGen::DoParameter(LParameter* instr) {
719  // Nothing to do.
720 }
721 
722 
723 void LCodeGen::DoCallStub(LCallStub* instr) {
724  ASSERT(ToRegister(instr->result()).is(rax));
725  switch (instr->hydrogen()->major_key()) {
726  case CodeStub::RegExpConstructResult: {
727  RegExpConstructResultStub stub;
728  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
729  break;
730  }
731  case CodeStub::RegExpExec: {
732  RegExpExecStub stub;
733  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
734  break;
735  }
736  case CodeStub::SubString: {
737  SubStringStub stub;
738  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
739  break;
740  }
741  case CodeStub::NumberToString: {
742  NumberToStringStub stub;
743  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
744  break;
745  }
746  case CodeStub::StringAdd: {
747  StringAddStub stub(NO_STRING_ADD_FLAGS);
748  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
749  break;
750  }
751  case CodeStub::StringCompare: {
752  StringCompareStub stub;
753  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
754  break;
755  }
756  case CodeStub::TranscendentalCache: {
757  TranscendentalCacheStub stub(instr->transcendental_type(),
758  TranscendentalCacheStub::TAGGED);
759  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
760  break;
761  }
762  default:
763  UNREACHABLE();
764  }
765 }
766 
767 
768 void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
769  // Nothing to do.
770 }
771 
772 
773 void LCodeGen::DoModI(LModI* instr) {
774  if (instr->hydrogen()->HasPowerOf2Divisor()) {
775  Register dividend = ToRegister(instr->InputAt(0));
776 
777  int32_t divisor =
778  HConstant::cast(instr->hydrogen()->right())->Integer32Value();
779 
780  if (divisor < 0) divisor = -divisor;
781 
782  Label positive_dividend, done;
783  __ testl(dividend, dividend);
784  __ j(not_sign, &positive_dividend, Label::kNear);
785  __ negl(dividend);
786  __ andl(dividend, Immediate(divisor - 1));
787  __ negl(dividend);
788  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
789  __ j(not_zero, &done, Label::kNear);
790  DeoptimizeIf(no_condition, instr->environment());
791  } else {
792  __ jmp(&done, Label::kNear);
793  }
794  __ bind(&positive_dividend);
795  __ andl(dividend, Immediate(divisor - 1));
796  __ bind(&done);
797  } else {
798  Label done, remainder_eq_dividend, slow, do_subtraction, both_positive;
799  Register left_reg = ToRegister(instr->InputAt(0));
800  Register right_reg = ToRegister(instr->InputAt(1));
801  Register result_reg = ToRegister(instr->result());
802 
803  ASSERT(left_reg.is(rax));
804  ASSERT(result_reg.is(rdx));
805  ASSERT(!right_reg.is(rax));
806  ASSERT(!right_reg.is(rdx));
807 
808  // Check for x % 0.
809  if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
810  __ testl(right_reg, right_reg);
811  DeoptimizeIf(zero, instr->environment());
812  }
813 
814  __ testl(left_reg, left_reg);
815  __ j(zero, &remainder_eq_dividend, Label::kNear);
816  __ j(sign, &slow, Label::kNear);
817 
818  __ testl(right_reg, right_reg);
819  __ j(not_sign, &both_positive, Label::kNear);
820  // The sign of the divisor doesn't matter.
821  __ neg(right_reg);
822 
823  __ bind(&both_positive);
824  // If the dividend is smaller than the nonnegative
825  // divisor, the dividend is the result.
826  __ cmpl(left_reg, right_reg);
827  __ j(less, &remainder_eq_dividend, Label::kNear);
828 
829  // Check if the divisor is a PowerOfTwo integer.
830  Register scratch = ToRegister(instr->TempAt(0));
831  __ movl(scratch, right_reg);
832  __ subl(scratch, Immediate(1));
833  __ testl(scratch, right_reg);
834  __ j(not_zero, &do_subtraction, Label::kNear);
835  __ andl(left_reg, scratch);
836  __ jmp(&remainder_eq_dividend, Label::kNear);
837 
838  __ bind(&do_subtraction);
839  const int kUnfolds = 3;
840  // Try a few subtractions of the dividend.
841  __ movl(scratch, left_reg);
842  for (int i = 0; i < kUnfolds; i++) {
843  // Reduce the dividend by the divisor.
844  __ subl(left_reg, right_reg);
845  // Check if the dividend is less than the divisor.
846  __ cmpl(left_reg, right_reg);
847  __ j(less, &remainder_eq_dividend, Label::kNear);
848  }
849  __ movl(left_reg, scratch);
850 
851  // Slow case, using idiv instruction.
852  __ bind(&slow);
853  // Sign extend eax to edx.
854  // (We are using only the low 32 bits of the values.)
855  __ cdq();
856 
857  // Check for (0 % -x) that will produce negative zero.
858  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
859  Label positive_left;
860  Label done;
861  __ testl(left_reg, left_reg);
862  __ j(not_sign, &positive_left, Label::kNear);
863  __ idivl(right_reg);
864 
865  // Test the remainder for 0, because then the result would be -0.
866  __ testl(result_reg, result_reg);
867  __ j(not_zero, &done, Label::kNear);
868 
869  DeoptimizeIf(no_condition, instr->environment());
870  __ bind(&positive_left);
871  __ idivl(right_reg);
872  __ bind(&done);
873  } else {
874  __ idivl(right_reg);
875  }
876  __ jmp(&done, Label::kNear);
877 
878  __ bind(&remainder_eq_dividend);
879  __ movl(result_reg, left_reg);
880 
881  __ bind(&done);
882  }
883 }
884 
885 
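// [Editorial sketch, not part of the original file] DoModI's fast paths rest on
// two bit tricks: a positive m is a power of two iff (m & (m - 1)) == 0, and
// for such m, |x| % m equals |x| & (m - 1), with the result taking the sign of
// the dividend (as JavaScript's % requires). The same arithmetic in plain C++:
#include <cstdint>

bool IsPowerOfTwo(int32_t m) { return m > 0 && (m & (m - 1)) == 0; }

int32_t ModPowerOfTwo(int32_t dividend, int32_t divisor) {
  if (divisor < 0) divisor = -divisor;             // the divisor's sign is irrelevant
  int32_t mask = divisor - 1;
  if (dividend < 0) return -((-dividend) & mask);  // result keeps the dividend's sign
  return dividend & mask;
}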
886 void LCodeGen::DoDivI(LDivI* instr) {
887  LOperand* right = instr->InputAt(1);
888  ASSERT(ToRegister(instr->result()).is(rax));
889  ASSERT(ToRegister(instr->InputAt(0)).is(rax));
890  ASSERT(!ToRegister(instr->InputAt(1)).is(rax));
891  ASSERT(!ToRegister(instr->InputAt(1)).is(rdx));
892 
893  Register left_reg = rax;
894 
895  // Check for x / 0.
896  Register right_reg = ToRegister(right);
897  if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
898  __ testl(right_reg, right_reg);
899  DeoptimizeIf(zero, instr->environment());
900  }
901 
902  // Check for (0 / -x) that will produce negative zero.
903  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
904  Label left_not_zero;
905  __ testl(left_reg, left_reg);
906  __ j(not_zero, &left_not_zero, Label::kNear);
907  __ testl(right_reg, right_reg);
908  DeoptimizeIf(sign, instr->environment());
909  __ bind(&left_not_zero);
910  }
911 
912  // Check for (-kMinInt / -1).
913  if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
914  Label left_not_min_int;
915  __ cmpl(left_reg, Immediate(kMinInt));
916  __ j(not_zero, &left_not_min_int, Label::kNear);
917  __ cmpl(right_reg, Immediate(-1));
918  DeoptimizeIf(zero, instr->environment());
919  __ bind(&left_not_min_int);
920  }
921 
922  // Sign extend to rdx.
923  __ cdq();
924  __ idivl(right_reg);
925 
926  // Deoptimize if remainder is not 0.
927  __ testl(rdx, rdx);
928  DeoptimizeIf(not_zero, instr->environment());
929 }
930 
931 
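// [Editorial sketch, not part of the original file] DoDivI above deoptimizes in
// exactly the cases where a 32-bit integer division cannot represent the
// JavaScript result: division by zero, 0 / negative (which must yield -0 when
// minus zero is observable), kMinInt / -1 (which overflows int32), and a
// non-zero remainder (the quotient would not be an integer). A checker with
// the same case analysis:
#include <cstdint>

bool Int32DivisionIsExact(int32_t left, int32_t right) {
  if (right == 0) return false;                        // x / 0
  if (left == 0 && right < 0) return false;            // 0 / -x would be -0
  if (left == INT32_MIN && right == -1) return false;  // result overflows int32
  return left % right == 0;                            // no remainder allowed
}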
932 void LCodeGen::DoMulI(LMulI* instr) {
933  Register left = ToRegister(instr->InputAt(0));
934  LOperand* right = instr->InputAt(1);
935 
936  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
937  __ movl(kScratchRegister, left);
938  }
939 
940  bool can_overflow =
941  instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
942  if (right->IsConstantOperand()) {
943  int right_value = ToInteger32(LConstantOperand::cast(right));
944  if (right_value == -1) {
945  __ negl(left);
946  } else if (right_value == 0) {
947  __ xorl(left, left);
948  } else if (right_value == 2) {
949  __ addl(left, left);
950  } else if (!can_overflow) {
951  // If the multiplication is known to not overflow, we
952  // can use operations that don't set the overflow flag
953  // correctly.
954  switch (right_value) {
955  case 1:
956  // Do nothing.
957  break;
958  case 3:
959  __ leal(left, Operand(left, left, times_2, 0));
960  break;
961  case 4:
962  __ shll(left, Immediate(2));
963  break;
964  case 5:
965  __ leal(left, Operand(left, left, times_4, 0));
966  break;
967  case 8:
968  __ shll(left, Immediate(3));
969  break;
970  case 9:
971  __ leal(left, Operand(left, left, times_8, 0));
972  break;
973  case 16:
974  __ shll(left, Immediate(4));
975  break;
976  default:
977  __ imull(left, left, Immediate(right_value));
978  break;
979  }
980  } else {
981  __ imull(left, left, Immediate(right_value));
982  }
983  } else if (right->IsStackSlot()) {
984  __ imull(left, ToOperand(right));
985  } else {
986  __ imull(left, ToRegister(right));
987  }
988 
989  if (can_overflow) {
990  DeoptimizeIf(overflow, instr->environment());
991  }
992 
993  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
994  // Bail out if the result is supposed to be negative zero.
995  Label done;
996  __ testl(left, left);
997  __ j(not_zero, &done, Label::kNear);
998  if (right->IsConstantOperand()) {
999  if (ToInteger32(LConstantOperand::cast(right)) <= 0) {
1000  DeoptimizeIf(no_condition, instr->environment());
1001  }
1002  } else if (right->IsStackSlot()) {
1003  __ orl(kScratchRegister, ToOperand(right));
1004  DeoptimizeIf(sign, instr->environment());
1005  } else {
1006  // Test the non-zero operand for negative sign.
1007  __ orl(kScratchRegister, ToRegister(right));
1008  DeoptimizeIf(sign, instr->environment());
1009  }
1010  __ bind(&done);
1011  }
1012 }
1013 
1014 
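// [Editorial sketch, not part of the original file] The constant cases in DoMulI
// are classic strength reduction: powers of two become shifts, and 3, 5 and 9
// become a single lea (x + x*2, x + x*4, x + x*8). These forms are only used
// when no overflow check is needed, because lea and shl do not set the overflow
// flag the way imul does. Equivalent arithmetic in plain C++:
int MulBySmallConstant(int x, int k) {
  switch (k) {
    case 0:  return 0;
    case 1:  return x;
    case 2:  return x + x;      // addl
    case 3:  return x + x * 2;  // lea base + index*2
    case 4:  return x * 4;      // shll by 2
    case 5:  return x + x * 4;  // lea base + index*4
    case 8:  return x * 8;      // shll by 3
    case 9:  return x + x * 8;  // lea base + index*8
    case 16: return x * 16;     // shll by 4
    default: return x * k;      // general case: imull
  }
}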
1015 void LCodeGen::DoBitI(LBitI* instr) {
1016  LOperand* left = instr->InputAt(0);
1017  LOperand* right = instr->InputAt(1);
1018  ASSERT(left->Equals(instr->result()));
1019  ASSERT(left->IsRegister());
1020 
1021  if (right->IsConstantOperand()) {
1022  int right_operand = ToInteger32(LConstantOperand::cast(right));
1023  switch (instr->op()) {
1024  case Token::BIT_AND:
1025  __ andl(ToRegister(left), Immediate(right_operand));
1026  break;
1027  case Token::BIT_OR:
1028  __ orl(ToRegister(left), Immediate(right_operand));
1029  break;
1030  case Token::BIT_XOR:
1031  __ xorl(ToRegister(left), Immediate(right_operand));
1032  break;
1033  default:
1034  UNREACHABLE();
1035  break;
1036  }
1037  } else if (right->IsStackSlot()) {
1038  switch (instr->op()) {
1039  case Token::BIT_AND:
1040  __ andl(ToRegister(left), ToOperand(right));
1041  break;
1042  case Token::BIT_OR:
1043  __ orl(ToRegister(left), ToOperand(right));
1044  break;
1045  case Token::BIT_XOR:
1046  __ xorl(ToRegister(left), ToOperand(right));
1047  break;
1048  default:
1049  UNREACHABLE();
1050  break;
1051  }
1052  } else {
1053  ASSERT(right->IsRegister());
1054  switch (instr->op()) {
1055  case Token::BIT_AND:
1056  __ andl(ToRegister(left), ToRegister(right));
1057  break;
1058  case Token::BIT_OR:
1059  __ orl(ToRegister(left), ToRegister(right));
1060  break;
1061  case Token::BIT_XOR:
1062  __ xorl(ToRegister(left), ToRegister(right));
1063  break;
1064  default:
1065  UNREACHABLE();
1066  break;
1067  }
1068  }
1069 }
1070 
1071 
1072 void LCodeGen::DoShiftI(LShiftI* instr) {
1073  LOperand* left = instr->InputAt(0);
1074  LOperand* right = instr->InputAt(1);
1075  ASSERT(left->Equals(instr->result()));
1076  ASSERT(left->IsRegister());
1077  if (right->IsRegister()) {
1078  ASSERT(ToRegister(right).is(rcx));
1079 
1080  switch (instr->op()) {
1081  case Token::SAR:
1082  __ sarl_cl(ToRegister(left));
1083  break;
1084  case Token::SHR:
1085  __ shrl_cl(ToRegister(left));
1086  if (instr->can_deopt()) {
1087  __ testl(ToRegister(left), ToRegister(left));
1088  DeoptimizeIf(negative, instr->environment());
1089  }
1090  break;
1091  case Token::SHL:
1092  __ shll_cl(ToRegister(left));
1093  break;
1094  default:
1095  UNREACHABLE();
1096  break;
1097  }
1098  } else {
1099  int value = ToInteger32(LConstantOperand::cast(right));
1100  uint8_t shift_count = static_cast<uint8_t>(value & 0x1F);
1101  switch (instr->op()) {
1102  case Token::SAR:
1103  if (shift_count != 0) {
1104  __ sarl(ToRegister(left), Immediate(shift_count));
1105  }
1106  break;
1107  case Token::SHR:
1108  if (shift_count == 0 && instr->can_deopt()) {
1109  __ testl(ToRegister(left), ToRegister(left));
1110  DeoptimizeIf(negative, instr->environment());
1111  } else {
1112  __ shrl(ToRegister(left), Immediate(shift_count));
1113  }
1114  break;
1115  case Token::SHL:
1116  if (shift_count != 0) {
1117  __ shll(ToRegister(left), Immediate(shift_count));
1118  }
1119  break;
1120  default:
1121  UNREACHABLE();
1122  break;
1123  }
1124  }
1125 }
1126 
1127 
1128 void LCodeGen::DoSubI(LSubI* instr) {
1129  LOperand* left = instr->InputAt(0);
1130  LOperand* right = instr->InputAt(1);
1131  ASSERT(left->Equals(instr->result()));
1132 
1133  if (right->IsConstantOperand()) {
1134  __ subl(ToRegister(left),
1135  Immediate(ToInteger32(LConstantOperand::cast(right))));
1136  } else if (right->IsRegister()) {
1137  __ subl(ToRegister(left), ToRegister(right));
1138  } else {
1139  __ subl(ToRegister(left), ToOperand(right));
1140  }
1141 
1142  if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
1143  DeoptimizeIf(overflow, instr->environment());
1144  }
1145 }
1146 
1147 
1148 void LCodeGen::DoConstantI(LConstantI* instr) {
1149  ASSERT(instr->result()->IsRegister());
1150  __ Set(ToRegister(instr->result()), instr->value());
1151 }
1152 
1153 
1154 void LCodeGen::DoConstantD(LConstantD* instr) {
1155  ASSERT(instr->result()->IsDoubleRegister());
1156  XMMRegister res = ToDoubleRegister(instr->result());
1157  double v = instr->value();
1158  uint64_t int_val = BitCast<uint64_t, double>(v);
1159  // Use xor to produce +0.0 in a fast and compact way, but avoid doing
1160  // so if the constant is -0.0.
1161  if (int_val == 0) {
1162  __ xorps(res, res);
1163  } else {
1164  Register tmp = ToRegister(instr->TempAt(0));
1165  __ Set(tmp, int_val);
1166  __ movq(res, tmp);
1167  }
1168 }
1169 
1170 
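// [Editorial sketch, not part of the original file] DoConstantD checks the raw
// bit pattern because only +0.0 is all-zero bits (and can therefore be produced
// with xorps); -0.0 has the sign bit set and must be loaded via a GP register.
#include <cstdint>
#include <cstring>

uint64_t DoubleBits(double v) {
  uint64_t bits;
  std::memcpy(&bits, &v, sizeof(bits));  // same idea as the BitCast above
  return bits;
}
// DoubleBits(0.0)  == 0x0000000000000000
// DoubleBits(-0.0) == 0x8000000000000000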
1171 void LCodeGen::DoConstantT(LConstantT* instr) {
1172  Handle<Object> value = instr->value();
1173  if (value->IsSmi()) {
1174  __ Move(ToRegister(instr->result()), value);
1175  } else {
1176  __ LoadHeapObject(ToRegister(instr->result()),
1177  Handle<HeapObject>::cast(value));
1178  }
1179 }
1180 
1181 
1182 void LCodeGen::DoJSArrayLength(LJSArrayLength* instr) {
1183  Register result = ToRegister(instr->result());
1184  Register array = ToRegister(instr->InputAt(0));
1185  __ movq(result, FieldOperand(array, JSArray::kLengthOffset));
1186 }
1187 
1188 
1189 void LCodeGen::DoFixedArrayBaseLength(LFixedArrayBaseLength* instr) {
1190  Register result = ToRegister(instr->result());
1191  Register array = ToRegister(instr->InputAt(0));
1192  __ movq(result, FieldOperand(array, FixedArrayBase::kLengthOffset));
1193 }
1194 
1195 
1196 void LCodeGen::DoElementsKind(LElementsKind* instr) {
1197  Register result = ToRegister(instr->result());
1198  Register input = ToRegister(instr->InputAt(0));
1199 
1200  // Load map into |result|.
1201  __ movq(result, FieldOperand(input, HeapObject::kMapOffset));
1202  // Load the map's "bit field 2" into |result|. We only need the first byte.
1203  __ movzxbq(result, FieldOperand(result, Map::kBitField2Offset));
1204  // Retrieve elements_kind from bit field 2.
1205  __ and_(result, Immediate(Map::kElementsKindMask));
1206  __ shr(result, Immediate(Map::kElementsKindShift));
1207 }
1208 
1209 
1210 void LCodeGen::DoValueOf(LValueOf* instr) {
1211  Register input = ToRegister(instr->InputAt(0));
1212  Register result = ToRegister(instr->result());
1213  ASSERT(input.is(result));
1214  Label done;
1215  // If the object is a smi return the object.
1216  __ JumpIfSmi(input, &done, Label::kNear);
1217 
1218  // If the object is not a value type, return the object.
1219  __ CmpObjectType(input, JS_VALUE_TYPE, kScratchRegister);
1220  __ j(not_equal, &done, Label::kNear);
1221  __ movq(result, FieldOperand(input, JSValue::kValueOffset));
1222 
1223  __ bind(&done);
1224 }
1225 
1226 
1227 void LCodeGen::DoDateField(LDateField* instr) {
1228  Register object = ToRegister(instr->InputAt(0));
1229  Register result = ToRegister(instr->result());
1230  Smi* index = instr->index();
1231  Label runtime, done;
1232  ASSERT(object.is(result));
1233  ASSERT(object.is(rax));
1234 
1235 #ifdef DEBUG
1236  __ AbortIfSmi(object);
1237  __ CmpObjectType(object, JS_DATE_TYPE, kScratchRegister);
1238  __ Assert(equal, "Trying to get date field from non-date.");
1239 #endif
1240 
1241  if (index->value() == 0) {
1242  __ movq(result, FieldOperand(object, JSDate::kValueOffset));
1243  } else {
1244  if (index->value() < JSDate::kFirstUncachedField) {
1245  ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
1246  __ movq(kScratchRegister, stamp);
1247  __ cmpq(kScratchRegister, FieldOperand(object,
1248  JSDate::kCacheStampOffset));
1249  __ j(not_equal, &runtime, Label::kNear);
1250  __ movq(result, FieldOperand(object, JSDate::kValueOffset +
1251  kPointerSize * index->value()));
1252  __ jmp(&done);
1253  }
1254  __ bind(&runtime);
1255  __ PrepareCallCFunction(2);
1256 #ifdef _WIN64
1257  __ movq(rcx, object);
1258  __ movq(rdx, index, RelocInfo::NONE);
1259 #else
1260  __ movq(rdi, object);
1261  __ movq(rsi, index, RelocInfo::NONE);
1262 #endif
1263  __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
1264  __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
1265  __ bind(&done);
1266  }
1267 }
1268 
1269 
1270 void LCodeGen::DoBitNotI(LBitNotI* instr) {
1271  LOperand* input = instr->InputAt(0);
1272  ASSERT(input->Equals(instr->result()));
1273  __ not_(ToRegister(input));
1274 }
1275 
1276 
1277 void LCodeGen::DoThrow(LThrow* instr) {
1278  __ push(ToRegister(instr->InputAt(0)));
1279  CallRuntime(Runtime::kThrow, 1, instr);
1280 
1281  if (FLAG_debug_code) {
1282  Comment("Unreachable code.");
1283  __ int3();
1284  }
1285 }
1286 
1287 
1288 void LCodeGen::DoAddI(LAddI* instr) {
1289  LOperand* left = instr->InputAt(0);
1290  LOperand* right = instr->InputAt(1);
1291  ASSERT(left->Equals(instr->result()));
1292 
1293  if (right->IsConstantOperand()) {
1294  __ addl(ToRegister(left),
1295  Immediate(ToInteger32(LConstantOperand::cast(right))));
1296  } else if (right->IsRegister()) {
1297  __ addl(ToRegister(left), ToRegister(right));
1298  } else {
1299  __ addl(ToRegister(left), ToOperand(right));
1300  }
1301 
1302  if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
1303  DeoptimizeIf(overflow, instr->environment());
1304  }
1305 }
1306 
1307 
1308 void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
1309  XMMRegister left = ToDoubleRegister(instr->InputAt(0));
1310  XMMRegister right = ToDoubleRegister(instr->InputAt(1));
1311  XMMRegister result = ToDoubleRegister(instr->result());
1312  // All operations except MOD are computed in-place.
1313  ASSERT(instr->op() == Token::MOD || left.is(result));
1314  switch (instr->op()) {
1315  case Token::ADD:
1316  __ addsd(left, right);
1317  break;
1318  case Token::SUB:
1319  __ subsd(left, right);
1320  break;
1321  case Token::MUL:
1322  __ mulsd(left, right);
1323  break;
1324  case Token::DIV:
1325  __ divsd(left, right);
1326  break;
1327  case Token::MOD:
1328  __ PrepareCallCFunction(2);
1329  __ movaps(xmm0, left);
1330  ASSERT(right.is(xmm1));
1331  __ CallCFunction(
1332  ExternalReference::double_fp_operation(Token::MOD, isolate()), 2);
1334  __ movaps(result, xmm0);
1335  break;
1336  default:
1337  UNREACHABLE();
1338  break;
1339  }
1340 }
1341 
1342 
1343 void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
1344  ASSERT(ToRegister(instr->InputAt(0)).is(rdx));
1345  ASSERT(ToRegister(instr->InputAt(1)).is(rax));
1346  ASSERT(ToRegister(instr->result()).is(rax));
1347 
1348  BinaryOpStub stub(instr->op(), NO_OVERWRITE);
1349  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
1350  __ nop(); // Signals no inlined code.
1351 }
1352 
1353 
1354 int LCodeGen::GetNextEmittedBlock(int block) {
1355  for (int i = block + 1; i < graph()->blocks()->length(); ++i) {
1356  LLabel* label = chunk_->GetLabel(i);
1357  if (!label->HasReplacement()) return i;
1358  }
1359  return -1;
1360 }
1361 
1362 
1363 void LCodeGen::EmitBranch(int left_block, int right_block, Condition cc) {
1364  int next_block = GetNextEmittedBlock(current_block_);
1365  right_block = chunk_->LookupDestination(right_block);
1366  left_block = chunk_->LookupDestination(left_block);
1367 
1368  if (right_block == left_block) {
1369  EmitGoto(left_block);
1370  } else if (left_block == next_block) {
1371  __ j(NegateCondition(cc), chunk_->GetAssemblyLabel(right_block));
1372  } else if (right_block == next_block) {
1373  __ j(cc, chunk_->GetAssemblyLabel(left_block));
1374  } else {
1375  __ j(cc, chunk_->GetAssemblyLabel(left_block));
1376  if (cc != always) {
1377  __ jmp(chunk_->GetAssemblyLabel(right_block));
1378  }
1379  }
1380 }
1381 
1382 
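// [Editorial sketch, not part of the original file] EmitBranch avoids emitting
// a jump to whichever destination happens to be the next block in emission
// order, falling through to it instead. The decision logic, separated from the
// assembler details (helper returns a description of what would be emitted):
#include <string>

std::string DescribeBranch(int left, int right, int next_block) {
  if (left == right)       return "goto left";                            // single target
  if (left == next_block)  return "j(!cc) right; fall through to left";
  if (right == next_block) return "j(cc) left; fall through to right";
  return "j(cc) left; jmp right";                                         // need both jumps
}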
1383 void LCodeGen::DoBranch(LBranch* instr) {
1384  int true_block = chunk_->LookupDestination(instr->true_block_id());
1385  int false_block = chunk_->LookupDestination(instr->false_block_id());
1386 
1387  Representation r = instr->hydrogen()->value()->representation();
1388  if (r.IsInteger32()) {
1389  Register reg = ToRegister(instr->InputAt(0));
1390  __ testl(reg, reg);
1391  EmitBranch(true_block, false_block, not_zero);
1392  } else if (r.IsDouble()) {
1393  XMMRegister reg = ToDoubleRegister(instr->InputAt(0));
1394  __ xorps(xmm0, xmm0);
1395  __ ucomisd(reg, xmm0);
1396  EmitBranch(true_block, false_block, not_equal);
1397  } else {
1398  ASSERT(r.IsTagged());
1399  Register reg = ToRegister(instr->InputAt(0));
1400  HType type = instr->hydrogen()->value()->type();
1401  if (type.IsBoolean()) {
1402  __ CompareRoot(reg, Heap::kTrueValueRootIndex);
1403  EmitBranch(true_block, false_block, equal);
1404  } else if (type.IsSmi()) {
1405  __ SmiCompare(reg, Smi::FromInt(0));
1406  EmitBranch(true_block, false_block, not_equal);
1407  } else {
1408  Label* true_label = chunk_->GetAssemblyLabel(true_block);
1409  Label* false_label = chunk_->GetAssemblyLabel(false_block);
1410 
1411  ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types();
1412  // Avoid deopts in the case where we've never executed this path before.
1413  if (expected.IsEmpty()) expected = ToBooleanStub::all_types();
1414 
1415  if (expected.Contains(ToBooleanStub::UNDEFINED)) {
1416  // undefined -> false.
1417  __ CompareRoot(reg, Heap::kUndefinedValueRootIndex);
1418  __ j(equal, false_label);
1419  }
1420  if (expected.Contains(ToBooleanStub::BOOLEAN)) {
1421  // true -> true.
1422  __ CompareRoot(reg, Heap::kTrueValueRootIndex);
1423  __ j(equal, true_label);
1424  // false -> false.
1425  __ CompareRoot(reg, Heap::kFalseValueRootIndex);
1426  __ j(equal, false_label);
1427  }
1428  if (expected.Contains(ToBooleanStub::NULL_TYPE)) {
1429  // 'null' -> false.
1430  __ CompareRoot(reg, Heap::kNullValueRootIndex);
1431  __ j(equal, false_label);
1432  }
1433 
1434  if (expected.Contains(ToBooleanStub::SMI)) {
1435  // Smis: 0 -> false, all other -> true.
1436  __ Cmp(reg, Smi::FromInt(0));
1437  __ j(equal, false_label);
1438  __ JumpIfSmi(reg, true_label);
1439  } else if (expected.NeedsMap()) {
1440  // If we need a map later and have a Smi -> deopt.
1441  __ testb(reg, Immediate(kSmiTagMask));
1442  DeoptimizeIf(zero, instr->environment());
1443  }
1444 
1445  const Register map = kScratchRegister;
1446  if (expected.NeedsMap()) {
1447  __ movq(map, FieldOperand(reg, HeapObject::kMapOffset));
1448 
1449  if (expected.CanBeUndetectable()) {
1450  // Undetectable -> false.
1451  __ testb(FieldOperand(map, Map::kBitFieldOffset),
1452  Immediate(1 << Map::kIsUndetectable));
1453  __ j(not_zero, false_label);
1454  }
1455  }
1456 
1457  if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) {
1458  // spec object -> true.
1459  __ CmpInstanceType(map, FIRST_SPEC_OBJECT_TYPE);
1460  __ j(above_equal, true_label);
1461  }
1462 
1463  if (expected.Contains(ToBooleanStub::STRING)) {
1464  // String value -> false iff empty.
1465  Label not_string;
1466  __ CmpInstanceType(map, FIRST_NONSTRING_TYPE);
1467  __ j(above_equal, &not_string, Label::kNear);
1468  __ cmpq(FieldOperand(reg, String::kLengthOffset), Immediate(0));
1469  __ j(not_zero, true_label);
1470  __ jmp(false_label);
1471  __ bind(&not_string);
1472  }
1473 
1474  if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
1475  // heap number -> false iff +0, -0, or NaN.
1476  Label not_heap_number;
1477  __ CompareRoot(map, Heap::kHeapNumberMapRootIndex);
1478  __ j(not_equal, &not_heap_number, Label::kNear);
1479  __ xorps(xmm0, xmm0);
1480  __ ucomisd(xmm0, FieldOperand(reg, HeapNumber::kValueOffset));
1481  __ j(zero, false_label);
1482  __ jmp(true_label);
1483  __ bind(&not_heap_number);
1484  }
1485 
1486  // We've seen something for the first time -> deopt.
1487  DeoptimizeIf(no_condition, instr->environment());
1488  }
1489  }
1490 }
1491 
1492 
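// [Editorial sketch, not part of the original file] The tagged case of DoBranch
// open-codes the JavaScript ToBoolean rules covered by ToBooleanStub: undefined,
// null, false, the empty string, and the numbers +0, -0 and NaN are falsy;
// everything else, including every other object, is truthy. The heap-number
// check at the end reduces to this test on the unboxed double:
#include <cmath>

bool DoubleToBoolean(double value) {
  // false for +0, -0 and NaN; true for every other number.
  return value != 0.0 && !std::isnan(value);
}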
1493 void LCodeGen::EmitGoto(int block) {
1494  block = chunk_->LookupDestination(block);
1495  int next_block = GetNextEmittedBlock(current_block_);
1496  if (block != next_block) {
1497  __ jmp(chunk_->GetAssemblyLabel(block));
1498  }
1499 }
1500 
1501 
1502 void LCodeGen::DoGoto(LGoto* instr) {
1503  EmitGoto(instr->block_id());
1504 }
1505 
1506 
1507 inline Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
1508  Condition cond = no_condition;
1509  switch (op) {
1510  case Token::EQ:
1511  case Token::EQ_STRICT:
1512  cond = equal;
1513  break;
1514  case Token::LT:
1515  cond = is_unsigned ? below : less;
1516  break;
1517  case Token::GT:
1518  cond = is_unsigned ? above : greater;
1519  break;
1520  case Token::LTE:
1521  cond = is_unsigned ? below_equal : less_equal;
1522  break;
1523  case Token::GTE:
1524  cond = is_unsigned ? above_equal : greater_equal;
1525  break;
1526  case Token::IN:
1527  case Token::INSTANCEOF:
1528  default:
1529  UNREACHABLE();
1530  }
1531  return cond;
1532 }
1533 
1534 
1535 void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) {
1536  LOperand* left = instr->InputAt(0);
1537  LOperand* right = instr->InputAt(1);
1538  int false_block = chunk_->LookupDestination(instr->false_block_id());
1539  int true_block = chunk_->LookupDestination(instr->true_block_id());
1540  Condition cc = TokenToCondition(instr->op(), instr->is_double());
1541 
1542  if (left->IsConstantOperand() && right->IsConstantOperand()) {
1543  // We can statically evaluate the comparison.
1544  double left_val = ToDouble(LConstantOperand::cast(left));
1545  double right_val = ToDouble(LConstantOperand::cast(right));
1546  int next_block =
1547  EvalComparison(instr->op(), left_val, right_val) ? true_block
1548  : false_block;
1549  EmitGoto(next_block);
1550  } else {
1551  if (instr->is_double()) {
1552  // Don't base result on EFLAGS when a NaN is involved. Instead
1553  // jump to the false block.
1554  __ ucomisd(ToDoubleRegister(left), ToDoubleRegister(right));
1555  __ j(parity_even, chunk_->GetAssemblyLabel(false_block));
1556  } else {
1557  int32_t value;
1558  if (right->IsConstantOperand()) {
1559  value = ToInteger32(LConstantOperand::cast(right));
1560  __ cmpl(ToRegister(left), Immediate(value));
1561  } else if (left->IsConstantOperand()) {
1562  value = ToInteger32(LConstantOperand::cast(left));
1563  if (right->IsRegister()) {
1564  __ cmpl(ToRegister(right), Immediate(value));
1565  } else {
1566  __ cmpl(ToOperand(right), Immediate(value));
1567  }
1568  // We transposed the operands. Reverse the condition.
1569  cc = ReverseCondition(cc);
1570  } else {
1571  if (right->IsRegister()) {
1572  __ cmpl(ToRegister(left), ToRegister(right));
1573  } else {
1574  __ cmpl(ToRegister(left), ToOperand(right));
1575  }
1576  }
1577  }
1578  EmitBranch(true_block, false_block, cc);
1579  }
1580 }
1581 
1582 
1583 void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
1584  Register left = ToRegister(instr->InputAt(0));
1585  Register right = ToRegister(instr->InputAt(1));
1586  int false_block = chunk_->LookupDestination(instr->false_block_id());
1587  int true_block = chunk_->LookupDestination(instr->true_block_id());
1588 
1589  __ cmpq(left, right);
1590  EmitBranch(true_block, false_block, equal);
1591 }
1592 
1593 
1594 void LCodeGen::DoCmpConstantEqAndBranch(LCmpConstantEqAndBranch* instr) {
1595  Register left = ToRegister(instr->InputAt(0));
1596  int true_block = chunk_->LookupDestination(instr->true_block_id());
1597  int false_block = chunk_->LookupDestination(instr->false_block_id());
1598 
1599  __ cmpq(left, Immediate(instr->hydrogen()->right()));
1600  EmitBranch(true_block, false_block, equal);
1601 }
1602 
1603 
1604 void LCodeGen::DoIsNilAndBranch(LIsNilAndBranch* instr) {
1605  Register reg = ToRegister(instr->InputAt(0));
1606  int false_block = chunk_->LookupDestination(instr->false_block_id());
1607 
1608  // If the expression is known to be untagged or a smi, then it's definitely
1609  // not null, and it can't be an undetectable object.
1610  if (instr->hydrogen()->representation().IsSpecialization() ||
1611  instr->hydrogen()->type().IsSmi()) {
1612  EmitGoto(false_block);
1613  return;
1614  }
1615 
1616  int true_block = chunk_->LookupDestination(instr->true_block_id());
1617  Heap::RootListIndex nil_value = instr->nil() == kNullValue ?
1618  Heap::kNullValueRootIndex :
1619  Heap::kUndefinedValueRootIndex;
1620  __ CompareRoot(reg, nil_value);
1621  if (instr->kind() == kStrictEquality) {
1622  EmitBranch(true_block, false_block, equal);
1623  } else {
1624  Heap::RootListIndex other_nil_value = instr->nil() == kNullValue ?
1625  Heap::kUndefinedValueRootIndex :
1626  Heap::kNullValueRootIndex;
1627  Label* true_label = chunk_->GetAssemblyLabel(true_block);
1628  Label* false_label = chunk_->GetAssemblyLabel(false_block);
1629  __ j(equal, true_label);
1630  __ CompareRoot(reg, other_nil_value);
1631  __ j(equal, true_label);
1632  __ JumpIfSmi(reg, false_label);
1633  // Check for undetectable objects by looking in the bit field in
1634  // the map. The object has already been smi checked.
1635  Register scratch = ToRegister(instr->TempAt(0));
1636  __ movq(scratch, FieldOperand(reg, HeapObject::kMapOffset));
1637  __ testb(FieldOperand(scratch, Map::kBitFieldOffset),
1638  Immediate(1 << Map::kIsUndetectable));
1639  EmitBranch(true_block, false_block, not_zero);
1640  }
1641 }
1642 
1643 
1644 Condition LCodeGen::EmitIsObject(Register input,
1645  Label* is_not_object,
1646  Label* is_object) {
1647  ASSERT(!input.is(kScratchRegister));
1648 
1649  __ JumpIfSmi(input, is_not_object);
1650 
1651  __ CompareRoot(input, Heap::kNullValueRootIndex);
1652  __ j(equal, is_object);
1653 
1653 
1654  __ movq(kScratchRegister, FieldOperand(input, HeapObject::kMapOffset));
1655  // Undetectable objects behave like undefined.
1656  __ testb(FieldOperand(kScratchRegister, Map::kBitFieldOffset),
1657  Immediate(1 << Map::kIsUndetectable));
1658  __ j(not_zero, is_not_object);
1659 
1660  __ movzxbl(kScratchRegister,
1661  FieldOperand(kScratchRegister, Map::kInstanceTypeOffset));
1662  __ cmpb(kScratchRegister, Immediate(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
1663  __ j(below, is_not_object);
1664  __ cmpb(kScratchRegister, Immediate(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
1665  return below_equal;
1666 }
1667 
1668 
1669 void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
1670  Register reg = ToRegister(instr->InputAt(0));
1671 
1672  int true_block = chunk_->LookupDestination(instr->true_block_id());
1673  int false_block = chunk_->LookupDestination(instr->false_block_id());
1674  Label* true_label = chunk_->GetAssemblyLabel(true_block);
1675  Label* false_label = chunk_->GetAssemblyLabel(false_block);
1676 
1677  Condition true_cond = EmitIsObject(reg, false_label, true_label);
1678 
1679  EmitBranch(true_block, false_block, true_cond);
1680 }
1681 
1682 
1683 Condition LCodeGen::EmitIsString(Register input,
1684  Register temp1,
1685  Label* is_not_string) {
1686  __ JumpIfSmi(input, is_not_string);
1687  Condition cond = masm_->IsObjectStringType(input, temp1, temp1);
1688 
1689  return cond;
1690 }
1691 
1692 
1693 void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
1694  Register reg = ToRegister(instr->InputAt(0));
1695  Register temp = ToRegister(instr->TempAt(0));
1696 
1697  int true_block = chunk_->LookupDestination(instr->true_block_id());
1698  int false_block = chunk_->LookupDestination(instr->false_block_id());
1699  Label* false_label = chunk_->GetAssemblyLabel(false_block);
1700 
1701  Condition true_cond = EmitIsString(reg, temp, false_label);
1702 
1703  EmitBranch(true_block, false_block, true_cond);
1704 }
1705 
1706 
1707 void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
1708  int true_block = chunk_->LookupDestination(instr->true_block_id());
1709  int false_block = chunk_->LookupDestination(instr->false_block_id());
1710 
1711  Condition is_smi;
1712  if (instr->InputAt(0)->IsRegister()) {
1713  Register input = ToRegister(instr->InputAt(0));
1714  is_smi = masm()->CheckSmi(input);
1715  } else {
1716  Operand input = ToOperand(instr->InputAt(0));
1717  is_smi = masm()->CheckSmi(input);
1718  }
1719  EmitBranch(true_block, false_block, is_smi);
1720 }
1721 
1722 
1723 void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
1724  Register input = ToRegister(instr->InputAt(0));
1725  Register temp = ToRegister(instr->TempAt(0));
1726 
1727  int true_block = chunk_->LookupDestination(instr->true_block_id());
1728  int false_block = chunk_->LookupDestination(instr->false_block_id());
1729 
1730  __ JumpIfSmi(input, chunk_->GetAssemblyLabel(false_block));
1731  __ movq(temp, FieldOperand(input, HeapObject::kMapOffset));
1732  __ testb(FieldOperand(temp, Map::kBitFieldOffset),
1733  Immediate(1 << Map::kIsUndetectable));
1734  EmitBranch(true_block, false_block, not_zero);
1735 }
1736 
1737 
1738 void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
1739  Token::Value op = instr->op();
1740  int true_block = chunk_->LookupDestination(instr->true_block_id());
1741  int false_block = chunk_->LookupDestination(instr->false_block_id());
1742 
1743  Handle<Code> ic = CompareIC::GetUninitialized(op);
1744  CallCode(ic, RelocInfo::CODE_TARGET, instr);
1745 
1746  Condition condition = TokenToCondition(op, false);
1747  __ testq(rax, rax);
1748 
1749  EmitBranch(true_block, false_block, condition);
1750 }
1751 
1752 
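 // TestType() and BranchCondition() below reduce the [from, to] instance-type
 // interval to a single unsigned compare against one endpoint: equal when the
 // interval is a single type, below_equal when it starts at FIRST_TYPE, and
 // above_equal when it ends at LAST_TYPE.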
1753 static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
1754  InstanceType from = instr->from();
1755  InstanceType to = instr->to();
1756  if (from == FIRST_TYPE) return to;
1757  ASSERT(from == to || to == LAST_TYPE);
1758  return from;
1759 }
1760 
1761 
1762 static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
1763  InstanceType from = instr->from();
1764  InstanceType to = instr->to();
1765  if (from == to) return equal;
1766  if (to == LAST_TYPE) return above_equal;
1767  if (from == FIRST_TYPE) return below_equal;
1768  UNREACHABLE();
1769  return equal;
1770 }
1771 
1772 
1773 void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
1774  Register input = ToRegister(instr->InputAt(0));
1775 
1776  int true_block = chunk_->LookupDestination(instr->true_block_id());
1777  int false_block = chunk_->LookupDestination(instr->false_block_id());
1778 
1779  Label* false_label = chunk_->GetAssemblyLabel(false_block);
1780 
1781  __ JumpIfSmi(input, false_label);
1782 
1783  __ CmpObjectType(input, TestType(instr->hydrogen()), kScratchRegister);
1784  EmitBranch(true_block, false_block, BranchCondition(instr->hydrogen()));
1785 }
1786 
1787 
1788 void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
1789  Register input = ToRegister(instr->InputAt(0));
1790  Register result = ToRegister(instr->result());
1791 
1792  if (FLAG_debug_code) {
1793  __ AbortIfNotString(input);
1794  }
1795 
1796  __ movl(result, FieldOperand(input, String::kHashFieldOffset));
1798  __ IndexFromHash(result, result);
1799 }
1800 
1801 
1802 void LCodeGen::DoHasCachedArrayIndexAndBranch(
1803  LHasCachedArrayIndexAndBranch* instr) {
1804  Register input = ToRegister(instr->InputAt(0));
1805 
1806  int true_block = chunk_->LookupDestination(instr->true_block_id());
1807  int false_block = chunk_->LookupDestination(instr->false_block_id());
1808 
1809  __ testl(FieldOperand(input, String::kHashFieldOffset),
1810  Immediate(String::kContainsCachedArrayIndexMask));
1811  EmitBranch(true_block, false_block, equal);
1812 }
1813 
1814 
1815 // Branches to a label or falls through with the answer in the z flag.
1816 // Trashes the temp register.
1817 void LCodeGen::EmitClassOfTest(Label* is_true,
1818  Label* is_false,
1819  Handle<String> class_name,
1820  Register input,
1821  Register temp,
1822  Register temp2) {
1823  ASSERT(!input.is(temp));
1824  ASSERT(!input.is(temp2));
1825  ASSERT(!temp.is(temp2));
1826 
1827  __ JumpIfSmi(input, is_false);
1828 
1829  if (class_name->IsEqualTo(CStrVector("Function"))) {
1830  // Assuming the following assertions, we can use the same compares to test
1831  // for both being a function type and being in the object type range.
1832  STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
1833  STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
1834  FIRST_SPEC_OBJECT_TYPE + 1);
1835  STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
1836  LAST_SPEC_OBJECT_TYPE - 1);
1837  STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
1838  __ CmpObjectType(input, FIRST_SPEC_OBJECT_TYPE, temp);
1839  __ j(below, is_false);
1840  __ j(equal, is_true);
1841  __ CmpInstanceType(temp, LAST_SPEC_OBJECT_TYPE);
1842  __ j(equal, is_true);
1843  } else {
1844  // Faster code path to avoid two compares: subtract lower bound from the
1845  // actual type and do a signed compare with the width of the type range.
1846  __ movq(temp, FieldOperand(input, HeapObject::kMapOffset));
1847  __ movzxbl(temp2, FieldOperand(temp, Map::kInstanceTypeOffset));
1848  __ subq(temp2, Immediate(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
1849  __ cmpq(temp2, Immediate(LAST_NONCALLABLE_SPEC_OBJECT_TYPE -
1850  FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
1851  __ j(above, is_false);
1852  }
1853 
1854  // Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range.
1855  // Check if the constructor in the map is a function.
1856  __ movq(temp, FieldOperand(temp, Map::kConstructorOffset));
1857 
1858  // Objects with a non-function constructor have class 'Object'.
1859  __ CmpObjectType(temp, JS_FUNCTION_TYPE, kScratchRegister);
1860  if (class_name->IsEqualTo(CStrVector("Object"))) {
1861  __ j(not_equal, is_true);
1862  } else {
1863  __ j(not_equal, is_false);
1864  }
1865 
1866  // temp now contains the constructor function. Grab the
1867  // instance class name from there.
1868  __ movq(temp, FieldOperand(temp, JSFunction::kSharedFunctionInfoOffset));
1869  __ movq(temp, FieldOperand(temp,
1870  SharedFunctionInfo::kInstanceClassNameOffset));
1871  // The class name we are testing against is a symbol because it's a literal.
1872  // The name in the constructor is a symbol because of the way the context is
1873  // booted. This routine isn't expected to work for random API-created
1874  // classes and it doesn't have to because you can't access it with natives
1875  // syntax. Since both sides are symbols it is sufficient to use an identity
1876  // comparison.
1877  ASSERT(class_name->IsSymbol());
1878  __ Cmp(temp, class_name);
1879  // End with the answer in the z flag.
1880 }
1881 
1882 
1883 void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
1884  Register input = ToRegister(instr->InputAt(0));
1885  Register temp = ToRegister(instr->TempAt(0));
1886  Register temp2 = ToRegister(instr->TempAt(1));
1887  Handle<String> class_name = instr->hydrogen()->class_name();
1888 
1889  int true_block = chunk_->LookupDestination(instr->true_block_id());
1890  int false_block = chunk_->LookupDestination(instr->false_block_id());
1891 
1892  Label* true_label = chunk_->GetAssemblyLabel(true_block);
1893  Label* false_label = chunk_->GetAssemblyLabel(false_block);
1894 
1895  EmitClassOfTest(true_label, false_label, class_name, input, temp, temp2);
1896 
1897  EmitBranch(true_block, false_block, equal);
1898 }
1899 
1900 
1901 void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
1902  Register reg = ToRegister(instr->InputAt(0));
1903  int true_block = instr->true_block_id();
1904  int false_block = instr->false_block_id();
1905 
1906  __ Cmp(FieldOperand(reg, HeapObject::kMapOffset), instr->map());
1907  EmitBranch(true_block, false_block, equal);
1908 }
1909 
1910 
1911 void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
1912  InstanceofStub stub(InstanceofStub::kNoFlags);
1913  __ push(ToRegister(instr->InputAt(0)));
1914  __ push(ToRegister(instr->InputAt(1)));
1915  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
1916  Label true_value, done;
1917  __ testq(rax, rax);
1918  __ j(zero, &true_value, Label::kNear);
1919  __ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex);
1920  __ jmp(&done, Label::kNear);
1921  __ bind(&true_value);
1922  __ LoadRoot(ToRegister(instr->result()), Heap::kTrueValueRootIndex);
1923  __ bind(&done);
1924 }
1925 
1926 
1927 void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
1928  class DeferredInstanceOfKnownGlobal: public LDeferredCode {
1929  public:
1930  DeferredInstanceOfKnownGlobal(LCodeGen* codegen,
1931  LInstanceOfKnownGlobal* instr)
1932  : LDeferredCode(codegen), instr_(instr) { }
1933  virtual void Generate() {
1934  codegen()->DoDeferredInstanceOfKnownGlobal(instr_, &map_check_);
1935  }
1936  virtual LInstruction* instr() { return instr_; }
1937  Label* map_check() { return &map_check_; }
1938  private:
1939  LInstanceOfKnownGlobal* instr_;
1940  Label map_check_;
1941  };
1942 
1943 
1944  DeferredInstanceOfKnownGlobal* deferred;
1945  deferred = new(zone()) DeferredInstanceOfKnownGlobal(this, instr);
1946 
1947  Label done, false_result;
1948  Register object = ToRegister(instr->InputAt(0));
1949 
1950  // A Smi is not an instance of anything.
1951  __ JumpIfSmi(object, &false_result);
1952 
1953  // This is the inlined call site instanceof cache. The two occurrences of the
1954  // hole value will be patched to the last map/result pair generated by the
1955  // instanceof stub.
1956  Label cache_miss;
1957  // Use a temp register to avoid memory operands with variable lengths.
1958  Register map = ToRegister(instr->TempAt(0));
1959  __ movq(map, FieldOperand(object, HeapObject::kMapOffset));
1960  __ bind(deferred->map_check()); // Label for calculating code patching.
1961  Handle<JSGlobalPropertyCell> cache_cell =
1962  factory()->NewJSGlobalPropertyCell(factory()->the_hole_value());
1963  __ movq(kScratchRegister, cache_cell, RelocInfo::GLOBAL_PROPERTY_CELL);
1964  __ cmpq(map, Operand(kScratchRegister, 0));
1965  __ j(not_equal, &cache_miss, Label::kNear);
1966  // Patched to load either true or false.
1967  __ LoadRoot(ToRegister(instr->result()), Heap::kTheHoleValueRootIndex);
1968 #ifdef DEBUG
1969  // Check that the code size between patch label and patch sites is invariant.
1970  Label end_of_patched_code;
1971  __ bind(&end_of_patched_code);
1972  ASSERT(true);
1973 #endif
1974  __ jmp(&done);
1975 
1976  // The inlined call site cache did not match. Check for null and string
1977  // before calling the deferred code.
1978  __ bind(&cache_miss); // Null is not an instance of anything.
1979  __ CompareRoot(object, Heap::kNullValueRootIndex);
1980  __ j(equal, &false_result, Label::kNear);
1981 
1982  // String values are not instances of anything.
1983  __ JumpIfNotString(object, kScratchRegister, deferred->entry());
1984 
1985  __ bind(&false_result);
1986  __ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex);
1987 
1988  __ bind(deferred->exit());
1989  __ bind(&done);
1990 }
1991 
1992 
1993 void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
1994  Label* map_check) {
1995  {
1996  PushSafepointRegistersScope scope(this);
1997  InstanceofStub::Flags flags = static_cast<InstanceofStub::Flags>(
1998  InstanceofStub::kNoFlags | InstanceofStub::kCallSiteInlineCheck);
1999  InstanceofStub stub(flags);
2000 
2001  __ push(ToRegister(instr->InputAt(0)));
2002  __ PushHeapObject(instr->function());
2003 
2004  static const int kAdditionalDelta = 10;
2005  int delta =
2006  masm_->SizeOfCodeGeneratedSince(map_check) + kAdditionalDelta;
2007  ASSERT(delta >= 0);
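 // The delta encodes the distance from the map_check label to the call,
 // letting the stub find and patch the inlined map compare and result load
 // emitted above.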
2008  __ push_imm32(delta);
2009 
2010  // We are pushing three values on the stack but recording a
2011  // safepoint with two arguments because stub is going to
2012  // remove the third argument from the stack before jumping
2013  // to instanceof builtin on the slow path.
2014  CallCodeGeneric(stub.GetCode(),
2015  RelocInfo::CODE_TARGET,
2016  instr,
2017  RECORD_SAFEPOINT_WITH_REGISTERS,
2018  2);
2019  ASSERT(delta == masm_->SizeOfCodeGeneratedSince(map_check));
2020  LEnvironment* env = instr->GetDeferredLazyDeoptimizationEnvironment();
2021  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
2022  // Move result to a register that survives the end of the
2023  // PushSafepointRegisterScope.
2024  __ movq(kScratchRegister, rax);
2025  }
2026  __ testq(kScratchRegister, kScratchRegister);
2027  Label load_false;
2028  Label done;
2029  __ j(not_zero, &load_false);
2030  __ LoadRoot(rax, Heap::kTrueValueRootIndex);
2031  __ jmp(&done);
2032  __ bind(&load_false);
2033  __ LoadRoot(rax, Heap::kFalseValueRootIndex);
2034  __ bind(&done);
2035 }
2036 
2037 
2038 void LCodeGen::DoCmpT(LCmpT* instr) {
2039  Token::Value op = instr->op();
2040 
2041  Handle<Code> ic = CompareIC::GetUninitialized(op);
2042  CallCode(ic, RelocInfo::CODE_TARGET, instr);
2043 
2044  Condition condition = TokenToCondition(op, false);
2045  Label true_value, done;
2046  __ testq(rax, rax);
2047  __ j(condition, &true_value, Label::kNear);
2048  __ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex);
2049  __ jmp(&done, Label::kNear);
2050  __ bind(&true_value);
2051  __ LoadRoot(ToRegister(instr->result()), Heap::kTrueValueRootIndex);
2052  __ bind(&done);
2053 }
2054 
2055 
2056 void LCodeGen::DoReturn(LReturn* instr) {
2057  if (FLAG_trace) {
2058  // Preserve the return value on the stack and rely on the runtime
2059  // call to return the value in the same register.
2060  __ push(rax);
2061  __ CallRuntime(Runtime::kTraceExit, 1);
2062  }
2063  __ movq(rsp, rbp);
2064  __ pop(rbp);
2065  __ Ret((GetParameterCount() + 1) * kPointerSize, rcx);
2066 }
2067 
2068 
2069 void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
2070  Register result = ToRegister(instr->result());
2071  __ LoadGlobalCell(result, instr->hydrogen()->cell());
2072  if (instr->hydrogen()->RequiresHoleCheck()) {
2073  __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
2074  DeoptimizeIf(equal, instr->environment());
2075  }
2076 }
2077 
2078 
2079 void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
2080  ASSERT(ToRegister(instr->global_object()).is(rax));
2081  ASSERT(ToRegister(instr->result()).is(rax));
2082 
2083  __ Move(rcx, instr->name());
2084  RelocInfo::Mode mode = instr->for_typeof() ? RelocInfo::CODE_TARGET :
2085  RelocInfo::CODE_TARGET_CONTEXT;
2086  Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
2087  CallCode(ic, mode, instr);
2088 }
2089 
2090 
2091 void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
2092  Register value = ToRegister(instr->value());
2093  Handle<JSGlobalPropertyCell> cell_handle = instr->hydrogen()->cell();
2094 
2095  // If the cell we are storing to contains the hole it could have
2096  // been deleted from the property dictionary. In that case, we need
2097  // to update the property details in the property dictionary to mark
2098  // it as no longer deleted. We deoptimize in that case.
2099  if (instr->hydrogen()->RequiresHoleCheck()) {
2100  // We have a temp because CompareRoot might clobber kScratchRegister.
2101  Register cell = ToRegister(instr->TempAt(0));
2102  ASSERT(!value.is(cell));
2103  __ movq(cell, cell_handle, RelocInfo::GLOBAL_PROPERTY_CELL);
2104  __ CompareRoot(Operand(cell, 0), Heap::kTheHoleValueRootIndex);
2105  DeoptimizeIf(equal, instr->environment());
2106  // Store the value.
2107  __ movq(Operand(cell, 0), value);
2108  } else {
2109  // Store the value.
2110  __ movq(kScratchRegister, cell_handle, RelocInfo::GLOBAL_PROPERTY_CELL);
2111  __ movq(Operand(kScratchRegister, 0), value);
2112  }
2113  // Cells are always rescanned, so no write barrier here.
2114 }
2115 
2116 
2117 void LCodeGen::DoStoreGlobalGeneric(LStoreGlobalGeneric* instr) {
2118  ASSERT(ToRegister(instr->global_object()).is(rdx));
2119  ASSERT(ToRegister(instr->value()).is(rax));
2120 
2121  __ Move(rcx, instr->name());
2122  Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
2123  ? isolate()->builtins()->StoreIC_Initialize_Strict()
2124  : isolate()->builtins()->StoreIC_Initialize();
2125  CallCode(ic, RelocInfo::CODE_TARGET_CONTEXT, instr);
2126 }
2127 
2128 
2129 void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
2130  Register context = ToRegister(instr->context());
2131  Register result = ToRegister(instr->result());
2132  __ movq(result, ContextOperand(context, instr->slot_index()));
2133  if (instr->hydrogen()->RequiresHoleCheck()) {
2134  __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
2135  if (instr->hydrogen()->DeoptimizesOnHole()) {
2136  DeoptimizeIf(equal, instr->environment());
2137  } else {
2138  Label is_not_hole;
2139  __ j(not_equal, &is_not_hole, Label::kNear);
2140  __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
2141  __ bind(&is_not_hole);
2142  }
2143  }
2144 }
2145 
2146 
2147 void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
2148  Register context = ToRegister(instr->context());
2149  Register value = ToRegister(instr->value());
2150 
2151  Operand target = ContextOperand(context, instr->slot_index());
2152 
2153  Label skip_assignment;
2154  if (instr->hydrogen()->RequiresHoleCheck()) {
2155  __ CompareRoot(target, Heap::kTheHoleValueRootIndex);
2156  if (instr->hydrogen()->DeoptimizesOnHole()) {
2157  DeoptimizeIf(equal, instr->environment());
2158  } else {
2159  __ j(not_equal, &skip_assignment);
2160  }
2161  }
2162  __ movq(target, value);
2163 
2164  if (instr->hydrogen()->NeedsWriteBarrier()) {
2165  HType type = instr->hydrogen()->value()->type();
2166  SmiCheck check_needed =
2167  type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
2168  int offset = Context::SlotOffset(instr->slot_index());
2169  Register scratch = ToRegister(instr->TempAt(0));
2170  __ RecordWriteContextSlot(context,
2171  offset,
2172  value,
2173  scratch,
2174  kSaveFPRegs,
2175  EMIT_REMEMBERED_SET,
2176  check_needed);
2177  }
2178 
2179  __ bind(&skip_assignment);
2180 }
2181 
2182 
2183 void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
2184  Register object = ToRegister(instr->InputAt(0));
2185  Register result = ToRegister(instr->result());
2186  if (instr->hydrogen()->is_in_object()) {
2187  __ movq(result, FieldOperand(object, instr->hydrogen()->offset()));
2188  } else {
2189  __ movq(result, FieldOperand(object, JSObject::kPropertiesOffset));
2190  __ movq(result, FieldOperand(result, instr->hydrogen()->offset()));
2191  }
2192 }
2193 
2194 
2195 void LCodeGen::EmitLoadFieldOrConstantFunction(Register result,
2196  Register object,
2197  Handle<Map> type,
2198  Handle<String> name,
2199  LEnvironment* env) {
2200  LookupResult lookup(isolate());
2201  type->LookupInDescriptors(NULL, *name, &lookup);
2202  ASSERT(lookup.IsFound() || lookup.IsCacheable());
2203  if (lookup.IsFound() && lookup.type() == FIELD) {
2204  int index = lookup.GetLocalFieldIndexFromMap(*type);
2205  int offset = index * kPointerSize;
2206  if (index < 0) {
2207  // Negative property indices are in-object properties, indexed
2208  // from the end of the fixed part of the object.
2209  __ movq(result, FieldOperand(object, offset + type->instance_size()));
2210  } else {
2211  // Non-negative property indices are in the properties array.
2212  __ movq(result, FieldOperand(object, JSObject::kPropertiesOffset));
2213  __ movq(result, FieldOperand(result, offset + FixedArray::kHeaderSize));
2214  }
2215  } else if (lookup.IsFound() && lookup.type() == CONSTANT_FUNCTION) {
2216  Handle<JSFunction> function(lookup.GetConstantFunctionFromMap(*type));
2217  __ LoadHeapObject(result, function);
2218  } else {
2219  // Negative lookup.
2220  // Check prototypes.
2221  HeapObject* current = HeapObject::cast((*type)->prototype());
2222  Heap* heap = type->GetHeap();
2223  while (current != heap->null_value()) {
2224  Handle<HeapObject> link(current);
2225  __ LoadHeapObject(result, link);
2226  __ Cmp(FieldOperand(result, HeapObject::kMapOffset),
2227  Handle<Map>(JSObject::cast(current)->map()));
2228  DeoptimizeIf(not_equal, env);
2229  current = HeapObject::cast(current->map()->prototype());
2230  }
2231  __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
2232  }
2233 }
2234 
2235 
2236 // Check for cases where EmitLoadFieldOrConstantFunction needs to walk the
2237 // prototype chain, which causes unbounded code generation.
2238 static bool CompactEmit(
2239  SmallMapList* list, Handle<String> name, int i, Isolate* isolate) {
2240  LookupResult lookup(isolate);
2241  Handle<Map> map = list->at(i);
2242  map->LookupInDescriptors(NULL, *name, &lookup);
2243  return lookup.IsFound() &&
2244  (lookup.type() == FIELD || lookup.type() == CONSTANT_FUNCTION);
2245 }
2246 
2247 
2248 void LCodeGen::DoLoadNamedFieldPolymorphic(LLoadNamedFieldPolymorphic* instr) {
2249  Register object = ToRegister(instr->object());
2250  Register result = ToRegister(instr->result());
2251 
2252  int map_count = instr->hydrogen()->types()->length();
2253  bool need_generic = instr->hydrogen()->need_generic();
2254 
2255  if (map_count == 0 && !need_generic) {
2256  DeoptimizeIf(no_condition, instr->environment());
2257  return;
2258  }
2259  Handle<String> name = instr->hydrogen()->name();
2260  Label done;
2261  bool all_are_compact = true;
2262  for (int i = 0; i < map_count; ++i) {
2263  if (!CompactEmit(instr->hydrogen()->types(), name, i, isolate())) {
2264  all_are_compact = false;
2265  break;
2266  }
2267  }
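 // When every map can be emitted compactly, the per-map branches below can
 // safely use near (8-bit displacement) jumps; otherwise far jumps are used.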
2268  for (int i = 0; i < map_count; ++i) {
2269  bool last = (i == map_count - 1);
2270  Handle<Map> map = instr->hydrogen()->types()->at(i);
2271  Label check_passed;
2272  __ CompareMap(object, map, &check_passed, ALLOW_ELEMENT_TRANSITION_MAPS);
2273  if (last && !need_generic) {
2274  DeoptimizeIf(not_equal, instr->environment());
2275  __ bind(&check_passed);
2276  EmitLoadFieldOrConstantFunction(
2277  result, object, map, name, instr->environment());
2278  } else {
2279  Label next;
2280  bool compact = all_are_compact ? true :
2281  CompactEmit(instr->hydrogen()->types(), name, i, isolate());
2282  __ j(not_equal, &next, compact ? Label::kNear : Label::kFar);
2283  __ bind(&check_passed);
2284  EmitLoadFieldOrConstantFunction(
2285  result, object, map, name, instr->environment());
2286  __ jmp(&done, all_are_compact ? Label::kNear : Label::kFar);
2287  __ bind(&next);
2288  }
2289  }
2290  if (need_generic) {
2291  __ Move(rcx, name);
2292  Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
2293  CallCode(ic, RelocInfo::CODE_TARGET, instr);
2294  }
2295  __ bind(&done);
2296 }
2297 
2298 
2299 void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
2300  ASSERT(ToRegister(instr->object()).is(rax));
2301  ASSERT(ToRegister(instr->result()).is(rax));
2302 
2303  __ Move(rcx, instr->name());
2304  Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
2305  CallCode(ic, RelocInfo::CODE_TARGET, instr);
2306 }
2307 
2308 
2309 void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
2310  Register function = ToRegister(instr->function());
2311  Register result = ToRegister(instr->result());
2312 
2313  // Check that the function really is a function.
2314  __ CmpObjectType(function, JS_FUNCTION_TYPE, result);
2315  DeoptimizeIf(not_equal, instr->environment());
2316 
2317  // Check whether the function has an instance prototype.
2318  Label non_instance;
2319  __ testb(FieldOperand(result, Map::kBitFieldOffset),
2320  Immediate(1 << Map::kHasNonInstancePrototype));
2321  __ j(not_zero, &non_instance, Label::kNear);
2322 
2323  // Get the prototype or initial map from the function.
2324  __ movq(result,
2325  FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
2326 
2327  // Check that the function has a prototype or an initial map.
2328  __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
2329  DeoptimizeIf(equal, instr->environment());
2330 
2331  // If the function does not have an initial map, we're done.
2332  Label done;
2333  __ CmpObjectType(result, MAP_TYPE, kScratchRegister);
2334  __ j(not_equal, &done, Label::kNear);
2335 
2336  // Get the prototype from the initial map.
2337  __ movq(result, FieldOperand(result, Map::kPrototypeOffset));
2338  __ jmp(&done, Label::kNear);
2339 
2340  // Non-instance prototype: Fetch prototype from constructor field
2341  // in the function's map.
2342  __ bind(&non_instance);
2343  __ movq(result, FieldOperand(result, Map::kConstructorOffset));
2344 
2345  // All done.
2346  __ bind(&done);
2347 }
2348 
2349 
2350 void LCodeGen::DoLoadElements(LLoadElements* instr) {
2351  Register result = ToRegister(instr->result());
2352  Register input = ToRegister(instr->InputAt(0));
2353  __ movq(result, FieldOperand(input, JSObject::kElementsOffset));
2354  if (FLAG_debug_code) {
2355  Label done, ok, fail;
2356  __ CompareRoot(FieldOperand(result, HeapObject::kMapOffset),
2357  Heap::kFixedArrayMapRootIndex);
2358  __ j(equal, &done, Label::kNear);
2359  __ CompareRoot(FieldOperand(result, HeapObject::kMapOffset),
2360  Heap::kFixedCOWArrayMapRootIndex);
2361  __ j(equal, &done, Label::kNear);
2362  Register temp((result.is(rax)) ? rbx : rax);
2363  __ push(temp);
2364  __ movq(temp, FieldOperand(result, HeapObject::kMapOffset));
2365  __ movzxbq(temp, FieldOperand(temp, Map::kBitField2Offset));
2366  __ and_(temp, Immediate(Map::kElementsKindMask));
2367  __ shr(temp, Immediate(Map::kElementsKindShift));
2368  __ cmpl(temp, Immediate(GetInitialFastElementsKind()));
2369  __ j(less, &fail, Label::kNear);
2370  __ cmpl(temp, Immediate(TERMINAL_FAST_ELEMENTS_KIND));
2371  __ j(less_equal, &ok, Label::kNear);
2372  __ cmpl(temp, Immediate(FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND));
2373  __ j(less, &fail, Label::kNear);
2374  __ cmpl(temp, Immediate(LAST_EXTERNAL_ARRAY_ELEMENTS_KIND));
2375  __ j(less_equal, &ok, Label::kNear);
2376  __ bind(&fail);
2377  __ Abort("Check for fast or external elements failed");
2378  __ bind(&ok);
2379  __ pop(temp);
2380  __ bind(&done);
2381  }
2382 }
2383 
2384 
2385 void LCodeGen::DoLoadExternalArrayPointer(
2386  LLoadExternalArrayPointer* instr) {
2387  Register result = ToRegister(instr->result());
2388  Register input = ToRegister(instr->InputAt(0));
2389  __ movq(result, FieldOperand(input,
2390  ExternalPixelArray::kExternalPointerOffset));
2391 }
2392 
2393 
2394 void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
2395  Register arguments = ToRegister(instr->arguments());
2396  Register length = ToRegister(instr->length());
2397  Register result = ToRegister(instr->result());
2398 
2399  if (instr->index()->IsRegister()) {
2400  __ subl(length, ToRegister(instr->index()));
2401  } else {
2402  __ subl(length, ToOperand(instr->index()));
2403  }
2404  DeoptimizeIf(below_equal, instr->environment());
2405 
2406  // There are two words between the frame pointer and the last argument.
2407  // Subtracting the index from length accounts for one of them; add one more.
2408  __ movq(result, Operand(arguments, length, times_pointer_size, kPointerSize));
2409 }
2410 
2411 
2412 void LCodeGen::DoLoadKeyedFastElement(LLoadKeyedFastElement* instr) {
2413  Register result = ToRegister(instr->result());
2414 
2415  if (instr->hydrogen()->IsDehoisted() && !instr->key()->IsConstantOperand()) {
2416  // Sign extend key because it could be a 32 bit negative value
2417  // and the dehoisted address computation happens in 64 bits.
2418  Register key_reg = ToRegister(instr->key());
2419  __ movsxlq(key_reg, key_reg);
2420  }
2421 
2422  // Load the result.
2423  __ movq(result,
2424  BuildFastArrayOperand(instr->elements(),
2425  instr->key(),
2426  FAST_ELEMENTS,
2427  FixedArray::kHeaderSize - kHeapObjectTag,
2428  instr->additional_index()));
2429 
2430  // Check for the hole value.
2431  if (instr->hydrogen()->RequiresHoleCheck()) {
2432  if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
2433  Condition smi = __ CheckSmi(result);
2434  DeoptimizeIf(NegateCondition(smi), instr->environment());
2435  } else {
2436  __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
2437  DeoptimizeIf(equal, instr->environment());
2438  }
2439  }
2440 }
2441 
2442 
2443 void LCodeGen::DoLoadKeyedFastDoubleElement(
2444  LLoadKeyedFastDoubleElement* instr) {
2445  XMMRegister result(ToDoubleRegister(instr->result()));
2446 
2447  if (instr->hydrogen()->IsDehoisted() && !instr->key()->IsConstantOperand()) {
2448  // Sign extend key because it could be a 32 bit negative value
2449  // and the dehoisted address computation happens in 64 bits
2450  Register key_reg = ToRegister(instr->key());
2451  __ movsxlq(key_reg, key_reg);
2452  }
2453 
2454  if (instr->hydrogen()->RequiresHoleCheck()) {
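 // The hole is a NaN with a fixed bit pattern, so comparing only its upper
 // 32 bits (kHoleNanUpper32), located sizeof(kHoleNanLower32) bytes into the
 // element, is enough to detect it.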
2455  int offset = FixedDoubleArray::kHeaderSize - kHeapObjectTag +
2456  sizeof(kHoleNanLower32);
2457  Operand hole_check_operand = BuildFastArrayOperand(
2458  instr->elements(),
2459  instr->key(),
2460  FAST_DOUBLE_ELEMENTS,
2461  offset,
2462  instr->additional_index());
2463  __ cmpl(hole_check_operand, Immediate(kHoleNanUpper32));
2464  DeoptimizeIf(equal, instr->environment());
2465  }
2466 
2467  Operand double_load_operand = BuildFastArrayOperand(
2468  instr->elements(),
2469  instr->key(),
2470  FAST_DOUBLE_ELEMENTS,
2471  FixedDoubleArray::kHeaderSize - kHeapObjectTag,
2472  instr->additional_index());
2473  __ movsd(result, double_load_operand);
2474 }
2475 
2476 
2477 Operand LCodeGen::BuildFastArrayOperand(
2478  LOperand* elements_pointer,
2479  LOperand* key,
2480  ElementsKind elements_kind,
2481  uint32_t offset,
2482  uint32_t additional_index) {
2483  Register elements_pointer_reg = ToRegister(elements_pointer);
2484  int shift_size = ElementsKindToShiftSize(elements_kind);
2485  if (key->IsConstantOperand()) {
2486  int constant_value = ToInteger32(LConstantOperand::cast(key));
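 // Constant keys with any of the top four bits set cannot be folded safely
 // into the 32-bit displacement computed below, so code generation is aborted.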
2487  if (constant_value & 0xF0000000) {
2488  Abort("array index constant value too big");
2489  }
2490  return Operand(elements_pointer_reg,
2491  ((constant_value + additional_index) << shift_size)
2492  + offset);
2493  } else {
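 // shift_size is log2 of the element size (0..3), which maps directly onto
 // the x64 SIB scale encoding (times_1 .. times_8), so it can be cast to a
 // ScaleFactor.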
2494  ScaleFactor scale_factor = static_cast<ScaleFactor>(shift_size);
2495  return Operand(elements_pointer_reg,
2496  ToRegister(key),
2497  scale_factor,
2498  offset + (additional_index << shift_size));
2499  }
2500 }
2501 
2502 
2503 void LCodeGen::DoLoadKeyedSpecializedArrayElement(
2504  LLoadKeyedSpecializedArrayElement* instr) {
2505  ElementsKind elements_kind = instr->elements_kind();
2506  Operand operand(BuildFastArrayOperand(instr->external_pointer(),
2507  instr->key(),
2508  elements_kind,
2509  0,
2510  instr->additional_index()));
2511  if (instr->hydrogen()->IsDehoisted() && !instr->key()->IsConstantOperand()) {
2512  // Sign extend key because it could be a 32 bit negative value
2513  // and the dehoisted address computation happens in 64 bits
2514  Register key_reg = ToRegister(instr->key());
2515  __ movsxlq(key_reg, key_reg);
2516  }
2517 
2518  if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
2519  XMMRegister result(ToDoubleRegister(instr->result()));
2520  __ movss(result, operand);
2521  __ cvtss2sd(result, result);
2522  } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
2523  __ movsd(ToDoubleRegister(instr->result()), operand);
2524  } else {
2525  Register result(ToRegister(instr->result()));
2526  switch (elements_kind) {
2527  case EXTERNAL_BYTE_ELEMENTS:
2528  __ movsxbq(result, operand);
2529  break;
2530  case EXTERNAL_PIXEL_ELEMENTS:
2531  case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
2532  __ movzxbq(result, operand);
2533  break;
2534  case EXTERNAL_SHORT_ELEMENTS:
2535  __ movsxwq(result, operand);
2536  break;
2537  case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
2538  __ movzxwq(result, operand);
2539  break;
2540  case EXTERNAL_INT_ELEMENTS:
2541  __ movsxlq(result, operand);
2542  break;
2543  case EXTERNAL_UNSIGNED_INT_ELEMENTS:
2544  __ movl(result, operand);
2545  __ testl(result, result);
2546  // TODO(danno): we could be more clever here, perhaps having a special
2547  // version of the stub that detects if the overflow case actually
2548  // happens, and generate code that returns a double rather than int.
2549  DeoptimizeIf(negative, instr->environment());
2550  break;
2551  case EXTERNAL_FLOAT_ELEMENTS:
2552  case EXTERNAL_DOUBLE_ELEMENTS:
2553  case FAST_ELEMENTS:
2554  case FAST_SMI_ELEMENTS:
2555  case FAST_DOUBLE_ELEMENTS:
2556  case FAST_HOLEY_ELEMENTS:
2557  case FAST_HOLEY_SMI_ELEMENTS:
2558  case FAST_HOLEY_DOUBLE_ELEMENTS:
2559  case DICTIONARY_ELEMENTS:
2560  case NON_STRICT_ARGUMENTS_ELEMENTS:
2561  UNREACHABLE();
2562  break;
2563  }
2564  }
2565 }
2566 
2567 
2568 void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
2569  ASSERT(ToRegister(instr->object()).is(rdx));
2570  ASSERT(ToRegister(instr->key()).is(rax));
2571 
2572  Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
2573  CallCode(ic, RelocInfo::CODE_TARGET, instr);
2574 }
2575 
2576 
2577 void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
2578  Register result = ToRegister(instr->result());
2579 
2580  if (instr->hydrogen()->from_inlined()) {
2581  __ lea(result, Operand(rsp, -2 * kPointerSize));
2582  } else {
2583  // Check for arguments adapter frame.
2584  Label done, adapted;
2585  __ movq(result, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
2586  __ Cmp(Operand(result, StandardFrameConstants::kContextOffset),
2587  Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
2588  __ j(equal, &adapted, Label::kNear);
2589 
2590  // No arguments adaptor frame.
2591  __ movq(result, rbp);
2592  __ jmp(&done, Label::kNear);
2593 
2594  // Arguments adaptor frame present.
2595  __ bind(&adapted);
2596  __ movq(result, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
2597 
2598  // Result is the frame pointer for the frame if not adapted and for the real
2599  // frame below the adaptor frame if adapted.
2600  __ bind(&done);
2601  }
2602 }
2603 
2604 
2605 void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
2606  Register result = ToRegister(instr->result());
2607 
2608  Label done;
2609 
2610  // If no arguments adaptor frame the number of arguments is fixed.
2611  if (instr->InputAt(0)->IsRegister()) {
2612  __ cmpq(rbp, ToRegister(instr->InputAt(0)));
2613  } else {
2614  __ cmpq(rbp, ToOperand(instr->InputAt(0)));
2615  }
2616  __ movl(result, Immediate(scope()->num_parameters()));
2617  __ j(equal, &done, Label::kNear);
2618 
2619  // Arguments adaptor frame present. Get argument length from there.
2620  __ movq(result, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
2621  __ SmiToInteger32(result,
2622  Operand(result,
2623  ArgumentsAdaptorFrameConstants::kLengthOffset));
2624 
2625  // Argument length is in result register.
2626  __ bind(&done);
2627 }
2628 
2629 
2630 void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
2631  Register receiver = ToRegister(instr->receiver());
2632  Register function = ToRegister(instr->function());
2633 
2634  // If the receiver is null or undefined, we have to pass the global
2635  // object as a receiver to normal functions. Values have to be
2636  // passed unchanged to builtins and strict-mode functions.
2637  Label global_object, receiver_ok;
2638 
2639  // Do not transform the receiver to object for strict mode
2640  // functions.
2641  __ movq(kScratchRegister,
2642  FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
2643  __ testb(FieldOperand(kScratchRegister,
2644  SharedFunctionInfo::kStrictModeByteOffset),
2645  Immediate(1 << SharedFunctionInfo::kStrictModeBitWithinByte));
2646  __ j(not_equal, &receiver_ok, Label::kNear);
2647 
2648  // Do not transform the receiver to object for builtins.
2649  __ testb(FieldOperand(kScratchRegister,
2650  SharedFunctionInfo::kNativeByteOffset),
2651  Immediate(1 << SharedFunctionInfo::kNativeBitWithinByte));
2652  __ j(not_equal, &receiver_ok, Label::kNear);
2653 
2654  // Normal function. Replace undefined or null with global receiver.
2655  __ CompareRoot(receiver, Heap::kNullValueRootIndex);
2656  __ j(equal, &global_object, Label::kNear);
2657  __ CompareRoot(receiver, Heap::kUndefinedValueRootIndex);
2658  __ j(equal, &global_object, Label::kNear);
2659 
2660  // The receiver should be a JS object.
2661  Condition is_smi = __ CheckSmi(receiver);
2662  DeoptimizeIf(is_smi, instr->environment());
2663  __ CmpObjectType(receiver, FIRST_SPEC_OBJECT_TYPE, kScratchRegister);
2664  DeoptimizeIf(below, instr->environment());
2665  __ jmp(&receiver_ok, Label::kNear);
2666 
2667  __ bind(&global_object);
2668  // TODO(kmillikin): We have a hydrogen value for the global object. See
2669  // if it's better to use it than to explicitly fetch it from the context
2670  // here.
2671  __ movq(receiver, ContextOperand(rsi, Context::GLOBAL_INDEX));
2672  __ movq(receiver,
2673  FieldOperand(receiver, GlobalObject::kGlobalReceiverOffset));
2674  __ bind(&receiver_ok);
2675 }
2676 
2677 
2678 void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
2679  Register receiver = ToRegister(instr->receiver());
2680  Register function = ToRegister(instr->function());
2681  Register length = ToRegister(instr->length());
2682  Register elements = ToRegister(instr->elements());
2683  ASSERT(receiver.is(rax)); // Used for parameter count.
2684  ASSERT(function.is(rdi)); // Required by InvokeFunction.
2685  ASSERT(ToRegister(instr->result()).is(rax));
2686 
2687  // Copy the arguments to this function possibly from the
2688  // adaptor frame below it.
2689  const uint32_t kArgumentsLimit = 1 * KB;
2690  __ cmpq(length, Immediate(kArgumentsLimit));
2691  DeoptimizeIf(above, instr->environment());
2692 
2693  __ push(receiver);
2694  __ movq(receiver, length);
2695 
2696  // Loop through the arguments pushing them onto the execution
2697  // stack.
2698  Label invoke, loop;
2699  // length is a small non-negative integer, due to the test above.
2700  __ testl(length, length);
2701  __ j(zero, &invoke, Label::kNear);
2702  __ bind(&loop);
2703  __ push(Operand(elements, length, times_pointer_size, 1 * kPointerSize));
2704  __ decl(length);
2705  __ j(not_zero, &loop);
2706 
2707  // Invoke the function.
2708  __ bind(&invoke);
2709  ASSERT(instr->HasPointerMap());
2710  LPointerMap* pointers = instr->pointer_map();
2711  RecordPosition(pointers->position());
2712  SafepointGenerator safepoint_generator(
2713  this, pointers, Safepoint::kLazyDeopt);
2714  ParameterCount actual(rax);
2715  __ InvokeFunction(function, actual, CALL_FUNCTION,
2716  safepoint_generator, CALL_AS_METHOD);
2717  __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
2718 }
2719 
2720 
2721 void LCodeGen::DoPushArgument(LPushArgument* instr) {
2722  LOperand* argument = instr->InputAt(0);
2723  EmitPushTaggedOperand(argument);
2724 }
2725 
2726 
2727 void LCodeGen::DoDrop(LDrop* instr) {
2728  __ Drop(instr->count());
2729 }
2730 
2731 
2732 void LCodeGen::DoThisFunction(LThisFunction* instr) {
2733  Register result = ToRegister(instr->result());
2734  __ LoadHeapObject(result, instr->hydrogen()->closure());
2735 }
2736 
2737 
2738 void LCodeGen::DoContext(LContext* instr) {
2739  Register result = ToRegister(instr->result());
2740  __ movq(result, rsi);
2741 }
2742 
2743 
2744 void LCodeGen::DoOuterContext(LOuterContext* instr) {
2745  Register context = ToRegister(instr->context());
2746  Register result = ToRegister(instr->result());
2747  __ movq(result,
2748  Operand(context, Context::SlotOffset(Context::PREVIOUS_INDEX)));
2749 }
2750 
2751 
2752 void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
2753  __ push(rsi); // The context is the first argument.
2754  __ PushHeapObject(instr->hydrogen()->pairs());
2755  __ Push(Smi::FromInt(instr->hydrogen()->flags()));
2756  CallRuntime(Runtime::kDeclareGlobals, 3, instr);
2757 }
2758 
2759 
2760 void LCodeGen::DoGlobalObject(LGlobalObject* instr) {
2761  Register result = ToRegister(instr->result());
2762  __ movq(result, GlobalObjectOperand());
2763 }
2764 
2765 
2766 void LCodeGen::DoGlobalReceiver(LGlobalReceiver* instr) {
2767  Register global = ToRegister(instr->global());
2768  Register result = ToRegister(instr->result());
2769  __ movq(result, FieldOperand(global, GlobalObject::kGlobalReceiverOffset));
2770 }
2771 
2772 
2773 void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
2774  int arity,
2775  LInstruction* instr,
2776  CallKind call_kind,
2777  RDIState rdi_state) {
2778  bool can_invoke_directly = !function->NeedsArgumentsAdaption() ||
2779  function->shared()->formal_parameter_count() == arity;
2780 
2781  LPointerMap* pointers = instr->pointer_map();
2782  RecordPosition(pointers->position());
2783 
2784  if (can_invoke_directly) {
2785  if (rdi_state == RDI_UNINITIALIZED) {
2786  __ LoadHeapObject(rdi, function);
2787  }
2788 
2789  // Change context if needed.
2790  bool change_context =
2791  (info()->closure()->context() != function->context()) ||
2792  scope()->contains_with() ||
2793  (scope()->num_heap_slots() > 0);
2794  if (change_context) {
2795  __ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
2796  }
2797 
2798  // Set rax to arguments count if adaption is not needed. Assumes that rax
2799  // is available to write to at this point.
2800  if (!function->NeedsArgumentsAdaption()) {
2801  __ Set(rax, arity);
2802  }
2803 
2804  // Invoke function.
2805  __ SetCallKind(rcx, call_kind);
2806  if (*function == *info()->closure()) {
2807  __ CallSelf();
2808  } else {
2809  __ call(FieldOperand(rdi, JSFunction::kCodeEntryOffset));
2810  }
2811 
2812  // Set up deoptimization.
2813  RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT, 0);
2814  } else {
2815  // We need to adapt arguments.
2816  SafepointGenerator generator(
2817  this, pointers, Safepoint::kLazyDeopt);
2818  ParameterCount count(arity);
2819  __ InvokeFunction(function, count, CALL_FUNCTION, generator, call_kind);
2820  }
2821 
2822  // Restore context.
2823  __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
2824 }
2825 
2826 
2827 void LCodeGen::DoCallConstantFunction(LCallConstantFunction* instr) {
2828  ASSERT(ToRegister(instr->result()).is(rax));
2829  CallKnownFunction(instr->function(),
2830  instr->arity(),
2831  instr,
2832  CALL_AS_METHOD,
2833  RDI_UNINITIALIZED);
2834 }
2835 
2836 
2837 void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr) {
2838  Register input_reg = ToRegister(instr->InputAt(0));
2839  __ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset),
2840  Heap::kHeapNumberMapRootIndex);
2841  DeoptimizeIf(not_equal, instr->environment());
2842 
2843  Label done;
2844  Register tmp = input_reg.is(rax) ? rcx : rax;
2845  Register tmp2 = tmp.is(rcx) ? rdx : input_reg.is(rcx) ? rdx : rcx;
2846 
2847  // Preserve the value of all registers.
2848  PushSafepointRegistersScope scope(this);
2849 
2850  Label negative;
2851  __ movl(tmp, FieldOperand(input_reg, HeapNumber::kExponentOffset));
2852  // Check the sign of the argument. If the argument is positive, just
2853  // return it. We do not need to patch the stack since |input| and
2854  // |result| are the same register and |input| will be restored
2855  // unchanged by popping safepoint registers.
2856  __ testl(tmp, Immediate(HeapNumber::kSignMask));
2857  __ j(not_zero, &negative);
2858  __ jmp(&done);
2859 
2860  __ bind(&negative);
2861 
2862  Label allocated, slow;
2863  __ AllocateHeapNumber(tmp, tmp2, &slow);
2864  __ jmp(&allocated);
2865 
2866  // Slow case: Call the runtime system to do the number allocation.
2867  __ bind(&slow);
2868 
2869  CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
2870  // Set the pointer to the new heap number in tmp.
2871  if (!tmp.is(rax)) {
2872  __ movq(tmp, rax);
2873  }
2874 
2875  // Restore input_reg after call to runtime.
2876  __ LoadFromSafepointRegisterSlot(input_reg, input_reg);
2877 
2878  __ bind(&allocated);
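 // Copy the raw double bits, clear the sign bit by shifting it out and back
 // in, and store the absolute value into the freshly allocated heap number.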
2879  __ movq(tmp2, FieldOperand(input_reg, HeapNumber::kValueOffset));
2880  __ shl(tmp2, Immediate(1));
2881  __ shr(tmp2, Immediate(1));
2882  __ movq(FieldOperand(tmp, HeapNumber::kValueOffset), tmp2);
2883  __ StoreToSafepointRegisterSlot(input_reg, tmp);
2884 
2885  __ bind(&done);
2886 }
2887 
2888 
2889 void LCodeGen::EmitIntegerMathAbs(LUnaryMathOperation* instr) {
2890  Register input_reg = ToRegister(instr->InputAt(0));
2891  __ testl(input_reg, input_reg);
2892  Label is_positive;
2893  __ j(not_sign, &is_positive);
2894  __ negl(input_reg); // Sets flags.
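 // After negl the result is negative only for kMinInt, which has no positive
 // 32-bit representation, so deoptimize in that case.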
2895  DeoptimizeIf(negative, instr->environment());
2896  __ bind(&is_positive);
2897 }
2898 
2899 
2900 void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) {
2901  // Class for deferred case.
2902  class DeferredMathAbsTaggedHeapNumber: public LDeferredCode {
2903  public:
2904  DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen,
2905  LUnaryMathOperation* instr)
2906  : LDeferredCode(codegen), instr_(instr) { }
2907  virtual void Generate() {
2908  codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
2909  }
2910  virtual LInstruction* instr() { return instr_; }
2911  private:
2912  LUnaryMathOperation* instr_;
2913  };
2914 
2915  ASSERT(instr->InputAt(0)->Equals(instr->result()));
2916  Representation r = instr->hydrogen()->value()->representation();
2917 
2918  if (r.IsDouble()) {
2919  XMMRegister scratch = xmm0;
2920  XMMRegister input_reg = ToDoubleRegister(instr->InputAt(0));
2921  __ xorps(scratch, scratch);
2922  __ subsd(scratch, input_reg);
2923  __ andpd(input_reg, scratch);
2924  } else if (r.IsInteger32()) {
2925  EmitIntegerMathAbs(instr);
2926  } else { // Tagged case.
2927  DeferredMathAbsTaggedHeapNumber* deferred =
2928  new(zone()) DeferredMathAbsTaggedHeapNumber(this, instr);
2929  Register input_reg = ToRegister(instr->InputAt(0));
2930  // Smi check.
2931  __ JumpIfNotSmi(input_reg, deferred->entry());
2932  __ SmiToInteger32(input_reg, input_reg);
2933  EmitIntegerMathAbs(instr);
2934  __ Integer32ToSmi(input_reg, input_reg);
2935  __ bind(deferred->exit());
2936  }
2937 }
2938 
2939 
2940 void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) {
2941  XMMRegister xmm_scratch = xmm0;
2942  Register output_reg = ToRegister(instr->result());
2943  XMMRegister input_reg = ToDoubleRegister(instr->InputAt(0));
2944  Label done;
2945 
2946  if (CpuFeatures::IsSupported(SSE4_1)) {
2947  CpuFeatures::Scope scope(SSE4_1);
2948  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
2949  // Deoptimize if minus zero.
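 // -0.0 is the only double whose raw bit pattern (0x8000000000000000)
 // overflows when 1 is subtracted from it in a 64-bit register.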
2950  __ movq(output_reg, input_reg);
2951  __ subq(output_reg, Immediate(1));
2952  DeoptimizeIf(overflow, instr->environment());
2953  }
2954  __ roundsd(xmm_scratch, input_reg, Assembler::kRoundDown);
2955  __ cvttsd2si(output_reg, xmm_scratch);
2956  __ cmpl(output_reg, Immediate(0x80000000));
2957  DeoptimizeIf(equal, instr->environment());
2958  } else {
2959  // Deoptimize on negative inputs.
2960  __ xorps(xmm_scratch, xmm_scratch); // Zero the register.
2961  __ ucomisd(input_reg, xmm_scratch);
2962  DeoptimizeIf(below, instr->environment());
2963  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
2964  // Check for negative zero.
2965  Label positive_sign;
2966  __ j(above, &positive_sign, Label::kNear);
2967  __ movmskpd(output_reg, input_reg);
2968  __ testq(output_reg, Immediate(1));
2969  DeoptimizeIf(not_zero, instr->environment());
2970  __ Set(output_reg, 0);
2971  __ jmp(&done);
2972  __ bind(&positive_sign);
2973  }
2974 
2975  // Use truncating instruction (OK because input is positive).
2976  __ cvttsd2si(output_reg, input_reg);
2977 
2978  // Overflow is signalled with minint.
2979  __ cmpl(output_reg, Immediate(0x80000000));
2980  DeoptimizeIf(equal, instr->environment());
2981  }
2982  __ bind(&done);
2983 }
2984 
2985 
2986 void LCodeGen::DoMathRound(LUnaryMathOperation* instr) {
2987  const XMMRegister xmm_scratch = xmm0;
2988  Register output_reg = ToRegister(instr->result());
2989  XMMRegister input_reg = ToDoubleRegister(instr->InputAt(0));
2990 
2991  Label done;
2992  // xmm_scratch = 0.5
2993  __ movq(kScratchRegister, V8_INT64_C(0x3FE0000000000000), RelocInfo::NONE);
2994  __ movq(xmm_scratch, kScratchRegister);
2995  Label below_half;
2996  __ ucomisd(xmm_scratch, input_reg);
2997  // If input_reg is NaN, this doesn't jump.
2998  __ j(above, &below_half, Label::kNear);
2999  // input = input + 0.5
3000  // This addition might give a result that isn't correct for
3001  // rounding, due to loss of precision, but only for a number that's
3002  // so big that the conversion below will overflow anyway.
3003  __ addsd(xmm_scratch, input_reg);
3004  // Compute Math.floor(input).
3005  // Use truncating instruction (OK because input is positive).
3006  __ cvttsd2si(output_reg, xmm_scratch);
3007  // Overflow is signalled with minint.
3008  __ cmpl(output_reg, Immediate(0x80000000));
3009  DeoptimizeIf(equal, instr->environment());
3010  __ jmp(&done);
3011 
3012  __ bind(&below_half);
3013  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3014  // Bailout if negative (including -0).
3015  __ movq(output_reg, input_reg);
3016  __ testq(output_reg, output_reg);
3017  DeoptimizeIf(negative, instr->environment());
3018  } else {
3019  // Bailout if below -0.5, otherwise round to (positive) zero, even
3020  // if negative.
3021  // xmm_scratch = -0.5
3022  __ movq(kScratchRegister, V8_INT64_C(0xBFE0000000000000), RelocInfo::NONE);
3023  __ movq(xmm_scratch, kScratchRegister);
3024  __ ucomisd(input_reg, xmm_scratch);
3025  DeoptimizeIf(below, instr->environment());
3026  }
3027  __ xorl(output_reg, output_reg);
3028 
3029  __ bind(&done);
3030 }
3031 
3032 
3033 void LCodeGen::DoMathSqrt(LUnaryMathOperation* instr) {
3034  XMMRegister input_reg = ToDoubleRegister(instr->InputAt(0));
3035  ASSERT(ToDoubleRegister(instr->result()).is(input_reg));
3036  __ sqrtsd(input_reg, input_reg);
3037 }
3038 
3039 
3040 void LCodeGen::DoMathPowHalf(LUnaryMathOperation* instr) {
3041  XMMRegister xmm_scratch = xmm0;
3042  XMMRegister input_reg = ToDoubleRegister(instr->InputAt(0));
3043  ASSERT(ToDoubleRegister(instr->result()).is(input_reg));
3044 
3045  // Note that according to ECMA-262 15.8.2.13:
3046  // Math.pow(-Infinity, 0.5) == Infinity
3047  // Math.sqrt(-Infinity) == NaN
3048  Label done, sqrt;
3049  // Check base for -Infinity. According to IEEE-754, double-precision
3050  // -Infinity has the highest 12 bits set and the lowest 52 bits cleared.
3051  __ movq(kScratchRegister, V8_INT64_C(0xFFF0000000000000), RelocInfo::NONE);
3052  __ movq(xmm_scratch, kScratchRegister);
3053  __ ucomisd(xmm_scratch, input_reg);
3054  // Comparing -Infinity with NaN results in "unordered", which sets the
3055  // zero flag as if both were equal. However, it also sets the carry flag.
3056  __ j(not_equal, &sqrt, Label::kNear);
3057  __ j(carry, &sqrt, Label::kNear);
3058  // If input is -Infinity, return Infinity.
3059  __ xorps(input_reg, input_reg);
3060  __ subsd(input_reg, xmm_scratch);
3061  __ jmp(&done, Label::kNear);
3062 
3063  // Square root.
3064  __ bind(&sqrt);
3065  __ xorps(xmm_scratch, xmm_scratch);
3066  __ addsd(input_reg, xmm_scratch); // Convert -0 to +0.
3067  __ sqrtsd(input_reg, input_reg);
3068  __ bind(&done);
3069 }
3070 
3071 
3072 void LCodeGen::DoPower(LPower* instr) {
3073  Representation exponent_type = instr->hydrogen()->right()->representation();
3074  // Having marked this as a call, we can use any registers.
3075  // Just make sure that the input/output registers are the expected ones.
3076 
3077  // Choose register conforming to calling convention (when bailing out).
3078 #ifdef _WIN64
3079  Register exponent = rdx;
3080 #else
3081  Register exponent = rdi;
3082 #endif
3083  ASSERT(!instr->InputAt(1)->IsRegister() ||
3084  ToRegister(instr->InputAt(1)).is(exponent));
3085  ASSERT(!instr->InputAt(1)->IsDoubleRegister() ||
3086  ToDoubleRegister(instr->InputAt(1)).is(xmm1));
3087  ASSERT(ToDoubleRegister(instr->InputAt(0)).is(xmm2));
3088  ASSERT(ToDoubleRegister(instr->result()).is(xmm3));
3089 
3090  if (exponent_type.IsTagged()) {
3091  Label no_deopt;
3092  __ JumpIfSmi(exponent, &no_deopt);
3093  __ CmpObjectType(exponent, HEAP_NUMBER_TYPE, rcx);
3094  DeoptimizeIf(not_equal, instr->environment());
3095  __ bind(&no_deopt);
3096  MathPowStub stub(MathPowStub::TAGGED);
3097  __ CallStub(&stub);
3098  } else if (exponent_type.IsInteger32()) {
3099  MathPowStub stub(MathPowStub::INTEGER);
3100  __ CallStub(&stub);
3101  } else {
3102  ASSERT(exponent_type.IsDouble());
3103  MathPowStub stub(MathPowStub::DOUBLE);
3104  __ CallStub(&stub);
3105  }
3106 }
3107 
3108 
3109 void LCodeGen::DoRandom(LRandom* instr) {
3110  class DeferredDoRandom: public LDeferredCode {
3111  public:
3112  DeferredDoRandom(LCodeGen* codegen, LRandom* instr)
3113  : LDeferredCode(codegen), instr_(instr) { }
3114  virtual void Generate() { codegen()->DoDeferredRandom(instr_); }
3115  virtual LInstruction* instr() { return instr_; }
3116  private:
3117  LRandom* instr_;
3118  };
3119 
3120  DeferredDoRandom* deferred = new(zone()) DeferredDoRandom(this, instr);
3121 
3122  // Having marked this instruction as a call we can use any
3123  // registers.
3124  ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
3125 
3126  // Choose the right register for the first argument depending on
3127  // calling convention.
3128 #ifdef _WIN64
3129  ASSERT(ToRegister(instr->InputAt(0)).is(rcx));
3130  Register global_object = rcx;
3131 #else
3132  ASSERT(ToRegister(instr->InputAt(0)).is(rdi));
3133  Register global_object = rdi;
3134 #endif
3135 
3136  static const int kSeedSize = sizeof(uint32_t);
3137  STATIC_ASSERT(kPointerSize == 2 * kSeedSize);
3138 
3139  __ movq(global_object,
3140  FieldOperand(global_object, GlobalObject::kGlobalContextOffset));
3141  static const int kRandomSeedOffset =
3142  FixedArray::kHeaderSize + Context::RANDOM_SEED_INDEX * kPointerSize;
3143  __ movq(rbx, FieldOperand(global_object, kRandomSeedOffset));
3144  // rbx: FixedArray of the global context's random seeds
3145 
3146  // Load state[0].
3147  __ movl(rax, FieldOperand(rbx, ByteArray::kHeaderSize));
3148  // If state[0] == 0, call runtime to initialize seeds.
3149  __ testl(rax, rax);
3150  __ j(zero, deferred->entry());
3151  // Load state[1].
3152  __ movl(rcx, FieldOperand(rbx, ByteArray::kHeaderSize + kSeedSize));
3153 
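 // Each seed is advanced with a 16-bit multiply-with-carry step; the two
 // streams are then combined into a 32-bit random value below.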
3154  // state[0] = 18273 * (state[0] & 0xFFFF) + (state[0] >> 16)
3155  // Only operate on the lower 32 bit of rax.
3156  __ movl(rdx, rax);
3157  __ andl(rdx, Immediate(0xFFFF));
3158  __ imull(rdx, rdx, Immediate(18273));
3159  __ shrl(rax, Immediate(16));
3160  __ addl(rax, rdx);
3161  // Save state[0].
3162  __ movl(FieldOperand(rbx, ByteArray::kHeaderSize), rax);
3163 
3164  // state[1] = 36969 * (state[1] & 0xFFFF) + (state[1] >> 16)
3165  __ movl(rdx, rcx);
3166  __ andl(rdx, Immediate(0xFFFF));
3167  __ imull(rdx, rdx, Immediate(36969));
3168  __ shrl(rcx, Immediate(16));
3169  __ addl(rcx, rdx);
3170  // Save state[1].
3171  __ movl(FieldOperand(rbx, ByteArray::kHeaderSize + kSeedSize), rcx);
3172 
3173  // Random bit pattern = (state[0] << 14) + (state[1] & 0x3FFFF)
3174  __ shll(rax, Immediate(14));
3175  __ andl(rcx, Immediate(0x3FFFF));
3176  __ addl(rax, rcx);
3177 
3178  __ bind(deferred->exit());
3179  // Convert 32 random bits in rax to 0.(32 random bits) in a double
3180  // by computing:
3181  // ( 1.(20 0s)(32 random bits) x 2^20 ) - (1.0 x 2^20).
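 // XOR-ing the random bits into the low half of the mantissa of 2^20 yields
 // 2^20 + bits/2^32; subtracting 2^20 leaves a value in [0, 1).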
3182  __ movl(rcx, Immediate(0x49800000)); // 1.0 x 2^20 as single.
3183  __ movd(xmm2, rcx);
3184  __ movd(xmm1, rax);
3185  __ cvtss2sd(xmm2, xmm2);
3186  __ xorps(xmm1, xmm2);
3187  __ subsd(xmm1, xmm2);
3188 }
3189 
3190 
3191 void LCodeGen::DoDeferredRandom(LRandom* instr) {
3192  __ PrepareCallCFunction(1);
3193  __ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1);
3195  // Return value is in rax.
3196 }
3197 
3198 
3199 void LCodeGen::DoMathLog(LUnaryMathOperation* instr) {
3200  ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
3201  TranscendentalCacheStub stub(TranscendentalCache::LOG,
3202  TranscendentalCacheStub::UNTAGGED);
3203  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
3204 }
3205 
3206 
3207 void LCodeGen::DoMathTan(LUnaryMathOperation* instr) {
3208  ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
3209  TranscendentalCacheStub stub(TranscendentalCache::TAN,
3210  TranscendentalCacheStub::UNTAGGED);
3211  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
3212 }
3213 
3214 
3215 void LCodeGen::DoMathCos(LUnaryMathOperation* instr) {
3216  ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
3217  TranscendentalCacheStub stub(TranscendentalCache::COS,
3218  TranscendentalCacheStub::UNTAGGED);
3219  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
3220 }
3221 
3222 
3223 void LCodeGen::DoMathSin(LUnaryMathOperation* instr) {
3224  ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
3225  TranscendentalCacheStub stub(TranscendentalCache::SIN,
3226  TranscendentalCacheStub::UNTAGGED);
3227  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
3228 }
3229 
3230 
3231 void LCodeGen::DoUnaryMathOperation(LUnaryMathOperation* instr) {
3232  switch (instr->op()) {
3233  case kMathAbs:
3234  DoMathAbs(instr);
3235  break;
3236  case kMathFloor:
3237  DoMathFloor(instr);
3238  break;
3239  case kMathRound:
3240  DoMathRound(instr);
3241  break;
3242  case kMathSqrt:
3243  DoMathSqrt(instr);
3244  break;
3245  case kMathPowHalf:
3246  DoMathPowHalf(instr);
3247  break;
3248  case kMathCos:
3249  DoMathCos(instr);
3250  break;
3251  case kMathSin:
3252  DoMathSin(instr);
3253  break;
3254  case kMathTan:
3255  DoMathTan(instr);
3256  break;
3257  case kMathLog:
3258  DoMathLog(instr);
3259  break;
3260 
3261  default:
3262  UNREACHABLE();
3263  }
3264 }
3265 
3266 
3267 void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
3268  ASSERT(ToRegister(instr->function()).is(rdi));
3269  ASSERT(instr->HasPointerMap());
3270 
3271  if (instr->known_function().is_null()) {
3272  LPointerMap* pointers = instr->pointer_map();
3273  RecordPosition(pointers->position());
3274  SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
3275  ParameterCount count(instr->arity());
3276  __ InvokeFunction(rdi, count, CALL_FUNCTION, generator, CALL_AS_METHOD);
3277  __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
3278  } else {
3279  CallKnownFunction(instr->known_function(),
3280  instr->arity(),
3281  instr,
3282  CALL_AS_METHOD,
3283  RDI_CONTAINS_TARGET);
3284  }
3285 }
3286 
3287 
3288 void LCodeGen::DoCallKeyed(LCallKeyed* instr) {
3289  ASSERT(ToRegister(instr->key()).is(rcx));
3290  ASSERT(ToRegister(instr->result()).is(rax));
3291 
3292  int arity = instr->arity();
3293  Handle<Code> ic =
3294  isolate()->stub_cache()->ComputeKeyedCallInitialize(arity);
3295  CallCode(ic, RelocInfo::CODE_TARGET, instr);
3296  __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
3297 }
3298 
3299 
3300 void LCodeGen::DoCallNamed(LCallNamed* instr) {
3301  ASSERT(ToRegister(instr->result()).is(rax));
3302 
3303  int arity = instr->arity();
3304  RelocInfo::Mode mode = RelocInfo::CODE_TARGET;
3305  Handle<Code> ic =
3306  isolate()->stub_cache()->ComputeCallInitialize(arity, mode);
3307  __ Move(rcx, instr->name());
3308  CallCode(ic, mode, instr);
3309  __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
3310 }
3311 
3312 
3313 void LCodeGen::DoCallFunction(LCallFunction* instr) {
3314  ASSERT(ToRegister(instr->function()).is(rdi));
3315  ASSERT(ToRegister(instr->result()).is(rax));
3316 
3317  int arity = instr->arity();
3318  CallFunctionStub stub(arity, NO_CALL_FUNCTION_FLAGS);
3319  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
3320  __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
3321 }
3322 
3323 
3324 void LCodeGen::DoCallGlobal(LCallGlobal* instr) {
3325  ASSERT(ToRegister(instr->result()).is(rax));
3326  int arity = instr->arity();
3327  RelocInfo::Mode mode = RelocInfo::CODE_TARGET_CONTEXT;
3328  Handle<Code> ic =
3329  isolate()->stub_cache()->ComputeCallInitialize(arity, mode);
3330  __ Move(rcx, instr->name());
3331  CallCode(ic, mode, instr);
3332  __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
3333 }
3334 
3335 
3336 void LCodeGen::DoCallKnownGlobal(LCallKnownGlobal* instr) {
3337  ASSERT(ToRegister(instr->result()).is(rax));
3338  CallKnownFunction(instr->target(),
3339  instr->arity(),
3340  instr,
3341  CALL_AS_FUNCTION,
3342  RDI_UNINITIALIZED);
3343 }
3344 
3345 
3346 void LCodeGen::DoCallNew(LCallNew* instr) {
3347  ASSERT(ToRegister(instr->InputAt(0)).is(rdi));
3348  ASSERT(ToRegister(instr->result()).is(rax));
3349 
3350  CallConstructStub stub(NO_CALL_FUNCTION_FLAGS);
3351  __ Set(rax, instr->arity());
3352  CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
3353 }
3354 
3355 
3356 void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
3357  CallRuntime(instr->function(), instr->arity(), instr);
3358 }
3359 
3360 
3361 void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
3362  Register object = ToRegister(instr->object());
3363  Register value = ToRegister(instr->value());
3364  int offset = instr->offset();
3365 
3366  if (!instr->transition().is_null()) {
3367  if (!instr->hydrogen()->NeedsWriteBarrierForMap()) {
3368  __ Move(FieldOperand(object, HeapObject::kMapOffset),
3369  instr->transition());
3370  } else {
3371  Register temp = ToRegister(instr->TempAt(0));
3372  __ Move(kScratchRegister, instr->transition());
3373  __ movq(FieldOperand(object, HeapObject::kMapOffset), kScratchRegister);
3374  // Update the write barrier for the map field.
3375  __ RecordWriteField(object,
3376  HeapObject::kMapOffset,
3377  kScratchRegister,
3378  temp,
3379  kSaveFPRegs,
3380  OMIT_REMEMBERED_SET,
3381  OMIT_SMI_CHECK);
3382  }
3383  }
3384 
3385  // Do the store.
3386  HType type = instr->hydrogen()->value()->type();
3387  SmiCheck check_needed =
3388  type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
3389  if (instr->is_in_object()) {
3390  __ movq(FieldOperand(object, offset), value);
3391  if (instr->hydrogen()->NeedsWriteBarrier()) {
3392  Register temp = ToRegister(instr->TempAt(0));
3393  // Update the write barrier for the object for in-object properties.
3394  __ RecordWriteField(object,
3395  offset,
3396  value,
3397  temp,
3398  kSaveFPRegs,
3399  EMIT_REMEMBERED_SET,
3400  check_needed);
3401  }
3402  } else {
3403  Register temp = ToRegister(instr->TempAt(0));
3404  __ movq(temp, FieldOperand(object, JSObject::kPropertiesOffset));
3405  __ movq(FieldOperand(temp, offset), value);
3406  if (instr->hydrogen()->NeedsWriteBarrier()) {
3407  // Update the write barrier for the properties array.
3408  // object is used as a scratch register.
3409  __ RecordWriteField(temp,
3410  offset,
3411  value,
3412  object,
3413  kSaveFPRegs,
3414  EMIT_REMEMBERED_SET,
3415  check_needed);
3416  }
3417  }
3418 }
3419 
3420 
3421 void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
3422  ASSERT(ToRegister(instr->object()).is(rdx));
3423  ASSERT(ToRegister(instr->value()).is(rax));
3424 
3425  __ Move(rcx, instr->hydrogen()->name());
3426  Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
3427  ? isolate()->builtins()->StoreIC_Initialize_Strict()
3428  : isolate()->builtins()->StoreIC_Initialize();
3429  CallCode(ic, RelocInfo::CODE_TARGET, instr);
3430 }
3431 
3432 
3433 void LCodeGen::DoStoreKeyedSpecializedArrayElement(
3434  LStoreKeyedSpecializedArrayElement* instr) {
3435  ElementsKind elements_kind = instr->elements_kind();
3436  Operand operand(BuildFastArrayOperand(instr->external_pointer(),
3437  instr->key(),
3438  elements_kind,
3439  0,
3440  instr->additional_index()));
3441 
3442  if (instr->hydrogen()->IsDehoisted() && !instr->key()->IsConstantOperand()) {
3443  // Sign extend key because it could be a 32 bit negative value
3444  // and the dehoisted address computation happens in 64 bits
3445  Register key_reg = ToRegister(instr->key());
3446  __ movsxlq(key_reg, key_reg);
3447  }
3448 
3449  if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
3450  XMMRegister value(ToDoubleRegister(instr->value()));
3451  __ cvtsd2ss(value, value);
3452  __ movss(operand, value);
3453  } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
3454  __ movsd(operand, ToDoubleRegister(instr->value()));
3455  } else {
3456  Register value(ToRegister(instr->value()));
3457  switch (elements_kind) {
3458  case EXTERNAL_PIXEL_ELEMENTS:
3459  case EXTERNAL_BYTE_ELEMENTS:
3460  case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
3461  __ movb(operand, value);
3462  break;
3463  case EXTERNAL_SHORT_ELEMENTS:
3464  case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
3465  __ movw(operand, value);
3466  break;
3467  case EXTERNAL_INT_ELEMENTS:
3468  case EXTERNAL_UNSIGNED_INT_ELEMENTS:
3469  __ movl(operand, value);
3470  break;
3471  case EXTERNAL_FLOAT_ELEMENTS:
3472  case EXTERNAL_DOUBLE_ELEMENTS:
3473  case FAST_ELEMENTS:
3474  case FAST_SMI_ELEMENTS:
3475  case FAST_DOUBLE_ELEMENTS:
3476  case FAST_HOLEY_ELEMENTS:
3477  case FAST_HOLEY_SMI_ELEMENTS:
3478  case FAST_HOLEY_DOUBLE_ELEMENTS:
3479  case DICTIONARY_ELEMENTS:
3480  case NON_STRICT_ARGUMENTS_ELEMENTS:
3481  UNREACHABLE();
3482  break;
3483  }
3484  }
3485 }
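A note on the movsxlq above: a dehoisted key can hold a negative 32-bit value, and the address computation is done in 64 bits, so the key has to be sign-extended rather than zero-extended. A minimal sketch of that point in plain C++ (illustrative only, not part of lithium-codegen-x64.cc; the helper name is made up):

    #include <cstdint>

    // Sign-extend a 32-bit key before 64-bit address arithmetic, as movsxlq
    // does: -1 must stay -1 (one element back), not become 4294967295.
    int64_t SignExtendKey(int32_t key) {
      return static_cast<int64_t>(key);
    }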
3486 
3487 
3488 void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
3489  if (instr->length()->IsRegister()) {
3490  Register reg = ToRegister(instr->length());
3491  if (FLAG_debug_code) {
3492  __ AbortIfNotZeroExtended(reg);
3493  }
3494  if (instr->index()->IsConstantOperand()) {
3495  __ cmpq(reg,
3496  Immediate(ToInteger32(LConstantOperand::cast(instr->index()))));
3497  } else {
3498  Register reg2 = ToRegister(instr->index());
3499  if (FLAG_debug_code) {
3500  __ AbortIfNotZeroExtended(reg2);
3501  }
3502  __ cmpq(reg, reg2);
3503  }
3504  } else {
3505  if (instr->index()->IsConstantOperand()) {
3506  __ cmpq(ToOperand(instr->length()),
3507  Immediate(ToInteger32(LConstantOperand::cast(instr->index()))));
3508  } else {
3509  __ cmpq(ToOperand(instr->length()), ToRegister(instr->index()));
3510  }
3511  }
3512  DeoptimizeIf(below_equal, instr->environment());
3513 }
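For readers following the deopt condition above: the comparison puts the length on the left and the index on the right, and below_equal is an unsigned condition, so the instruction only falls through when the index is strictly less than the length. A minimal sketch of the guard (illustrative only; the function name is made up):

    #include <cstdint>

    // Execution continues only when index < length; DeoptimizeIf(below_equal)
    // fires whenever length <= index under an unsigned comparison.
    bool PassesBoundsCheck(uint64_t length, uint64_t index) {
      return index < length;
    }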
3514 
3515 
3516 void LCodeGen::DoStoreKeyedFastElement(LStoreKeyedFastElement* instr) {
3517  Register value = ToRegister(instr->value());
3518  Register elements = ToRegister(instr->object());
3519  Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) : no_reg;
3520 
3521  Operand operand =
3522  BuildFastArrayOperand(instr->object(),
3523  instr->key(),
3524  FAST_ELEMENTS,
3525  FixedArray::kHeaderSize - kHeapObjectTag,
3526  instr->additional_index());
3527 
3528  if (instr->hydrogen()->IsDehoisted() && !instr->key()->IsConstantOperand()) {
3529  // Sign extend key because it could be a 32 bit negative value
3530  // and the dehoisted address computation happens in 64 bits
3531  Register key_reg = ToRegister(instr->key());
3532  __ movsxlq(key_reg, key_reg);
3533  }
3534 
3535  __ movq(operand, value);
3536 
3537  if (instr->hydrogen()->NeedsWriteBarrier()) {
3538  ASSERT(!instr->key()->IsConstantOperand());
3539  HType type = instr->hydrogen()->value()->type();
3540  SmiCheck check_needed =
3541  type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
3542  // Compute address of modified element and store it into key register.
3543  __ lea(key, operand);
3544  __ RecordWrite(elements,
3545  key,
3546  value,
3547  kSaveFPRegs,
3548  EMIT_REMEMBERED_SET,
3549  check_needed);
3550  }
3551 }
3552 
3553 
3554 void LCodeGen::DoStoreKeyedFastDoubleElement(
3555  LStoreKeyedFastDoubleElement* instr) {
3556  XMMRegister value = ToDoubleRegister(instr->value());
3557 
3558  if (instr->NeedsCanonicalization()) {
3559  Label have_value;
3560 
3561  __ ucomisd(value, value);
3562  __ j(parity_odd, &have_value); // NaN.
3563 
3564  __ Set(kScratchRegister, BitCast<uint64_t>(
3565  FixedDoubleArray::canonical_not_the_hole_nan_as_double()));
3566  __ movq(value, kScratchRegister);
3567 
3568  __ bind(&have_value);
3569  }
3570 
3571  Operand double_store_operand = BuildFastArrayOperand(
3572  instr->elements(),
3573  instr->key(),
3574  FAST_DOUBLE_ELEMENTS,
3575  FixedDoubleArray::kHeaderSize - kHeapObjectTag,
3576  instr->additional_index());
3577 
3578  if (instr->hydrogen()->IsDehoisted() && !instr->key()->IsConstantOperand()) {
3579  // Sign extend key because it could be a 32 bit negative value
3580  // and the dehoisted address computation happens in 64 bits
3581  Register key_reg = ToRegister(instr->key());
3582  __ movsxlq(key_reg, key_reg);
3583  }
3584 
3585  __ movsd(double_store_operand, value);
3586 }
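The canonicalization block above uses the parity flag set by ucomisd to detect NaN and overwrites any NaN with one fixed bit pattern before the store, so fast double arrays never contain arbitrary NaN payloads. A rough C++ equivalent (illustrative only; the helper and its parameter are made up):

    #include <cmath>
    #include <cstdint>
    #include <cstring>

    // Replace any NaN with a single canonical NaN bit pattern; non-NaN values
    // (the parity_odd branch above) are stored unchanged.
    double CanonicalizeNaN(double value, uint64_t canonical_nan_bits) {
      if (!std::isnan(value)) return value;
      double canonical;
      std::memcpy(&canonical, &canonical_nan_bits, sizeof(canonical));
      return canonical;
    }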
3587 
3588 void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
3589  ASSERT(ToRegister(instr->object()).is(rdx));
3590  ASSERT(ToRegister(instr->key()).is(rcx));
3591  ASSERT(ToRegister(instr->value()).is(rax));
3592 
3593  Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
3594  ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
3595  : isolate()->builtins()->KeyedStoreIC_Initialize();
3596  CallCode(ic, RelocInfo::CODE_TARGET, instr);
3597 }
3598 
3599 
3600 void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
3601  Register object_reg = ToRegister(instr->object());
3602  Register new_map_reg = ToRegister(instr->new_map_reg());
3603 
3604  Handle<Map> from_map = instr->original_map();
3605  Handle<Map> to_map = instr->transitioned_map();
3606  ElementsKind from_kind = from_map->elements_kind();
3607  ElementsKind to_kind = to_map->elements_kind();
3608 
3609  Label not_applicable;
3610  __ Cmp(FieldOperand(object_reg, HeapObject::kMapOffset), from_map);
3611  __ j(not_equal, &not_applicable);
3612  __ movq(new_map_reg, to_map, RelocInfo::EMBEDDED_OBJECT);
3613  if (IsSimpleMapChangeTransition(from_kind, to_kind)) {
3614  __ movq(FieldOperand(object_reg, HeapObject::kMapOffset), new_map_reg);
3615  // Write barrier.
3616  ASSERT_NE(instr->temp_reg(), NULL);
3617  __ RecordWriteField(object_reg, HeapObject::kMapOffset, new_map_reg,
3618  ToRegister(instr->temp_reg()), kDontSaveFPRegs);
3619  } else if (IsFastSmiElementsKind(from_kind) &&
3620  IsFastDoubleElementsKind(to_kind)) {
3621  Register fixed_object_reg = ToRegister(instr->temp_reg());
3622  ASSERT(fixed_object_reg.is(rdx));
3623  ASSERT(new_map_reg.is(rbx));
3624  __ movq(fixed_object_reg, object_reg);
3625  CallCode(isolate()->builtins()->TransitionElementsSmiToDouble(),
3626  RelocInfo::CODE_TARGET, instr);
3627  } else if (IsFastDoubleElementsKind(from_kind) &&
3628  IsFastObjectElementsKind(to_kind)) {
3629  Register fixed_object_reg = ToRegister(instr->temp_reg());
3630  ASSERT(fixed_object_reg.is(rdx));
3631  ASSERT(new_map_reg.is(rbx));
3632  __ movq(fixed_object_reg, object_reg);
3633  CallCode(isolate()->builtins()->TransitionElementsDoubleToObject(),
3634  RelocInfo::CODE_TARGET, instr);
3635  } else {
3636  UNREACHABLE();
3637  }
3638  __ bind(&not_applicable);
3639 }
3640 
3641 
3642 void LCodeGen::DoStringAdd(LStringAdd* instr) {
3643  EmitPushTaggedOperand(instr->left());
3644  EmitPushTaggedOperand(instr->right());
3645  StringAddStub stub(NO_STRING_CHECK_IN_STUB);
3646  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
3647 }
3648 
3649 
3650 void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
3651  class DeferredStringCharCodeAt: public LDeferredCode {
3652  public:
3653  DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
3654  : LDeferredCode(codegen), instr_(instr) { }
3655  virtual void Generate() { codegen()->DoDeferredStringCharCodeAt(instr_); }
3656  virtual LInstruction* instr() { return instr_; }
3657  private:
3658  LStringCharCodeAt* instr_;
3659  };
3660 
3661  DeferredStringCharCodeAt* deferred =
3662  new(zone()) DeferredStringCharCodeAt(this, instr);
3663 
3664  StringCharLoadGenerator::Generate(masm(),
3665  ToRegister(instr->string()),
3666  ToRegister(instr->index()),
3667  ToRegister(instr->result()),
3668  deferred->entry());
3669  __ bind(deferred->exit());
3670 }
3671 
3672 
3673 void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
3674  Register string = ToRegister(instr->string());
3675  Register result = ToRegister(instr->result());
3676 
3677  // TODO(3095996): Get rid of this. For now, we need to make the
3678  // result register contain a valid pointer because it is already
3679  // contained in the register pointer map.
3680  __ Set(result, 0);
3681 
3682  PushSafepointRegistersScope scope(this);
3683  __ push(string);
3684  // Push the index as a smi. This is safe because of the checks in
3685  // DoStringCharCodeAt above.
3686  STATIC_ASSERT(String::kMaxLength <= Smi::kMaxValue);
3687  if (instr->index()->IsConstantOperand()) {
3688  int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
3689  __ Push(Smi::FromInt(const_index));
3690  } else {
3691  Register index = ToRegister(instr->index());
3692  __ Integer32ToSmi(index, index);
3693  __ push(index);
3694  }
3695  CallRuntimeFromDeferred(Runtime::kStringCharCodeAt, 2, instr);
3696  if (FLAG_debug_code) {
3697  __ AbortIfNotSmi(rax);
3698  }
3699  __ SmiToInteger32(rax, rax);
3700  __ StoreToSafepointRegisterSlot(result, rax);
3701 }
3702 
3703 
3704 void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
3705  class DeferredStringCharFromCode: public LDeferredCode {
3706  public:
3707  DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
3708  : LDeferredCode(codegen), instr_(instr) { }
3709  virtual void Generate() { codegen()->DoDeferredStringCharFromCode(instr_); }
3710  virtual LInstruction* instr() { return instr_; }
3711  private:
3712  LStringCharFromCode* instr_;
3713  };
3714 
3715  DeferredStringCharFromCode* deferred =
3716  new(zone()) DeferredStringCharFromCode(this, instr);
3717 
3718  ASSERT(instr->hydrogen()->value()->representation().IsInteger32());
3719  Register char_code = ToRegister(instr->char_code());
3720  Register result = ToRegister(instr->result());
3721  ASSERT(!char_code.is(result));
3722 
3723  __ cmpl(char_code, Immediate(String::kMaxAsciiCharCode));
3724  __ j(above, deferred->entry());
3725  __ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex);
3726  __ movq(result, FieldOperand(result,
3727  char_code, times_pointer_size,
3728  FixedArray::kHeaderSize));
3729  __ CompareRoot(result, Heap::kUndefinedValueRootIndex);
3730  __ j(equal, deferred->entry());
3731  __ bind(deferred->exit());
3732 }
3733 
3734 
3735 void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
3736  Register char_code = ToRegister(instr->char_code());
3737  Register result = ToRegister(instr->result());
3738 
3739  // TODO(3095996): Get rid of this. For now, we need to make the
3740  // result register contain a valid pointer because it is already
3741  // contained in the register pointer map.
3742  __ Set(result, 0);
3743 
3744  PushSafepointRegistersScope scope(this);
3745  __ Integer32ToSmi(char_code, char_code);
3746  __ push(char_code);
3747  CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr);
3748  __ StoreToSafepointRegisterSlot(result, rax);
3749 }
3750 
3751 
3752 void LCodeGen::DoStringLength(LStringLength* instr) {
3753  Register string = ToRegister(instr->string());
3754  Register result = ToRegister(instr->result());
3755  __ movq(result, FieldOperand(string, String::kLengthOffset));
3756 }
3757 
3758 
3759 void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
3760  LOperand* input = instr->InputAt(0);
3761  ASSERT(input->IsRegister() || input->IsStackSlot());
3762  LOperand* output = instr->result();
3763  ASSERT(output->IsDoubleRegister());
3764  if (input->IsRegister()) {
3765  __ cvtlsi2sd(ToDoubleRegister(output), ToRegister(input));
3766  } else {
3767  __ cvtlsi2sd(ToDoubleRegister(output), ToOperand(input));
3768  }
3769 }
3770 
3771 
3772 void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
3773  LOperand* input = instr->InputAt(0);
3774  ASSERT(input->IsRegister() && input->Equals(instr->result()));
3775  Register reg = ToRegister(input);
3776 
3777  __ Integer32ToSmi(reg, reg);
3778 }
3779 
3780 
3781 void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
3782  class DeferredNumberTagD: public LDeferredCode {
3783  public:
3784  DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
3785  : LDeferredCode(codegen), instr_(instr) { }
3786  virtual void Generate() { codegen()->DoDeferredNumberTagD(instr_); }
3787  virtual LInstruction* instr() { return instr_; }
3788  private:
3789  LNumberTagD* instr_;
3790  };
3791 
3792  XMMRegister input_reg = ToDoubleRegister(instr->InputAt(0));
3793  Register reg = ToRegister(instr->result());
3794  Register tmp = ToRegister(instr->TempAt(0));
3795 
3796  DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr);
3797  if (FLAG_inline_new) {
3798  __ AllocateHeapNumber(reg, tmp, deferred->entry());
3799  } else {
3800  __ jmp(deferred->entry());
3801  }
3802  __ bind(deferred->exit());
3803  __ movsd(FieldOperand(reg, HeapNumber::kValueOffset), input_reg);
3804 }
3805 
3806 
3807 void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
3808  // TODO(3095996): Get rid of this. For now, we need to make the
3809  // result register contain a valid pointer because it is already
3810  // contained in the register pointer map.
3811  Register reg = ToRegister(instr->result());
3812  __ Move(reg, Smi::FromInt(0));
3813 
3814  {
3815  PushSafepointRegistersScope scope(this);
3816  CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
3817  // Ensure that value in rax survives popping registers.
3818  __ movq(kScratchRegister, rax);
3819  }
3820  __ movq(reg, kScratchRegister);
3821 }
3822 
3823 
3824 void LCodeGen::DoSmiTag(LSmiTag* instr) {
3825  ASSERT(instr->InputAt(0)->Equals(instr->result()));
3826  Register input = ToRegister(instr->InputAt(0));
3827  ASSERT(!instr->hydrogen_value()->CheckFlag(HValue::kCanOverflow));
3828  __ Integer32ToSmi(input, input);
3829 }
3830 
3831 
3832 void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
3833  ASSERT(instr->InputAt(0)->Equals(instr->result()));
3834  Register input = ToRegister(instr->InputAt(0));
3835  if (instr->needs_check()) {
3836  Condition is_smi = __ CheckSmi(input);
3837  DeoptimizeIf(NegateCondition(is_smi), instr->environment());
3838  } else {
3839  if (FLAG_debug_code) {
3840  __ AbortIfNotSmi(input);
3841  }
3842  }
3843  __ SmiToInteger32(input, input);
3844 }
3845 
3846 
3847 void LCodeGen::EmitNumberUntagD(Register input_reg,
3848  XMMRegister result_reg,
3849  bool deoptimize_on_undefined,
3850  bool deoptimize_on_minus_zero,
3851  LEnvironment* env) {
3852  Label load_smi, done;
3853 
3854  // Smi check.
3855  __ JumpIfSmi(input_reg, &load_smi, Label::kNear);
3856 
3857  // Heap number map check.
3858  __ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset),
3859  Heap::kHeapNumberMapRootIndex);
3860  if (deoptimize_on_undefined) {
3861  DeoptimizeIf(not_equal, env);
3862  } else {
3863  Label heap_number;
3864  __ j(equal, &heap_number, Label::kNear);
3865 
3866  __ CompareRoot(input_reg, Heap::kUndefinedValueRootIndex);
3867  DeoptimizeIf(not_equal, env);
3868 
3869  // Convert undefined to NaN. Compute NaN as 0/0.
3870  __ xorps(result_reg, result_reg);
3871  __ divsd(result_reg, result_reg);
3872  __ jmp(&done, Label::kNear);
3873 
3874  __ bind(&heap_number);
3875  }
3876  // Heap number to XMM conversion.
3877  __ movsd(result_reg, FieldOperand(input_reg, HeapNumber::kValueOffset));
3878  if (deoptimize_on_minus_zero) {
3879  XMMRegister xmm_scratch = xmm0;
3880  __ xorps(xmm_scratch, xmm_scratch);
3881  __ ucomisd(xmm_scratch, result_reg);
3882  __ j(not_equal, &done, Label::kNear);
3883  __ movmskpd(kScratchRegister, result_reg);
3884  __ testq(kScratchRegister, Immediate(1));
3885  DeoptimizeIf(not_zero, env);
3886  }
3887  __ jmp(&done, Label::kNear);
3888 
3889  // Smi to XMM conversion
3890  __ bind(&load_smi);
3891  __ SmiToInteger32(kScratchRegister, input_reg);
3892  __ cvtlsi2sd(result_reg, kScratchRegister);
3893  __ bind(&done);
3894 }
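Two details of EmitNumberUntagD are easy to miss: undefined is converted to NaN by computing 0/0 (the xorps followed by divsd), and -0 is recognized by inspecting the sign bit that movmskpd extracts, since -0 compares equal to +0. A plain C++ sketch of both tricks (illustrative only, not part of this file):

    #include <cstdint>
    #include <cstring>

    // 0/0 yields a quiet NaN, mirroring xorps result_reg, result_reg; divsd.
    double UndefinedToNaN() {
      volatile double zero = 0.0;
      return zero / zero;
    }

    // -0 compares equal to +0, so the code additionally checks the sign bit,
    // which is what movmskpd plus testq Immediate(1) do.
    bool IsMinusZero(double value) {
      uint64_t bits;
      std::memcpy(&bits, &value, sizeof(bits));
      return value == 0.0 && (bits >> 63) != 0;
    }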
3895 
3896 
3897 void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
3898  Label done, heap_number;
3899  Register input_reg = ToRegister(instr->InputAt(0));
3900 
3901  // Heap number map check.
3902  __ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset),
3903  Heap::kHeapNumberMapRootIndex);
3904 
3905  if (instr->truncating()) {
3906  __ j(equal, &heap_number, Label::kNear);
3907  // Check for undefined. Undefined is converted to zero for truncating
3908  // conversions.
3909  __ CompareRoot(input_reg, Heap::kUndefinedValueRootIndex);
3910  DeoptimizeIf(not_equal, instr->environment());
3911  __ Set(input_reg, 0);
3912  __ jmp(&done, Label::kNear);
3913 
3914  __ bind(&heap_number);
3915 
3916  __ movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
3917  __ cvttsd2siq(input_reg, xmm0);
3918  __ Set(kScratchRegister, V8_UINT64_C(0x8000000000000000));
3919  __ cmpq(input_reg, kScratchRegister);
3920  DeoptimizeIf(equal, instr->environment());
3921  } else {
3922  // Deoptimize if we don't have a heap number.
3923  DeoptimizeIf(not_equal, instr->environment());
3924 
3925  XMMRegister xmm_temp = ToDoubleRegister(instr->TempAt(0));
3926  __ movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
3927  __ cvttsd2si(input_reg, xmm0);
3928  __ cvtlsi2sd(xmm_temp, input_reg);
3929  __ ucomisd(xmm0, xmm_temp);
3930  DeoptimizeIf(not_equal, instr->environment());
3931  DeoptimizeIf(parity_even, instr->environment()); // NaN.
3932  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3933  __ testl(input_reg, input_reg);
3934  __ j(not_zero, &done);
3935  __ movmskpd(input_reg, xmm0);
3936  __ andl(input_reg, Immediate(1));
3937  DeoptimizeIf(not_zero, instr->environment());
3938  }
3939  }
3940  __ bind(&done);
3941 }
3942 
3943 
3944 void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
3945  class DeferredTaggedToI: public LDeferredCode {
3946  public:
3947  DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
3948  : LDeferredCode(codegen), instr_(instr) { }
3949  virtual void Generate() { codegen()->DoDeferredTaggedToI(instr_); }
3950  virtual LInstruction* instr() { return instr_; }
3951  private:
3952  LTaggedToI* instr_;
3953  };
3954 
3955  LOperand* input = instr->InputAt(0);
3956  ASSERT(input->IsRegister());
3957  ASSERT(input->Equals(instr->result()));
3958 
3959  Register input_reg = ToRegister(input);
3960  DeferredTaggedToI* deferred = new(zone()) DeferredTaggedToI(this, instr);
3961  __ JumpIfNotSmi(input_reg, deferred->entry());
3962  __ SmiToInteger32(input_reg, input_reg);
3963  __ bind(deferred->exit());
3964 }
3965 
3966 
3967 void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
3968  LOperand* input = instr->InputAt(0);
3969  ASSERT(input->IsRegister());
3970  LOperand* result = instr->result();
3971  ASSERT(result->IsDoubleRegister());
3972 
3973  Register input_reg = ToRegister(input);
3974  XMMRegister result_reg = ToDoubleRegister(result);
3975 
3976  EmitNumberUntagD(input_reg, result_reg,
3977  instr->hydrogen()->deoptimize_on_undefined(),
3978  instr->hydrogen()->deoptimize_on_minus_zero(),
3979  instr->environment());
3980 }
3981 
3982 
3983 void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
3984  LOperand* input = instr->InputAt(0);
3985  ASSERT(input->IsDoubleRegister());
3986  LOperand* result = instr->result();
3987  ASSERT(result->IsRegister());
3988 
3989  XMMRegister input_reg = ToDoubleRegister(input);
3990  Register result_reg = ToRegister(result);
3991 
3992  if (instr->truncating()) {
3993  // Performs a truncating conversion of a floating point number as used by
3994  // the JS bitwise operations.
3995  __ cvttsd2siq(result_reg, input_reg);
3996  __ movq(kScratchRegister, V8_INT64_C(0x8000000000000000), RelocInfo::NONE);
3997  __ cmpq(result_reg, kScratchRegister);
3998  DeoptimizeIf(equal, instr->environment());
3999  } else {
4000  __ cvttsd2si(result_reg, input_reg);
4001  __ cvtlsi2sd(xmm0, result_reg);
4002  __ ucomisd(xmm0, input_reg);
4003  DeoptimizeIf(not_equal, instr->environment());
4004  DeoptimizeIf(parity_even, instr->environment()); // NaN.
4005  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
4006  Label done;
4007  // The integer converted back is equal to the original. We
4008  // only have to test if we got -0 as an input.
4009  __ testl(result_reg, result_reg);
4010  __ j(not_zero, &done, Label::kNear);
4011  __ movmskpd(result_reg, input_reg);
4012  // Bit 0 contains the sign of the double in input_reg.
4013  // If input was positive, we are ok and return 0, otherwise
4014  // deoptimize.
4015  __ andl(result_reg, Immediate(1));
4016  DeoptimizeIf(not_zero, instr->environment());
4017  __ bind(&done);
4018  }
4019  }
4020 }
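In the truncating branch above, cvttsd2siq produces the sentinel 0x8000000000000000 for NaN or out-of-range inputs, and the generated code deoptimizes exactly when that sentinel comes back. A hedged sketch of the same decision (illustrative only; the helper is made up):

    #include <cmath>
    #include <cstdint>

    // Returns false in the cases where the generated code would deoptimize:
    // NaN and values outside the int64 range, i.e. whenever cvttsd2siq would
    // return the 0x8000000000000000 sentinel.
    bool TruncateDoubleToInt64(double input, int64_t* out) {
      if (std::isnan(input) ||
          input >= 9223372036854775808.0 ||   // 2^63
          input < -9223372036854775808.0) {   // -2^63 itself is representable
        return false;
      }
      *out = static_cast<int64_t>(input);
      return true;
    }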
4021 
4022 
4023 void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
4024  LOperand* input = instr->InputAt(0);
4025  Condition cc = masm()->CheckSmi(ToRegister(input));
4026  DeoptimizeIf(NegateCondition(cc), instr->environment());
4027 }
4028 
4029 
4030 void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
4031  LOperand* input = instr->InputAt(0);
4032  Condition cc = masm()->CheckSmi(ToRegister(input));
4033  DeoptimizeIf(cc, instr->environment());
4034 }
4035 
4036 
4037 void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
4038  Register input = ToRegister(instr->InputAt(0));
4039 
4040  __ movq(kScratchRegister, FieldOperand(input, HeapObject::kMapOffset));
4041 
4042  if (instr->hydrogen()->is_interval_check()) {
4043  InstanceType first;
4044  InstanceType last;
4045  instr->hydrogen()->GetCheckInterval(&first, &last);
4046 
4047  __ cmpb(FieldOperand(kScratchRegister, Map::kInstanceTypeOffset),
4048  Immediate(static_cast<int8_t>(first)));
4049 
4050  // If there is only one type in the interval check for equality.
4051  if (first == last) {
4052  DeoptimizeIf(not_equal, instr->environment());
4053  } else {
4054  DeoptimizeIf(below, instr->environment());
4055  // Omit check for the last type.
4056  if (last != LAST_TYPE) {
4057  __ cmpb(FieldOperand(kScratchRegister, Map::kInstanceTypeOffset),
4058  Immediate(static_cast<int8_t>(last)));
4059  DeoptimizeIf(above, instr->environment());
4060  }
4061  }
4062  } else {
4063  uint8_t mask;
4064  uint8_t tag;
4065  instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);
4066 
4067  if (IsPowerOf2(mask)) {
4068  ASSERT(tag == 0 || IsPowerOf2(tag));
4069  __ testb(FieldOperand(kScratchRegister, Map::kInstanceTypeOffset),
4070  Immediate(mask));
4071  DeoptimizeIf(tag == 0 ? not_zero : zero, instr->environment());
4072  } else {
4073  __ movzxbl(kScratchRegister,
4074  FieldOperand(kScratchRegister, Map::kInstanceTypeOffset));
4075  __ andb(kScratchRegister, Immediate(mask));
4076  __ cmpb(kScratchRegister, Immediate(tag));
4077  DeoptimizeIf(not_equal, instr->environment());
4078  }
4079  }
4080 }
4081 
4082 
4083 void LCodeGen::DoCheckFunction(LCheckFunction* instr) {
4084  Register reg = ToRegister(instr->value());
4085  Handle<JSFunction> target = instr->hydrogen()->target();
4086  if (isolate()->heap()->InNewSpace(*target)) {
4087  Handle<JSGlobalPropertyCell> cell =
4088  isolate()->factory()->NewJSGlobalPropertyCell(target);
4089  __ movq(kScratchRegister, cell, RelocInfo::GLOBAL_PROPERTY_CELL);
4090  __ cmpq(reg, Operand(kScratchRegister, 0));
4091  } else {
4092  __ Cmp(reg, target);
4093  }
4094  DeoptimizeIf(not_equal, instr->environment());
4095 }
4096 
4097 
4098 void LCodeGen::DoCheckMapCommon(Register reg,
4099  Handle<Map> map,
4100  CompareMapMode mode,
4101  LEnvironment* env) {
4102  Label success;
4103  __ CompareMap(reg, map, &success, mode);
4104  DeoptimizeIf(not_equal, env);
4105  __ bind(&success);
4106 }
4107 
4108 
4109 void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
4110  LOperand* input = instr->InputAt(0);
4111  ASSERT(input->IsRegister());
4112  Register reg = ToRegister(input);
4113 
4114  Label success;
4115  SmallMapList* map_set = instr->hydrogen()->map_set();
4116  for (int i = 0; i < map_set->length() - 1; i++) {
4117  Handle<Map> map = map_set->at(i);
4118  __ CompareMap(reg, map, &success, REQUIRE_EXACT_MAP);
4119  __ j(equal, &success);
4120  }
4121  Handle<Map> map = map_set->last();
4122  DoCheckMapCommon(reg, map, REQUIRE_EXACT_MAP, instr->environment());
4123  __ bind(&success);
4124 }
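The loop above compares the object's map against every map in the set, jumping to success on the first match; only the final map goes through DoCheckMapCommon, so a mismatch against it is what triggers the deopt. In sketch form (illustrative only; the helper is made up):

    #include <vector>

    // The check passes if the map matches any entry; in the generated code a
    // mismatch against the last entry deoptimizes instead of returning false.
    bool MapSetCheckPasses(const void* object_map,
                           const std::vector<const void*>& map_set) {
      for (const void* map : map_set) {
        if (object_map == map) return true;
      }
      return false;
    }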
4125 
4126 
4127 void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
4128  XMMRegister value_reg = ToDoubleRegister(instr->unclamped());
4129  Register result_reg = ToRegister(instr->result());
4130  Register temp_reg = ToRegister(instr->TempAt(0));
4131  __ ClampDoubleToUint8(value_reg, xmm0, result_reg, temp_reg);
4132 }
4133 
4134 
4135 void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
4136  ASSERT(instr->unclamped()->Equals(instr->result()));
4137  Register value_reg = ToRegister(instr->result());
4138  __ ClampUint8(value_reg);
4139 }
4140 
4141 
4142 void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
4143  ASSERT(instr->unclamped()->Equals(instr->result()));
4144  Register input_reg = ToRegister(instr->unclamped());
4145  Register temp_reg = ToRegister(instr->TempAt(0));
4146  XMMRegister temp_xmm_reg = ToDoubleRegister(instr->TempAt(1));
4147  Label is_smi, done, heap_number;
4148 
4149  __ JumpIfSmi(input_reg, &is_smi);
4150 
4151  // Check for heap number
4152  __ Cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
4153  factory()->heap_number_map());
4154  __ j(equal, &heap_number, Label::kNear);
4155 
4156  // Check for undefined. Undefined is converted to zero for clamping
4157  // conversions.
4158  __ Cmp(input_reg, factory()->undefined_value());
4159  DeoptimizeIf(not_equal, instr->environment());
4160  __ movq(input_reg, Immediate(0));
4161  __ jmp(&done, Label::kNear);
4162 
4163  // Heap number
4164  __ bind(&heap_number);
4165  __ movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
4166  __ ClampDoubleToUint8(xmm0, temp_xmm_reg, input_reg, temp_reg);
4167  __ jmp(&done, Label::kNear);
4168 
4169  // smi
4170  __ bind(&is_smi);
4171  __ SmiToInteger32(input_reg, input_reg);
4172  __ ClampUint8(input_reg);
4173 
4174  __ bind(&done);
4175 }
4176 
4177 
4178 void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
4179  Register reg = ToRegister(instr->TempAt(0));
4180 
4181  Handle<JSObject> holder = instr->holder();
4182  Handle<JSObject> current_prototype = instr->prototype();
4183 
4184  // Load prototype object.
4185  __ LoadHeapObject(reg, current_prototype);
4186 
4187  // Check prototype maps up to the holder.
4188  while (!current_prototype.is_identical_to(holder)) {
4189  DoCheckMapCommon(reg, Handle<Map>(current_prototype->map()),
4190  ALLOW_ELEMENT_TRANSITION_MAPS, instr->environment());
4191  current_prototype =
4192  Handle<JSObject>(JSObject::cast(current_prototype->GetPrototype()));
4193  // Load next prototype object.
4194  __ LoadHeapObject(reg, current_prototype);
4195  }
4196 
4197  // Check the holder map.
4198  DoCheckMapCommon(reg, Handle<Map>(current_prototype->map()),
4199  ALLOW_ELEMENT_TRANSITION_MAPS, instr->environment());
4200 }
4201 
4202 
4203 void LCodeGen::DoAllocateObject(LAllocateObject* instr) {
4204  class DeferredAllocateObject: public LDeferredCode {
4205  public:
4206  DeferredAllocateObject(LCodeGen* codegen, LAllocateObject* instr)
4207  : LDeferredCode(codegen), instr_(instr) { }
4208  virtual void Generate() { codegen()->DoDeferredAllocateObject(instr_); }
4209  virtual LInstruction* instr() { return instr_; }
4210  private:
4211  LAllocateObject* instr_;
4212  };
4213 
4214  DeferredAllocateObject* deferred =
4215  new(zone()) DeferredAllocateObject(this, instr);
4216 
4217  Register result = ToRegister(instr->result());
4218  Register scratch = ToRegister(instr->TempAt(0));
4219  Handle<JSFunction> constructor = instr->hydrogen()->constructor();
4220  Handle<Map> initial_map(constructor->initial_map());
4221  int instance_size = initial_map->instance_size();
4222  ASSERT(initial_map->pre_allocated_property_fields() +
4223  initial_map->unused_property_fields() -
4224  initial_map->inobject_properties() == 0);
4225 
4226  // Allocate memory for the object. The initial map might change when
4227  // the constructor's prototype changes, but instance size and property
4228  // counts remain unchanged (if slack tracking finished).
4229  ASSERT(!constructor->shared()->IsInobjectSlackTrackingInProgress());
4230  __ AllocateInNewSpace(instance_size,
4231  result,
4232  no_reg,
4233  scratch,
4234  deferred->entry(),
4235  TAG_OBJECT);
4236 
4237  __ bind(deferred->exit());
4238  if (FLAG_debug_code) {
4239  Label is_in_new_space;
4240  __ JumpIfInNewSpace(result, scratch, &is_in_new_space);
4241  __ Abort("Allocated object is not in new-space");
4242  __ bind(&is_in_new_space);
4243  }
4244 
4245  // Load the initial map.
4246  Register map = scratch;
4247  __ LoadHeapObject(scratch, constructor);
4248  __ movq(map, FieldOperand(scratch, JSFunction::kPrototypeOrInitialMapOffset));
4249 
4250  if (FLAG_debug_code) {
4251  __ AbortIfSmi(map);
4252  __ cmpb(FieldOperand(map, Map::kInstanceSizeOffset),
4253  Immediate(instance_size >> kPointerSizeLog2));
4254  __ Assert(equal, "Unexpected instance size");
4255  __ cmpb(FieldOperand(map, Map::kPreAllocatedPropertyFieldsOffset),
4256  Immediate(initial_map->pre_allocated_property_fields()));
4257  __ Assert(equal, "Unexpected pre-allocated property fields count");
4258  __ cmpb(FieldOperand(map, Map::kUnusedPropertyFieldsOffset),
4259  Immediate(initial_map->unused_property_fields()));
4260  __ Assert(equal, "Unexpected unused property fields count");
4261  __ cmpb(FieldOperand(map, Map::kInObjectPropertiesOffset),
4262  Immediate(initial_map->inobject_properties()));
4263  __ Assert(equal, "Unexpected in-object property fields count");
4264  }
4265 
4266  // Initialize map and fields of the newly allocated object.
4267  ASSERT(initial_map->instance_type() == JS_OBJECT_TYPE);
4268  __ movq(FieldOperand(result, JSObject::kMapOffset), map);
4269  __ LoadRoot(scratch, Heap::kEmptyFixedArrayRootIndex);
4270  __ movq(FieldOperand(result, JSObject::kElementsOffset), scratch);
4271  __ movq(FieldOperand(result, JSObject::kPropertiesOffset), scratch);
4272  if (initial_map->inobject_properties() != 0) {
4273  __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
4274  for (int i = 0; i < initial_map->inobject_properties(); i++) {
4275  int property_offset = JSObject::kHeaderSize + i * kPointerSize;
4276  __ movq(FieldOperand(result, property_offset), scratch);
4277  }
4278  }
4279 }
4280 
4281 
4282 void LCodeGen::DoDeferredAllocateObject(LAllocateObject* instr) {
4283  Register result = ToRegister(instr->result());
4284  Handle<JSFunction> constructor = instr->hydrogen()->constructor();
4285  Handle<Map> initial_map(constructor->initial_map());
4286  int instance_size = initial_map->instance_size();
4287 
4288  // TODO(3095996): Get rid of this. For now, we need to make the
4289  // result register contain a valid pointer because it is already
4290  // contained in the register pointer map.
4291  __ Set(result, 0);
4292 
4293  PushSafepointRegistersScope scope(this);
4294  __ Push(Smi::FromInt(instance_size));
4295  CallRuntimeFromDeferred(Runtime::kAllocateInNewSpace, 1, instr);
4296  __ StoreToSafepointRegisterSlot(result, rax);
4297 }
4298 
4299 
4300 void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
4301  Heap* heap = isolate()->heap();
4302  ElementsKind boilerplate_elements_kind =
4303  instr->hydrogen()->boilerplate_elements_kind();
4304 
4305  // Deopt if the array literal boilerplate ElementsKind is of a type different
4306  // than the expected one. The check isn't necessary if the boilerplate has
4307  // already been converted to TERMINAL_FAST_ELEMENTS_KIND.
4308  if (CanTransitionToMoreGeneralFastElementsKind(
4309  boilerplate_elements_kind, true)) {
4310  __ LoadHeapObject(rax, instr->hydrogen()->boilerplate_object());
4311  __ movq(rbx, FieldOperand(rax, HeapObject::kMapOffset));
4312  // Load the map's "bit field 2".
4313  __ movb(rbx, FieldOperand(rbx, Map::kBitField2Offset));
4314  // Retrieve elements_kind from bit field 2.
4315  __ and_(rbx, Immediate(Map::kElementsKindMask));
4316  __ cmpb(rbx, Immediate(boilerplate_elements_kind <<
4317  Map::kElementsKindShift));
4318  DeoptimizeIf(not_equal, instr->environment());
4319  }
4320 
4321  // Set up the parameters to the stub/runtime call.
4322  __ movq(rax, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
4323  __ push(FieldOperand(rax, JSFunction::kLiteralsOffset));
4324  __ Push(Smi::FromInt(instr->hydrogen()->literal_index()));
4325  // Boilerplate already exists, constant elements are never accessed.
4326  // Pass an empty fixed array.
4327  __ Push(Handle<FixedArray>(heap->empty_fixed_array()));
4328 
4329  // Pick the right runtime function or stub to call.
4330  int length = instr->hydrogen()->length();
4331  if (instr->hydrogen()->IsCopyOnWrite()) {
4332  ASSERT(instr->hydrogen()->depth() == 1);
4333  FastCloneShallowArrayStub::Mode mode =
4334  FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS;
4335  FastCloneShallowArrayStub stub(mode, length);
4336  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
4337  } else if (instr->hydrogen()->depth() > 1) {
4338  CallRuntime(Runtime::kCreateArrayLiteral, 3, instr);
4339  } else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) {
4340  CallRuntime(Runtime::kCreateArrayLiteralShallow, 3, instr);
4341  } else {
4342  FastCloneShallowArrayStub::Mode mode =
4343  boilerplate_elements_kind == FAST_DOUBLE_ELEMENTS
4344  ? FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS
4345  : FastCloneShallowArrayStub::CLONE_ELEMENTS;
4346  FastCloneShallowArrayStub stub(mode, length);
4347  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
4348  }
4349 }
4350 
4351 
4352 void LCodeGen::EmitDeepCopy(Handle<JSObject> object,
4353  Register result,
4354  Register source,
4355  int* offset) {
4356  ASSERT(!source.is(rcx));
4357  ASSERT(!result.is(rcx));
4358 
4359  // Only elements backing stores for non-COW arrays need to be copied.
4360  Handle<FixedArrayBase> elements(object->elements());
4361  bool has_elements = elements->length() > 0 &&
4362  elements->map() != isolate()->heap()->fixed_cow_array_map();
4363 
4364  // Increase the offset so that subsequent objects end up right after
4365  // this object and its backing store.
4366  int object_offset = *offset;
4367  int object_size = object->map()->instance_size();
4368  int elements_offset = *offset + object_size;
4369  int elements_size = has_elements ? elements->Size() : 0;
4370  *offset += object_size + elements_size;
4371 
4372  // Copy object header.
4373  ASSERT(object->properties()->length() == 0);
4374  int inobject_properties = object->map()->inobject_properties();
4375  int header_size = object_size - inobject_properties * kPointerSize;
4376  for (int i = 0; i < header_size; i += kPointerSize) {
4377  if (has_elements && i == JSObject::kElementsOffset) {
4378  __ lea(rcx, Operand(result, elements_offset));
4379  } else {
4380  __ movq(rcx, FieldOperand(source, i));
4381  }
4382  __ movq(FieldOperand(result, object_offset + i), rcx);
4383  }
4384 
4385  // Copy in-object properties.
4386  for (int i = 0; i < inobject_properties; i++) {
4387  int total_offset = object_offset + object->GetInObjectPropertyOffset(i);
4388  Handle<Object> value = Handle<Object>(object->InObjectPropertyAt(i));
4389  if (value->IsJSObject()) {
4390  Handle<JSObject> value_object = Handle<JSObject>::cast(value);
4391  __ lea(rcx, Operand(result, *offset));
4392  __ movq(FieldOperand(result, total_offset), rcx);
4393  __ LoadHeapObject(source, value_object);
4394  EmitDeepCopy(value_object, result, source, offset);
4395  } else if (value->IsHeapObject()) {
4396  __ LoadHeapObject(rcx, Handle<HeapObject>::cast(value));
4397  __ movq(FieldOperand(result, total_offset), rcx);
4398  } else {
4399  __ movq(rcx, value, RelocInfo::NONE);
4400  __ movq(FieldOperand(result, total_offset), rcx);
4401  }
4402  }
4403 
4404  if (has_elements) {
4405  // Copy elements backing store header.
4406  __ LoadHeapObject(source, elements);
4407  for (int i = 0; i < FixedArray::kHeaderSize; i += kPointerSize) {
4408  __ movq(rcx, FieldOperand(source, i));
4409  __ movq(FieldOperand(result, elements_offset + i), rcx);
4410  }
4411 
4412  // Copy elements backing store content.
4413  int elements_length = elements->length();
4414  if (elements->IsFixedDoubleArray()) {
4415  Handle<FixedDoubleArray> double_array =
4416  Handle<FixedDoubleArray>::cast(elements);
4417  for (int i = 0; i < elements_length; i++) {
4418  int64_t value = double_array->get_representation(i);
4419  int total_offset =
4420  elements_offset + FixedDoubleArray::OffsetOfElementAt(i);
4421  __ movq(rcx, value, RelocInfo::NONE);
4422  __ movq(FieldOperand(result, total_offset), rcx);
4423  }
4424  } else if (elements->IsFixedArray()) {
4425  Handle<FixedArray> fast_elements = Handle<FixedArray>::cast(elements);
4426  for (int i = 0; i < elements_length; i++) {
4427  int total_offset = elements_offset + FixedArray::OffsetOfElementAt(i);
4428  Handle<Object> value(fast_elements->get(i));
4429  if (value->IsJSObject()) {
4430  Handle<JSObject> value_object = Handle<JSObject>::cast(value);
4431  __ lea(rcx, Operand(result, *offset));
4432  __ movq(FieldOperand(result, total_offset), rcx);
4433  __ LoadHeapObject(source, value_object);
4434  EmitDeepCopy(value_object, result, source, offset);
4435  } else if (value->IsHeapObject()) {
4436  __ LoadHeapObject(rcx, Handle<HeapObject>::cast(value));
4437  __ movq(FieldOperand(result, total_offset), rcx);
4438  } else {
4439  __ movq(rcx, value, RelocInfo::NONE);
4440  __ movq(FieldOperand(result, total_offset), rcx);
4441  }
4442  }
4443  } else {
4444  UNREACHABLE();
4445  }
4446  }
4447 }
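EmitDeepCopy lays each object out immediately followed by its elements backing store and keeps *offset pointing at the next free byte of the single pre-sized allocation, recursing into nested objects so they land right behind their parent. The bookkeeping alone, as a sketch (illustrative only; the names are made up):

    // Reserve space for one object plus its elements store inside the single
    // pre-sized allocation; *offset is advanced past both, so the next copied
    // object is placed directly behind them.
    struct CopyPlan {
      int object_offset;
      int elements_offset;
    };

    CopyPlan PlanDeepCopy(int* offset, int object_size, int elements_size) {
      CopyPlan plan;
      plan.object_offset = *offset;
      plan.elements_offset = *offset + object_size;
      *offset += object_size + elements_size;
      return plan;
    }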
4448 
4449 
4450 void LCodeGen::DoFastLiteral(LFastLiteral* instr) {
4451  int size = instr->hydrogen()->total_size();
4452  ElementsKind boilerplate_elements_kind =
4453  instr->hydrogen()->boilerplate()->GetElementsKind();
4454 
4455  // Deopt if the array literal boilerplate ElementsKind is of a type different
4456  // than the expected one. The check isn't necessary if the boilerplate has
4457  // already been converted to TERMINAL_FAST_ELEMENTS_KIND.
4458  if (CanTransitionToMoreGeneralFastElementsKind(
4459  boilerplate_elements_kind, true)) {
4460  __ LoadHeapObject(rbx, instr->hydrogen()->boilerplate());
4461  __ movq(rcx, FieldOperand(rbx, HeapObject::kMapOffset));
4462  // Load the map's "bit field 2".
4463  __ movb(rcx, FieldOperand(rcx, Map::kBitField2Offset));
4464  // Retrieve elements_kind from bit field 2.
4465  __ and_(rcx, Immediate(Map::kElementsKindMask));
4466  __ cmpb(rcx, Immediate(boilerplate_elements_kind <<
4467  Map::kElementsKindShift));
4468  DeoptimizeIf(not_equal, instr->environment());
4469  }
4470 
4471  // Allocate all objects that are part of the literal in one big
4472  // allocation. This avoids multiple limit checks.
4473  Label allocated, runtime_allocate;
4474  __ AllocateInNewSpace(size, rax, rcx, rdx, &runtime_allocate, TAG_OBJECT);
4475  __ jmp(&allocated);
4476 
4477  __ bind(&runtime_allocate);
4478  __ Push(Smi::FromInt(size));
4479  CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
4480 
4481  __ bind(&allocated);
4482  int offset = 0;
4483  __ LoadHeapObject(rbx, instr->hydrogen()->boilerplate());
4484  EmitDeepCopy(instr->hydrogen()->boilerplate(), rax, rbx, &offset);
4485  ASSERT_EQ(size, offset);
4486 }
4487 
4488 
4489 void LCodeGen::DoObjectLiteral(LObjectLiteral* instr) {
4490  Handle<FixedArray> literals(instr->environment()->closure()->literals());
4491  Handle<FixedArray> constant_properties =
4492  instr->hydrogen()->constant_properties();
4493 
4494  // Set up the parameters to the stub/runtime call.
4495  __ PushHeapObject(literals);
4496  __ Push(Smi::FromInt(instr->hydrogen()->literal_index()));
4497  __ Push(constant_properties);
4498  int flags = instr->hydrogen()->fast_elements()
4499  ? ObjectLiteral::kFastElements
4500  : ObjectLiteral::kNoFlags;
4501  flags |= instr->hydrogen()->has_function()
4502  ? ObjectLiteral::kHasFunction
4503  : ObjectLiteral::kNoFlags;
4504  __ Push(Smi::FromInt(flags));
4505 
4506  // Pick the right runtime function or stub to call.
4507  int properties_count = constant_properties->length() / 2;
4508  if (instr->hydrogen()->depth() > 1) {
4509  CallRuntime(Runtime::kCreateObjectLiteral, 4, instr);
4510  } else if (flags != ObjectLiteral::kFastElements ||
4511  properties_count > FastCloneShallowObjectStub::kMaximumClonedProperties) {
4512  CallRuntime(Runtime::kCreateObjectLiteralShallow, 4, instr);
4513  } else {
4514  FastCloneShallowObjectStub stub(properties_count);
4515  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
4516  }
4517 }
4518 
4519 
4520 void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
4521  ASSERT(ToRegister(instr->InputAt(0)).is(rax));
4522  __ push(rax);
4523  CallRuntime(Runtime::kToFastProperties, 1, instr);
4524 }
4525 
4526 
4527 void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
4528  Label materialized;
4529  // Registers will be used as follows:
4530  // rdi = JS function.
4531  // rcx = literals array.
4532  // rbx = regexp literal.
4533  // rax = regexp literal clone.
4534  __ movq(rdi, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
4535  __ movq(rcx, FieldOperand(rdi, JSFunction::kLiteralsOffset));
4536  int literal_offset = FixedArray::kHeaderSize +
4537  instr->hydrogen()->literal_index() * kPointerSize;
4538  __ movq(rbx, FieldOperand(rcx, literal_offset));
4539  __ CompareRoot(rbx, Heap::kUndefinedValueRootIndex);
4540  __ j(not_equal, &materialized, Label::kNear);
4541 
4542  // Create regexp literal using runtime function
4543  // Result will be in rax.
4544  __ push(rcx);
4545  __ Push(Smi::FromInt(instr->hydrogen()->literal_index()));
4546  __ Push(instr->hydrogen()->pattern());
4547  __ Push(instr->hydrogen()->flags());
4548  CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr);
4549  __ movq(rbx, rax);
4550 
4551  __ bind(&materialized);
4552  int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
4553  Label allocated, runtime_allocate;
4554  __ AllocateInNewSpace(size, rax, rcx, rdx, &runtime_allocate, TAG_OBJECT);
4555  __ jmp(&allocated);
4556 
4557  __ bind(&runtime_allocate);
4558  __ push(rbx);
4559  __ Push(Smi::FromInt(size));
4560  CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
4561  __ pop(rbx);
4562 
4563  __ bind(&allocated);
4564  // Copy the content into the newly allocated memory.
4565  // (Unroll copy loop once for better throughput).
4566  for (int i = 0; i < size - kPointerSize; i += 2 * kPointerSize) {
4567  __ movq(rdx, FieldOperand(rbx, i));
4568  __ movq(rcx, FieldOperand(rbx, i + kPointerSize));
4569  __ movq(FieldOperand(rax, i), rdx);
4570  __ movq(FieldOperand(rax, i + kPointerSize), rcx);
4571  }
4572  if ((size % (2 * kPointerSize)) != 0) {
4573  __ movq(rdx, FieldOperand(rbx, size - kPointerSize));
4574  __ movq(FieldOperand(rax, size - kPointerSize), rdx);
4575  }
4576 }
4577 
4578 
4579 void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
4580  // Use the fast case closure allocation code that allocates in new
4581  // space for nested functions that don't need literals cloning.
4582  Handle<SharedFunctionInfo> shared_info = instr->shared_info();
4583  bool pretenure = instr->hydrogen()->pretenure();
4584  if (!pretenure && shared_info->num_literals() == 0) {
4585  FastNewClosureStub stub(shared_info->language_mode());
4586  __ Push(shared_info);
4587  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
4588  } else {
4589  __ push(rsi);
4590  __ Push(shared_info);
4591  __ PushRoot(pretenure ?
4592  Heap::kTrueValueRootIndex :
4593  Heap::kFalseValueRootIndex);
4594  CallRuntime(Runtime::kNewClosure, 3, instr);
4595  }
4596 }
4597 
4598 
4599 void LCodeGen::DoTypeof(LTypeof* instr) {
4600  LOperand* input = instr->InputAt(0);
4601  EmitPushTaggedOperand(input);
4602  CallRuntime(Runtime::kTypeof, 1, instr);
4603 }
4604 
4605 
4606 void LCodeGen::EmitPushTaggedOperand(LOperand* operand) {
4607  ASSERT(!operand->IsDoubleRegister());
4608  if (operand->IsConstantOperand()) {
4609  Handle<Object> object = ToHandle(LConstantOperand::cast(operand));
4610  if (object->IsSmi()) {
4611  __ Push(Handle<Smi>::cast(object));
4612  } else {
4613  __ PushHeapObject(Handle<HeapObject>::cast(object));
4614  }
4615  } else if (operand->IsRegister()) {
4616  __ push(ToRegister(operand));
4617  } else {
4618  __ push(ToOperand(operand));
4619  }
4620 }
4621 
4622 
4623 void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
4624  Register input = ToRegister(instr->InputAt(0));
4625  int true_block = chunk_->LookupDestination(instr->true_block_id());
4626  int false_block = chunk_->LookupDestination(instr->false_block_id());
4627  Label* true_label = chunk_->GetAssemblyLabel(true_block);
4628  Label* false_label = chunk_->GetAssemblyLabel(false_block);
4629 
4630  Condition final_branch_condition =
4631  EmitTypeofIs(true_label, false_label, input, instr->type_literal());
4632  if (final_branch_condition != no_condition) {
4633  EmitBranch(true_block, false_block, final_branch_condition);
4634  }
4635 }
4636 
4637 
4638 Condition LCodeGen::EmitTypeofIs(Label* true_label,
4639  Label* false_label,
4640  Register input,
4641  Handle<String> type_name) {
4642  Condition final_branch_condition = no_condition;
4643  if (type_name->Equals(heap()->number_symbol())) {
4644  __ JumpIfSmi(input, true_label);
4645  __ CompareRoot(FieldOperand(input, HeapObject::kMapOffset),
4646  Heap::kHeapNumberMapRootIndex);
4647 
4648  final_branch_condition = equal;
4649 
4650  } else if (type_name->Equals(heap()->string_symbol())) {
4651  __ JumpIfSmi(input, false_label);
4652  __ CmpObjectType(input, FIRST_NONSTRING_TYPE, input);
4653  __ j(above_equal, false_label);
4654  __ testb(FieldOperand(input, Map::kBitFieldOffset),
4655  Immediate(1 << Map::kIsUndetectable));
4656  final_branch_condition = zero;
4657 
4658  } else if (type_name->Equals(heap()->boolean_symbol())) {
4659  __ CompareRoot(input, Heap::kTrueValueRootIndex);
4660  __ j(equal, true_label);
4661  __ CompareRoot(input, Heap::kFalseValueRootIndex);
4662  final_branch_condition = equal;
4663 
4664  } else if (FLAG_harmony_typeof && type_name->Equals(heap()->null_symbol())) {
4665  __ CompareRoot(input, Heap::kNullValueRootIndex);
4666  final_branch_condition = equal;
4667 
4668  } else if (type_name->Equals(heap()->undefined_symbol())) {
4669  __ CompareRoot(input, Heap::kUndefinedValueRootIndex);
4670  __ j(equal, true_label);
4671  __ JumpIfSmi(input, false_label);
4672  // Check for undetectable objects => true.
4673  __ movq(input, FieldOperand(input, HeapObject::kMapOffset));
4674  __ testb(FieldOperand(input, Map::kBitFieldOffset),
4675  Immediate(1 << Map::kIsUndetectable));
4676  final_branch_condition = not_zero;
4677 
4678  } else if (type_name->Equals(heap()->function_symbol())) {
4679  STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
4680  __ JumpIfSmi(input, false_label);
4681  __ CmpObjectType(input, JS_FUNCTION_TYPE, input);
4682  __ j(equal, true_label);
4683  __ CmpInstanceType(input, JS_FUNCTION_PROXY_TYPE);
4684  final_branch_condition = equal;
4685 
4686  } else if (type_name->Equals(heap()->object_symbol())) {
4687  __ JumpIfSmi(input, false_label);
4688  if (!FLAG_harmony_typeof) {
4689  __ CompareRoot(input, Heap::kNullValueRootIndex);
4690  __ j(equal, true_label);
4691  }
4692  __ CmpObjectType(input, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE, input);
4693  __ j(below, false_label);
4694  __ CmpInstanceType(input, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
4695  __ j(above, false_label);
4696  // Check for undetectable objects => false.
4697  __ testb(FieldOperand(input, Map::kBitFieldOffset),
4698  Immediate(1 << Map::kIsUndetectable));
4699  final_branch_condition = zero;
4700 
4701  } else {
4702  __ jmp(false_label);
4703  }
4704 
4705  return final_branch_condition;
4706 }
4707 
4708 
4709 void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
4710  Register temp = ToRegister(instr->TempAt(0));
4711  int true_block = chunk_->LookupDestination(instr->true_block_id());
4712  int false_block = chunk_->LookupDestination(instr->false_block_id());
4713 
4714  EmitIsConstructCall(temp);
4715  EmitBranch(true_block, false_block, equal);
4716 }
4717 
4718 
4719 void LCodeGen::EmitIsConstructCall(Register temp) {
4720  // Get the frame pointer for the calling frame.
4721  __ movq(temp, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
4722 
4723  // Skip the arguments adaptor frame if it exists.
4724  Label check_frame_marker;
4725  __ Cmp(Operand(temp, StandardFrameConstants::kContextOffset),
4726  Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
4727  __ j(not_equal, &check_frame_marker, Label::kNear);
4728  __ movq(temp, Operand(rax, StandardFrameConstants::kCallerFPOffset));
4729 
4730  // Check the marker in the calling frame.
4731  __ bind(&check_frame_marker);
4732  __ Cmp(Operand(temp, StandardFrameConstants::kMarkerOffset),
4733  Smi::FromInt(StackFrame::CONSTRUCT));
4734 }
4735 
4736 
4737 void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
4738  // Ensure that we have enough space after the previous lazy-bailout
4739  // instruction for patching the code here.
4740  int current_pc = masm()->pc_offset();
4741  if (current_pc < last_lazy_deopt_pc_ + space_needed) {
4742  int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
4743  __ Nop(padding_size);
4744  }
4745 }
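EnsureSpaceForLazyDeopt pads with Nop bytes so the previous lazy-bailout site can later be patched with a call sequence of Deoptimizer::patch_size() bytes without overwriting the instructions that follow it. A worked example of the arithmetic (illustrative only; the numbers are made up):

    #include <algorithm>

    // If the last lazy-deopt point was at pc offset 100, patching needs 13
    // bytes, and the current pc offset is 105, then 100 + 13 - 105 = 8 bytes
    // of padding are emitted; once current_pc is far enough along, none are.
    int LazyDeoptPadding(int last_lazy_deopt_pc, int space_needed, int current_pc) {
      return std::max(0, last_lazy_deopt_pc + space_needed - current_pc);
    }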
4746 
4747 
4748 void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
4749  EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
4750  last_lazy_deopt_pc_ = masm()->pc_offset();
4751  ASSERT(instr->HasEnvironment());
4752  LEnvironment* env = instr->environment();
4753  RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
4754  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
4755 }
4756 
4757 
4758 void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
4759  DeoptimizeIf(no_condition, instr->environment());
4760 }
4761 
4762 
4763 void LCodeGen::DoDeleteProperty(LDeleteProperty* instr) {
4764  LOperand* obj = instr->object();
4765  LOperand* key = instr->key();
4766  EmitPushTaggedOperand(obj);
4767  EmitPushTaggedOperand(key);
4768  ASSERT(instr->HasPointerMap());
4769  LPointerMap* pointers = instr->pointer_map();
4770  RecordPosition(pointers->position());
4771  // Create safepoint generator that will also ensure enough space in the
4772  // reloc info for patching in deoptimization (since this is invoking a
4773  // builtin)
4774  SafepointGenerator safepoint_generator(
4775  this, pointers, Safepoint::kLazyDeopt);
4776  __ Push(Smi::FromInt(strict_mode_flag()));
4777  __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION, safepoint_generator);
4778 }
4779 
4780 
4781 void LCodeGen::DoIn(LIn* instr) {
4782  LOperand* obj = instr->object();
4783  LOperand* key = instr->key();
4784  EmitPushTaggedOperand(key);
4785  EmitPushTaggedOperand(obj);
4786  ASSERT(instr->HasPointerMap());
4787  LPointerMap* pointers = instr->pointer_map();
4788  RecordPosition(pointers->position());
4789  SafepointGenerator safepoint_generator(
4790  this, pointers, Safepoint::kLazyDeopt);
4791  __ InvokeBuiltin(Builtins::IN, CALL_FUNCTION, safepoint_generator);
4792 }
4793 
4794 
4795 void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
4796  PushSafepointRegistersScope scope(this);
4797  __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
4798  __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
4799  RecordSafepointWithLazyDeopt(instr, RECORD_SAFEPOINT_WITH_REGISTERS, 0);
4800  ASSERT(instr->HasEnvironment());
4801  LEnvironment* env = instr->environment();
4802  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
4803 }
4804 
4805 
4806 void LCodeGen::DoStackCheck(LStackCheck* instr) {
4807  class DeferredStackCheck: public LDeferredCode {
4808  public:
4809  DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
4810  : LDeferredCode(codegen), instr_(instr) { }
4811  virtual void Generate() { codegen()->DoDeferredStackCheck(instr_); }
4812  virtual LInstruction* instr() { return instr_; }
4813  private:
4814  LStackCheck* instr_;
4815  };
4816 
4817  ASSERT(instr->HasEnvironment());
4818  LEnvironment* env = instr->environment();
4819  // There is no LLazyBailout instruction for stack-checks. We have to
4820  // prepare for lazy deoptimization explicitly here.
4821  if (instr->hydrogen()->is_function_entry()) {
4822  // Perform stack overflow check.
4823  Label done;
4824  __ CompareRoot(rsp, Heap::kStackLimitRootIndex);
4825  __ j(above_equal, &done, Label::kNear);
4826  StackCheckStub stub;
4827  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
4828  EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
4829  last_lazy_deopt_pc_ = masm()->pc_offset();
4830  __ bind(&done);
4831  RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
4832  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
4833  } else {
4834  ASSERT(instr->hydrogen()->is_backwards_branch());
4835  // Perform stack overflow check if this goto needs it before jumping.
4836  DeferredStackCheck* deferred_stack_check =
4837  new(zone()) DeferredStackCheck(this, instr);
4838  __ CompareRoot(rsp, Heap::kStackLimitRootIndex);
4839  __ j(below, deferred_stack_check->entry());
4840  EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
4841  last_lazy_deopt_pc_ = masm()->pc_offset();
4842  __ bind(instr->done_label());
4843  deferred_stack_check->SetExit(instr->done_label());
4844  RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
4845  // Don't record a deoptimization index for the safepoint here.
4846  // This will be done explicitly when emitting call and the safepoint in
4847  // the deferred code.
4848  }
4849 }
4850 
4851 
4852 void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
4853  // This is a pseudo-instruction that ensures that the environment here is
4854  // properly registered for deoptimization and records the assembler's PC
4855  // offset.
4856  LEnvironment* environment = instr->environment();
4857  environment->SetSpilledRegisters(instr->SpilledRegisterArray(),
4858  instr->SpilledDoubleRegisterArray());
4859 
4860  // If the environment were already registered, we would have no way of
4861  // backpatching it with the spill slot operands.
4862  ASSERT(!environment->HasBeenRegistered());
4863  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
4864  ASSERT(osr_pc_offset_ == -1);
4865  osr_pc_offset_ = masm()->pc_offset();
4866 }
4867 
4868 
4869 void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
4870  __ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
4871  DeoptimizeIf(equal, instr->environment());
4872 
4873  Register null_value = rdi;
4874  __ LoadRoot(null_value, Heap::kNullValueRootIndex);
4875  __ cmpq(rax, null_value);
4876  DeoptimizeIf(equal, instr->environment());
4877 
4878  Condition cc = masm()->CheckSmi(rax);
4879  DeoptimizeIf(cc, instr->environment());
4880 
4881  STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
4882  __ CmpObjectType(rax, LAST_JS_PROXY_TYPE, rcx);
4883  DeoptimizeIf(below_equal, instr->environment());
4884 
4885  Label use_cache, call_runtime;
4886  __ CheckEnumCache(null_value, &call_runtime);
4887 
4888  __ movq(rax, FieldOperand(rax, HeapObject::kMapOffset));
4889  __ jmp(&use_cache, Label::kNear);
4890 
4891  // Get the set of properties to enumerate.
4892  __ bind(&call_runtime);
4893  __ push(rax);
4894  CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr);
4895 
4896  __ CompareRoot(FieldOperand(rax, HeapObject::kMapOffset),
4897  Heap::kMetaMapRootIndex);
4898  DeoptimizeIf(not_equal, instr->environment());
4899  __ bind(&use_cache);
4900 }
4901 
4902 
4903 void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
4904  Register map = ToRegister(instr->map());
4905  Register result = ToRegister(instr->result());
4906  __ LoadInstanceDescriptors(map, result);
4907  __ movq(result,
4908  FieldOperand(result, DescriptorArray::kEnumerationIndexOffset));
4909  __ movq(result,
4910  FieldOperand(result, FixedArray::SizeFor(instr->idx())));
4911  Condition cc = masm()->CheckSmi(result);
4912  DeoptimizeIf(cc, instr->environment());
4913 }
4914 
4915 
4916 void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
4917  Register object = ToRegister(instr->value());
4918  __ cmpq(ToRegister(instr->map()),
4919  FieldOperand(object, HeapObject::kMapOffset));
4920  DeoptimizeIf(not_equal, instr->environment());
4921 }
4922 
4923 
4924 void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
4925  Register object = ToRegister(instr->object());
4926  Register index = ToRegister(instr->index());
4927 
4928  Label out_of_object, done;
4929  __ SmiToInteger32(index, index);
4930  __ cmpl(index, Immediate(0));
4931  __ j(less, &out_of_object);
4932  __ movq(object, FieldOperand(object,
4933  index,
4934  times_pointer_size,
4935  JSObject::kHeaderSize));
4936  __ jmp(&done, Label::kNear);
4937 
4938  __ bind(&out_of_object);
4939  __ movq(object, FieldOperand(object, JSObject::kPropertiesOffset));
4940  __ negl(index);
4941  // Index is now equal to out of object property index plus 1.
4942  __ movq(object, FieldOperand(object,
4943  index,
4944  times_pointer_size,
4945  FixedArray::kHeaderSize - kPointerSize));
4946  __ bind(&done);
4947 }
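// NOTE: Editorial sketch, not part of lithium-codegen-x64.cc.  The smi index
// above encodes where the field lives: a non-negative value selects an
// in-object field, a negative value selects a slot in the out-of-object
// properties array (after negation the value is the slot number plus one,
// hence the "- kPointerSize" displacement).  Equivalent offset arithmetic,
// using only constants already referenced in this function and a hypothetical
// helper name:
//
//   static int FieldByIndexOffset(int index) {
//     if (index >= 0) {
//       return JSObject::kHeaderSize + index * kPointerSize;
//     }
//     return FixedArray::kHeaderSize + (-index - 1) * kPointerSize;
//   }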
4948 
4949 
4950 #undef __
4951 
4952 } } // namespace v8::internal
4953 
4954 #endif // V8_TARGET_ARCH_X64