v8 3.25.30 (node 0.11.13)
V8 is Google's open source JavaScript engine
lithium-codegen-ia32.cc
1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
4 // met:
5 //
6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided
11 // with the distribution.
12 // * Neither the name of Google Inc. nor the names of its
13 // contributors may be used to endorse or promote products derived
14 // from this software without specific prior written permission.
15 //
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 
28 #include "v8.h"
29 
30 #if V8_TARGET_ARCH_IA32
31 
32 #include "lithium-codegen-ia32.h"
33 #include "ic.h"
34 #include "code-stubs.h"
35 #include "deoptimizer.h"
36 #include "stub-cache.h"
37 #include "codegen.h"
38 #include "hydrogen-osr.h"
39 
40 namespace v8 {
41 namespace internal {
42 
43 
44 static SaveFPRegsMode GetSaveFPRegsMode() {
45  // We don't need to save floating point regs when generating the snapshot
46  return CpuFeatures::IsSafeForSnapshot(SSE2) ? kSaveFPRegs : kDontSaveFPRegs;
47 }
48 
49 
50 // When invoking builtins, we need to record the safepoint in the middle of
51 // the invoke instruction sequence generated by the macro assembler.
52 class SafepointGenerator V8_FINAL : public CallWrapper {
53  public:
54  SafepointGenerator(LCodeGen* codegen,
55  LPointerMap* pointers,
56  Safepoint::DeoptMode mode)
57  : codegen_(codegen),
58  pointers_(pointers),
59  deopt_mode_(mode) {}
60  virtual ~SafepointGenerator() {}
61 
62  virtual void BeforeCall(int call_size) const V8_OVERRIDE {}
63 
64  virtual void AfterCall() const V8_OVERRIDE {
65  codegen_->RecordSafepoint(pointers_, deopt_mode_);
66  }
67 
68  private:
69  LCodeGen* codegen_;
70  LPointerMap* pointers_;
71  Safepoint::DeoptMode deopt_mode_;
72 };
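// Usage sketch (illustrative only, names are placeholders for whatever the
// call site uses): a SafepointGenerator is handed to an invoke sequence as
// its CallWrapper, so AfterCall() records the safepoint immediately after
// the emitted call, e.g.:
//
//   SafepointGenerator generator(this, instr->pointer_map(),
//                                Safepoint::kLazyDeopt);
//   __ InvokeFunction(function_reg, actual_count, CALL_FUNCTION, generator);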
73 
74 
75 #define __ masm()->
76 
77 bool LCodeGen::GenerateCode() {
78  LPhase phase("Z_Code generation", chunk());
79  ASSERT(is_unused());
80  status_ = GENERATING;
81 
82  // Open a frame scope to indicate that there is a frame on the stack. The
83  // MANUAL indicates that the scope shouldn't actually generate code to set up
84  // the frame (that is done in GeneratePrologue).
85  FrameScope frame_scope(masm_, StackFrame::MANUAL);
86 
87  support_aligned_spilled_doubles_ = info()->IsOptimizing();
88 
89  dynamic_frame_alignment_ = info()->IsOptimizing() &&
90  ((chunk()->num_double_slots() > 2 &&
91  !chunk()->graph()->is_recursive()) ||
92  !info()->osr_ast_id().IsNone());
93 
94  return GeneratePrologue() &&
95  GenerateBody() &&
96  GenerateDeferredCode() &&
97  GenerateJumpTable() &&
98  GenerateSafepointTable();
99 }
100 
101 
102 void LCodeGen::FinishCode(Handle<Code> code) {
103  ASSERT(is_done());
104  code->set_stack_slots(GetStackSlotCount());
105  code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
106  if (code->is_optimized_code()) RegisterWeakObjectsInOptimizedCode(code);
107  PopulateDeoptimizationData(code);
108  if (!info()->IsStub()) {
109  Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(code);
110  }
111  info()->CommitDependencies(code);
112 }
113 
114 
115 void LCodeGen::Abort(BailoutReason reason) {
116  info()->set_bailout_reason(reason);
117  status_ = ABORTED;
118 }
119 
120 
121 #ifdef _MSC_VER
122 void LCodeGen::MakeSureStackPagesMapped(int offset) {
123  const int kPageSize = 4 * KB;
124  for (offset -= kPageSize; offset > 0; offset -= kPageSize) {
125  __ mov(Operand(esp, offset), eax);
126  }
127 }
128 #endif
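// MakeSureStackPagesMapped is only needed with the Microsoft toolchain:
// Windows commits stack pages lazily behind a single guard page, so before
// esp is lowered by a large amount each 4 KB page in the new area is touched
// once (the mov of eax above), keeping every access within one page of the
// guard page so the OS can grow the stack.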
129 
130 
131 void LCodeGen::SaveCallerDoubles() {
132  ASSERT(info()->saves_caller_doubles());
133  ASSERT(NeedsEagerFrame());
134  Comment(";;; Save clobbered callee double registers");
135  CpuFeatureScope scope(masm(), SSE2);
136  int count = 0;
137  BitVector* doubles = chunk()->allocated_double_registers();
138  BitVector::Iterator save_iterator(doubles);
139  while (!save_iterator.Done()) {
140  __ movsd(MemOperand(esp, count * kDoubleSize),
141  XMMRegister::FromAllocationIndex(save_iterator.Current()));
142  save_iterator.Advance();
143  count++;
144  }
145 }
146 
147 
148 void LCodeGen::RestoreCallerDoubles() {
149  ASSERT(info()->saves_caller_doubles());
150  ASSERT(NeedsEagerFrame());
151  Comment(";;; Restore clobbered callee double registers");
152  CpuFeatureScope scope(masm(), SSE2);
153  BitVector* doubles = chunk()->allocated_double_registers();
154  BitVector::Iterator save_iterator(doubles);
155  int count = 0;
156  while (!save_iterator.Done()) {
157  __ movsd(XMMRegister::FromAllocationIndex(save_iterator.Current()),
158  MemOperand(esp, count * kDoubleSize));
159  save_iterator.Advance();
160  count++;
161  }
162 }
163 
164 
165 bool LCodeGen::GeneratePrologue() {
166  ASSERT(is_generating());
167 
168  if (info()->IsOptimizing()) {
169  ProfileEntryHookStub::MaybeCallEntryHook(masm_);
170 
171 #ifdef DEBUG
172  if (strlen(FLAG_stop_at) > 0 &&
173  info_->function()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
174  __ int3();
175  }
176 #endif
177 
178  // Sloppy mode functions and builtins need to replace the receiver with the
179  // global proxy when called as functions (without an explicit receiver
180  // object).
181  if (info_->this_has_uses() &&
182  info_->strict_mode() == SLOPPY &&
183  !info_->is_native()) {
184  Label ok;
185  // +1 for return address.
186  int receiver_offset = (scope()->num_parameters() + 1) * kPointerSize;
187  __ mov(ecx, Operand(esp, receiver_offset));
188 
189  __ cmp(ecx, isolate()->factory()->undefined_value());
190  __ j(not_equal, &ok, Label::kNear);
191 
192  __ mov(ecx, GlobalObjectOperand());
193  __ mov(ecx, FieldOperand(ecx, GlobalObject::kGlobalReceiverOffset));
194 
195  __ mov(Operand(esp, receiver_offset), ecx);
196 
197  __ bind(&ok);
198  }
199 
200  if (support_aligned_spilled_doubles_ && dynamic_frame_alignment_) {
201  // Move state of dynamic frame alignment into edx.
202  __ Move(edx, Immediate(kNoAlignmentPadding));
203 
204  Label do_not_pad, align_loop;
205  STATIC_ASSERT(kDoubleSize == 2 * kPointerSize);
206  // Align esp + 4 to a multiple of 2 * kPointerSize.
207  __ test(esp, Immediate(kPointerSize));
208  __ j(not_zero, &do_not_pad, Label::kNear);
209  __ push(Immediate(0));
210  __ mov(ebx, esp);
211  __ mov(edx, Immediate(kAlignmentPaddingPushed));
212  // Copy arguments, receiver, and return address.
213  __ mov(ecx, Immediate(scope()->num_parameters() + 2));
214 
215  __ bind(&align_loop);
216  __ mov(eax, Operand(ebx, 1 * kPointerSize));
217  __ mov(Operand(ebx, 0), eax);
218  __ add(Operand(ebx), Immediate(kPointerSize));
219  __ dec(ecx);
220  __ j(not_zero, &align_loop, Label::kNear);
221  __ mov(Operand(ebx, 0), Immediate(kAlignmentZapValue));
222  __ bind(&do_not_pad);
223  }
224  }
225 
226  info()->set_prologue_offset(masm_->pc_offset());
227  if (NeedsEagerFrame()) {
228  ASSERT(!frame_is_built_);
229  frame_is_built_ = true;
230  __ Prologue(info()->IsStub() ? BUILD_STUB_FRAME : BUILD_FUNCTION_FRAME);
231  info()->AddNoFrameRange(0, masm_->pc_offset());
232  }
233 
234  if (info()->IsOptimizing() &&
235  dynamic_frame_alignment_ &&
236  FLAG_debug_code) {
237  __ test(esp, Immediate(kPointerSize));
238  __ Assert(zero, kFrameIsExpectedToBeAligned);
239  }
240 
241  // Reserve space for the stack slots needed by the code.
242  int slots = GetStackSlotCount();
243  ASSERT(slots != 0 || !info()->IsOptimizing());
244  if (slots > 0) {
245  if (slots == 1) {
246  if (dynamic_frame_alignment_) {
247  __ push(edx);
248  } else {
249  __ push(Immediate(kNoAlignmentPadding));
250  }
251  } else {
252  if (FLAG_debug_code) {
253  __ sub(Operand(esp), Immediate(slots * kPointerSize));
254 #ifdef _MSC_VER
255  MakeSureStackPagesMapped(slots * kPointerSize);
256 #endif
257  __ push(eax);
258  __ mov(Operand(eax), Immediate(slots));
259  Label loop;
260  __ bind(&loop);
261  __ mov(MemOperand(esp, eax, times_4, 0),
262  Immediate(kSlotsZapValue));
263  __ dec(eax);
264  __ j(not_zero, &loop);
265  __ pop(eax);
266  } else {
267  __ sub(Operand(esp), Immediate(slots * kPointerSize));
268 #ifdef _MSC_VER
269  MakeSureStackPagesMapped(slots * kPointerSize);
270 #endif
271  }
272 
273  if (support_aligned_spilled_doubles_) {
274  Comment(";;; Store dynamic frame alignment tag for spilled doubles");
275  // Store dynamic frame alignment state in the first local.
276  int offset = JavaScriptFrameConstants::kDynamicAlignmentStateOffset;
277  if (dynamic_frame_alignment_) {
278  __ mov(Operand(ebp, offset), edx);
279  } else {
280  __ mov(Operand(ebp, offset), Immediate(kNoAlignmentPadding));
281  }
282  }
283  }
284 
285  if (info()->saves_caller_doubles() && CpuFeatures::IsSupported(SSE2)) {
286  SaveCallerDoubles();
287  }
288  }
289 
290  // Possibly allocate a local context.
291  int heap_slots = info_->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
292  if (heap_slots > 0) {
293  Comment(";;; Allocate local context");
294  // Argument to NewContext is the function, which is still in edi.
295  if (heap_slots <= FastNewContextStub::kMaximumSlots) {
296  FastNewContextStub stub(heap_slots);
297  __ CallStub(&stub);
298  } else {
299  __ push(edi);
300  __ CallRuntime(Runtime::kHiddenNewFunctionContext, 1);
301  }
302  RecordSafepoint(Safepoint::kNoLazyDeopt);
303  // Context is returned in eax. It replaces the context passed to us.
304  // It's saved in the stack and kept live in esi.
305  __ mov(esi, eax);
306  __ mov(Operand(ebp, StandardFrameConstants::kContextOffset), eax);
307 
308  // Copy parameters into context if necessary.
309  int num_parameters = scope()->num_parameters();
310  for (int i = 0; i < num_parameters; i++) {
311  Variable* var = scope()->parameter(i);
312  if (var->IsContextSlot()) {
313  int parameter_offset = StandardFrameConstants::kCallerSPOffset +
314  (num_parameters - 1 - i) * kPointerSize;
315  // Load parameter from stack.
316  __ mov(eax, Operand(ebp, parameter_offset));
317  // Store it in the context.
318  int context_offset = Context::SlotOffset(var->index());
319  __ mov(Operand(esi, context_offset), eax);
320  // Update the write barrier. This clobbers eax and ebx.
321  __ RecordWriteContextSlot(esi,
322  context_offset,
323  eax,
324  ebx,
325  kDontSaveFPRegs);
326  }
327  }
328  Comment(";;; End allocate local context");
329  }
330 
331  // Trace the call.
332  if (FLAG_trace && info()->IsOptimizing()) {
333  // We have not executed any compiled code yet, so esi still holds the
334  // incoming context.
335  __ CallRuntime(Runtime::kTraceEnter, 0);
336  }
337  return !is_aborted();
338 }
339 
340 
341 void LCodeGen::GenerateOsrPrologue() {
342  // Generate the OSR entry prologue at the first unknown OSR value, or if there
343  // are none, at the OSR entrypoint instruction.
344  if (osr_pc_offset_ >= 0) return;
345 
346  osr_pc_offset_ = masm()->pc_offset();
347 
348  // Move state of dynamic frame alignment into edx.
349  __ Move(edx, Immediate(kNoAlignmentPadding));
350 
351  if (support_aligned_spilled_doubles_ && dynamic_frame_alignment_) {
352  Label do_not_pad, align_loop;
353  // Align ebp + 4 to a multiple of 2 * kPointerSize.
354  __ test(ebp, Immediate(kPointerSize));
355  __ j(zero, &do_not_pad, Label::kNear);
356  __ push(Immediate(0));
357  __ mov(ebx, esp);
358  __ mov(edx, Immediate(kAlignmentPaddingPushed));
359 
360  // Move all parts of the frame over one word. The frame consists of:
361  // unoptimized frame slots, alignment state, context, frame pointer, return
362  // address, receiver, and the arguments.
363  __ mov(ecx, Immediate(scope()->num_parameters() +
364  5 + graph()->osr()->UnoptimizedFrameSlots()));
365 
366  __ bind(&align_loop);
367  __ mov(eax, Operand(ebx, 1 * kPointerSize));
368  __ mov(Operand(ebx, 0), eax);
369  __ add(Operand(ebx), Immediate(kPointerSize));
370  __ dec(ecx);
371  __ j(not_zero, &align_loop, Label::kNear);
372  __ mov(Operand(ebx, 0), Immediate(kAlignmentZapValue));
373  __ sub(Operand(ebp), Immediate(kPointerSize));
374  __ bind(&do_not_pad);
375  }
376 
377  // Save the first local, which is overwritten by the alignment state.
378  Operand alignment_loc = MemOperand(ebp, -3 * kPointerSize);
379  __ push(alignment_loc);
380 
381  // Set the dynamic frame alignment state.
382  __ mov(alignment_loc, edx);
383 
384  // Adjust the frame size, subsuming the unoptimized frame into the
385  // optimized frame.
386  int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots();
387  ASSERT(slots >= 1);
388  __ sub(esp, Immediate((slots - 1) * kPointerSize));
389 }
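// Dynamic frame alignment, roughly: when spilled doubles need 8-byte
// alignment but esp + 4 is not a multiple of 2 * kPointerSize, one extra
// word is pushed and every existing frame word is shifted down one slot by
// the align_loop above. The vacated slot is tagged with kAlignmentZapValue,
// and edx carries kNoAlignmentPadding or kAlignmentPaddingPushed so the
// state can be stored in the first local and undone when the frame is torn
// down.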
390 
391 
392 void LCodeGen::GenerateBodyInstructionPre(LInstruction* instr) {
393  if (instr->IsCall()) {
394  EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
395  }
396  if (!instr->IsLazyBailout() && !instr->IsGap()) {
397  safepoints_.BumpLastLazySafepointIndex();
398  }
399  if (!CpuFeatures::IsSupported(SSE2)) FlushX87StackIfNecessary(instr);
400 }
401 
402 
403 void LCodeGen::GenerateBodyInstructionPost(LInstruction* instr) {
404  if (!CpuFeatures::IsSupported(SSE2)) {
405  if (instr->IsGoto()) {
406  x87_stack_.LeavingBlock(current_block_, LGoto::cast(instr));
407  } else if (FLAG_debug_code && FLAG_enable_slow_asserts &&
408  !instr->IsGap() && !instr->IsReturn()) {
409  if (instr->ClobbersDoubleRegisters()) {
410  if (instr->HasDoubleRegisterResult()) {
411  ASSERT_EQ(1, x87_stack_.depth());
412  } else {
413  ASSERT_EQ(0, x87_stack_.depth());
414  }
415  }
416  __ VerifyX87StackDepth(x87_stack_.depth());
417  }
418  }
419 }
420 
421 
422 bool LCodeGen::GenerateJumpTable() {
423  Label needs_frame;
424  if (jump_table_.length() > 0) {
425  Comment(";;; -------------------- Jump table --------------------");
426  }
427  for (int i = 0; i < jump_table_.length(); i++) {
428  __ bind(&jump_table_[i].label);
429  Address entry = jump_table_[i].address;
430  Deoptimizer::BailoutType type = jump_table_[i].bailout_type;
431  int id = Deoptimizer::GetDeoptimizationId(isolate(), entry, type);
432  if (id == Deoptimizer::kNotDeoptimizationEntry) {
433  Comment(";;; jump table entry %d.", i);
434  } else {
435  Comment(";;; jump table entry %d: deoptimization bailout %d.", i, id);
436  }
437  if (jump_table_[i].needs_frame) {
438  ASSERT(!info()->saves_caller_doubles());
439  __ push(Immediate(ExternalReference::ForDeoptEntry(entry)));
440  if (needs_frame.is_bound()) {
441  __ jmp(&needs_frame);
442  } else {
443  __ bind(&needs_frame);
444  __ push(MemOperand(ebp, StandardFrameConstants::kContextOffset));
445  // This variant of deopt can only be used with stubs. Since we don't
446  // have a function pointer to install in the stack frame that we're
447  // building, install a special marker there instead.
448  ASSERT(info()->IsStub());
449  __ push(Immediate(Smi::FromInt(StackFrame::STUB)));
450  // Push a PC inside the function so that the deopt code can find where
451  // the deopt comes from. It doesn't have to be the precise return
452  // address of a "calling" LAZY deopt, it only has to be somewhere
453  // inside the code body.
454  Label push_approx_pc;
455  __ call(&push_approx_pc);
456  __ bind(&push_approx_pc);
457  // Push the continuation which was stashed where the ebp should
458  // be. Replace it with the saved ebp.
459  __ push(MemOperand(esp, 3 * kPointerSize));
460  __ mov(MemOperand(esp, 4 * kPointerSize), ebp);
461  __ lea(ebp, MemOperand(esp, 4 * kPointerSize));
462  __ ret(0); // Call the continuation without clobbering registers.
463  }
464  } else {
465  if (info()->saves_caller_doubles() && CpuFeatures::IsSupported(SSE2)) {
466  RestoreCallerDoubles();
467  }
468  __ call(entry, RelocInfo::RUNTIME_ENTRY);
469  }
470  }
471  return !is_aborted();
472 }
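// Jump table sketch: every entry transfers control to the deoptimizer. If a
// frame already exists the entry address is called directly. For frameless
// stub code the shared needs_frame path above first pushes the entry
// address, then lays out a minimal STUB frame (context, STUB marker, an
// approximate pc, the saved ebp), and finally uses ret(0) so control reaches
// the pushed entry without clobbering any registers.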
473 
474 
475 bool LCodeGen::GenerateDeferredCode() {
476  ASSERT(is_generating());
477  if (deferred_.length() > 0) {
478  for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
479  LDeferredCode* code = deferred_[i];
480  X87Stack copy(code->x87_stack());
481  x87_stack_ = copy;
482 
483  HValue* value =
484  instructions_->at(code->instruction_index())->hydrogen_value();
485  RecordAndWritePosition(
486  chunk()->graph()->SourcePositionToScriptPosition(value->position()));
487 
488  Comment(";;; <@%d,#%d> "
489  "-------------------- Deferred %s --------------------",
490  code->instruction_index(),
491  code->instr()->hydrogen_value()->id(),
492  code->instr()->Mnemonic());
493  __ bind(code->entry());
494  if (NeedsDeferredFrame()) {
495  Comment(";;; Build frame");
496  ASSERT(!frame_is_built_);
497  ASSERT(info()->IsStub());
498  frame_is_built_ = true;
499  // Build the frame in such a way that esi isn't trashed.
500  __ push(ebp); // Caller's frame pointer.
501  __ push(Operand(ebp, StandardFrameConstants::kContextOffset));
502  __ push(Immediate(Smi::FromInt(StackFrame::STUB)));
503  __ lea(ebp, Operand(esp, 2 * kPointerSize));
504  Comment(";;; Deferred code");
505  }
506  code->Generate();
507  if (NeedsDeferredFrame()) {
508  __ bind(code->done());
509  Comment(";;; Destroy frame");
510  ASSERT(frame_is_built_);
511  frame_is_built_ = false;
512  __ mov(esp, ebp);
513  __ pop(ebp);
514  }
515  __ jmp(code->exit());
516  }
517  }
518 
519  // Deferred code is the last part of the instruction sequence. Mark
520  // the generated code as done unless we bailed out.
521  if (!is_aborted()) status_ = DONE;
522  return !is_aborted();
523 }
524 
525 
526 bool LCodeGen::GenerateSafepointTable() {
527  ASSERT(is_done());
528  if (!info()->IsStub()) {
529  // For lazy deoptimization we need space to patch a call after every call.
530  // Ensure there is always space for such patching, even if the code ends
531  // in a call.
532  int target_offset = masm()->pc_offset() + Deoptimizer::patch_size();
533  while (masm()->pc_offset() < target_offset) {
534  masm()->nop();
535  }
536  }
537  safepoints_.Emit(masm(), GetStackSlotCount());
538  return !is_aborted();
539 }
540 
541 
542 Register LCodeGen::ToRegister(int index) const {
543  return Register::FromAllocationIndex(index);
544 }
545 
546 
547 X87Register LCodeGen::ToX87Register(int index) const {
548  return X87Register::FromAllocationIndex(index);
549 }
550 
551 
552 XMMRegister LCodeGen::ToDoubleRegister(int index) const {
553  return XMMRegister::FromAllocationIndex(index);
554 }
555 
556 
557 void LCodeGen::X87LoadForUsage(X87Register reg) {
558  ASSERT(x87_stack_.Contains(reg));
559  x87_stack_.Fxch(reg);
560  x87_stack_.pop();
561 }
562 
563 
564 void LCodeGen::X87LoadForUsage(X87Register reg1, X87Register reg2) {
565  ASSERT(x87_stack_.Contains(reg1));
566  ASSERT(x87_stack_.Contains(reg2));
567  x87_stack_.Fxch(reg1, 1);
568  x87_stack_.Fxch(reg2);
569  x87_stack_.pop();
570  x87_stack_.pop();
571 }
572 
573 
574 void LCodeGen::X87Stack::Fxch(X87Register reg, int other_slot) {
575  ASSERT(is_mutable_);
576  ASSERT(Contains(reg) && stack_depth_ > other_slot);
577  int i = ArrayIndex(reg);
578  int st = st2idx(i);
579  if (st != other_slot) {
580  int other_i = st2idx(other_slot);
581  X87Register other = stack_[other_i];
582  stack_[other_i] = reg;
583  stack_[i] = other;
584  if (st == 0) {
585  __ fxch(other_slot);
586  } else if (other_slot == 0) {
587  __ fxch(st);
588  } else {
589  __ fxch(st);
590  __ fxch(other_slot);
591  __ fxch(st);
592  }
593  }
594 }
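// With neither slot at the top, the three exchanges above swap st(st) and
// st(other_slot) while leaving every other slot, including st(0), where it
// was: fxch(st) brings the first value to the top, fxch(other_slot) drops it
// into the other slot, and the final fxch(st) restores the old top.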
595 
596 
597 int LCodeGen::X87Stack::st2idx(int pos) {
598  return stack_depth_ - pos - 1;
599 }
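// The virtual stack is stored bottom-up, with stack_[stack_depth_ - 1]
// mirroring the physical top of the x87 stack, hence st(pos) lives at
// stack_[stack_depth_ - pos - 1]. For example, at depth 3: st(0) is
// stack_[2] and st(2) is stack_[0].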
600 
601 
602 int LCodeGen::X87Stack::ArrayIndex(X87Register reg) {
603  for (int i = 0; i < stack_depth_; i++) {
604  if (stack_[i].is(reg)) return i;
605  }
606  UNREACHABLE();
607  return -1;
608 }
609 
610 
611 bool LCodeGen::X87Stack::Contains(X87Register reg) {
612  for (int i = 0; i < stack_depth_; i++) {
613  if (stack_[i].is(reg)) return true;
614  }
615  return false;
616 }
617 
618 
619 void LCodeGen::X87Stack::Free(X87Register reg) {
620  ASSERT(is_mutable_);
621  ASSERT(Contains(reg));
622  int i = ArrayIndex(reg);
623  int st = st2idx(i);
624  if (st > 0) {
625  // keep track of how fstp(i) changes the order of elements
626  int tos_i = st2idx(0);
627  stack_[i] = stack_[tos_i];
628  }
629  pop();
630  __ fstp(st);
631 }
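// fstp(st) stores st(0) into st(st) and then pops, so freeing a register
// that is not on top leaves the old top value in the freed physical slot;
// the stack_[i] = stack_[tos_i] assignment above keeps the virtual stack in
// sync before the matching pop().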
632 
633 
634 void LCodeGen::X87Mov(X87Register dst, Operand src, X87OperandType opts) {
635  if (x87_stack_.Contains(dst)) {
636  x87_stack_.Fxch(dst);
637  __ fstp(0);
638  } else {
639  x87_stack_.push(dst);
640  }
641  X87Fld(src, opts);
642 }
643 
644 
645 void LCodeGen::X87Fld(Operand src, X87OperandType opts) {
646  ASSERT(!src.is_reg_only());
647  switch (opts) {
648  case kX87DoubleOperand:
649  __ fld_d(src);
650  break;
651  case kX87FloatOperand:
652  __ fld_s(src);
653  break;
654  case kX87IntOperand:
655  __ fild_s(src);
656  break;
657  default:
658  UNREACHABLE();
659  }
660 }
661 
662 
663 void LCodeGen::X87Mov(Operand dst, X87Register src, X87OperandType opts) {
664  ASSERT(!dst.is_reg_only());
665  x87_stack_.Fxch(src);
666  switch (opts) {
667  case kX87DoubleOperand:
668  __ fst_d(dst);
669  break;
670  case kX87IntOperand:
671  __ fist_s(dst);
672  break;
673  default:
674  UNREACHABLE();
675  }
676 }
677 
678 
679 void LCodeGen::X87Stack::PrepareToWrite(X87Register reg) {
680  ASSERT(is_mutable_);
681  if (Contains(reg)) {
682  Free(reg);
683  }
684  // Mark this register as the next register to write to
685  stack_[stack_depth_] = reg;
686 }
687 
688 
689 void LCodeGen::X87Stack::CommitWrite(X87Register reg) {
690  ASSERT(is_mutable_);
691  // Assert the reg is prepared to write, but not on the virtual stack yet
692  ASSERT(!Contains(reg) && stack_[stack_depth_].is(reg) &&
693  stack_depth_ < X87Register::kMaxNumAllocatableRegisters);
694  stack_depth_++;
695 }
696 
697 
698 void LCodeGen::X87PrepareBinaryOp(
699  X87Register left, X87Register right, X87Register result) {
700  // You need to use DefineSameAsFirst for x87 instructions
701  ASSERT(result.is(left));
702  x87_stack_.Fxch(right, 1);
703  x87_stack_.Fxch(left);
704 }
705 
706 
707 void LCodeGen::X87Stack::FlushIfNecessary(LInstruction* instr, LCodeGen* cgen) {
708  if (stack_depth_ > 0 && instr->ClobbersDoubleRegisters()) {
709  bool double_inputs = instr->HasDoubleRegisterInput();
710 
711  // Flush stack from tos down, since FreeX87() will mess with tos
712  for (int i = stack_depth_-1; i >= 0; i--) {
713  X87Register reg = stack_[i];
714  // Skip registers which contain the inputs for the next instruction
715  // when flushing the stack
716  if (double_inputs && instr->IsDoubleInput(reg, cgen)) {
717  continue;
718  }
719  Free(reg);
720  if (i < stack_depth_-1) i++;
721  }
722  }
723  if (instr->IsReturn()) {
724  while (stack_depth_ > 0) {
725  __ fstp(0);
726  stack_depth_--;
727  }
728  if (FLAG_debug_code && FLAG_enable_slow_asserts) __ VerifyX87StackDepth(0);
729  }
730 }
731 
732 
733 void LCodeGen::X87Stack::LeavingBlock(int current_block_id, LGoto* goto_instr) {
734  ASSERT(stack_depth_ <= 1);
735  // If ever used for new stubs producing two pairs of doubles joined into two
736  // phis this assert hits. That situation is not handled, since the two stacks
737  // might have st0 and st1 swapped.
738  if (current_block_id + 1 != goto_instr->block_id()) {
739  // If we have a value on the x87 stack on leaving a block, it must be a
740  // phi input. If the next block we compile is not the join block, we have
741  // to discard the stack state.
742  stack_depth_ = 0;
743  }
744 }
745 
746 
747 void LCodeGen::EmitFlushX87ForDeopt() {
748  // The deoptimizer does not support X87 Registers. But as long as we
749  // deopt from a stub it's not a problem, since we will re-materialize the
750  // original stub inputs, which can't be double registers.
751  ASSERT(info()->IsStub());
752  if (FLAG_debug_code && FLAG_enable_slow_asserts) {
753  __ pushfd();
754  __ VerifyX87StackDepth(x87_stack_.depth());
755  __ popfd();
756  }
757  for (int i = 0; i < x87_stack_.depth(); i++) __ fstp(0);
758 }
759 
760 
761 Register LCodeGen::ToRegister(LOperand* op) const {
762  ASSERT(op->IsRegister());
763  return ToRegister(op->index());
764 }
765 
766 
767 X87Register LCodeGen::ToX87Register(LOperand* op) const {
768  ASSERT(op->IsDoubleRegister());
769  return ToX87Register(op->index());
770 }
771 
772 
773 XMMRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
774  ASSERT(op->IsDoubleRegister());
775  return ToDoubleRegister(op->index());
776 }
777 
778 
779 int32_t LCodeGen::ToInteger32(LConstantOperand* op) const {
780  return ToRepresentation(op, Representation::Integer32());
781 }
782 
783 
784 int32_t LCodeGen::ToRepresentation(LConstantOperand* op,
785  const Representation& r) const {
786  HConstant* constant = chunk_->LookupConstant(op);
787  int32_t value = constant->Integer32Value();
788  if (r.IsInteger32()) return value;
789  ASSERT(r.IsSmiOrTagged());
790  return reinterpret_cast<int32_t>(Smi::FromInt(value));
791 }
792 
793 
794 Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
795  HConstant* constant = chunk_->LookupConstant(op);
796  ASSERT(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged());
797  return constant->handle(isolate());
798 }
799 
800 
801 double LCodeGen::ToDouble(LConstantOperand* op) const {
802  HConstant* constant = chunk_->LookupConstant(op);
803  ASSERT(constant->HasDoubleValue());
804  return constant->DoubleValue();
805 }
806 
807 
808 ExternalReference LCodeGen::ToExternalReference(LConstantOperand* op) const {
809  HConstant* constant = chunk_->LookupConstant(op);
810  ASSERT(constant->HasExternalReferenceValue());
811  return constant->ExternalReferenceValue();
812 }
813 
814 
815 bool LCodeGen::IsInteger32(LConstantOperand* op) const {
816  return chunk_->LookupLiteralRepresentation(op).IsSmiOrInteger32();
817 }
818 
819 
820 bool LCodeGen::IsSmi(LConstantOperand* op) const {
821  return chunk_->LookupLiteralRepresentation(op).IsSmi();
822 }
823 
824 
825 static int ArgumentsOffsetWithoutFrame(int index) {
826  ASSERT(index < 0);
827  return -(index + 1) * kPointerSize + kPCOnStackSize;
828 }
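// Example on ia32 (kPointerSize == kPCOnStackSize == 4): parameters have
// negative indices, so index -1 resolves to esp + 4 (just above the return
// address) and index -2 to esp + 8, matching the layout used when no eager
// frame has been built.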
829 
830 
831 Operand LCodeGen::ToOperand(LOperand* op) const {
832  if (op->IsRegister()) return Operand(ToRegister(op));
833  if (op->IsDoubleRegister()) return Operand(ToDoubleRegister(op));
834  ASSERT(op->IsStackSlot() || op->IsDoubleStackSlot());
835  if (NeedsEagerFrame()) {
836  return Operand(ebp, StackSlotOffset(op->index()));
837  } else {
838  // There is no eager frame, so address the parameter relative to the
839  // stack pointer.
840  return Operand(esp, ArgumentsOffsetWithoutFrame(op->index()));
841  }
842 }
843 
844 
845 Operand LCodeGen::HighOperand(LOperand* op) {
846  ASSERT(op->IsDoubleStackSlot());
847  if (NeedsEagerFrame()) {
848  return Operand(ebp, StackSlotOffset(op->index()) + kPointerSize);
849  } else {
850  // There is no eager frame, so address the parameter relative to the
851  // stack pointer.
852  return Operand(
853  esp, ArgumentsOffsetWithoutFrame(op->index()) + kPointerSize);
854  }
855 }
856 
857 
858 void LCodeGen::WriteTranslation(LEnvironment* environment,
859  Translation* translation) {
860  if (environment == NULL) return;
861 
862  // The translation includes one command per value in the environment.
863  int translation_size = environment->translation_size();
864  // The output frame height does not include the parameters.
865  int height = translation_size - environment->parameter_count();
866 
867  WriteTranslation(environment->outer(), translation);
868  bool has_closure_id = !info()->closure().is_null() &&
869  !info()->closure().is_identical_to(environment->closure());
870  int closure_id = has_closure_id
871  ? DefineDeoptimizationLiteral(environment->closure())
872  : Translation::kSelfLiteralId;
873  switch (environment->frame_type()) {
874  case JS_FUNCTION:
875  translation->BeginJSFrame(environment->ast_id(), closure_id, height);
876  break;
877  case JS_CONSTRUCT:
878  translation->BeginConstructStubFrame(closure_id, translation_size);
879  break;
880  case JS_GETTER:
881  ASSERT(translation_size == 1);
882  ASSERT(height == 0);
883  translation->BeginGetterStubFrame(closure_id);
884  break;
885  case JS_SETTER:
886  ASSERT(translation_size == 2);
887  ASSERT(height == 0);
888  translation->BeginSetterStubFrame(closure_id);
889  break;
890  case ARGUMENTS_ADAPTOR:
891  translation->BeginArgumentsAdaptorFrame(closure_id, translation_size);
892  break;
893  case STUB:
894  translation->BeginCompiledStubFrame();
895  break;
896  default:
897  UNREACHABLE();
898  }
899 
900  int object_index = 0;
901  int dematerialized_index = 0;
902  for (int i = 0; i < translation_size; ++i) {
903  LOperand* value = environment->values()->at(i);
904  AddToTranslation(environment,
905  translation,
906  value,
907  environment->HasTaggedValueAt(i),
908  environment->HasUint32ValueAt(i),
909  &object_index,
910  &dematerialized_index);
911  }
912 }
913 
914 
915 void LCodeGen::AddToTranslation(LEnvironment* environment,
916  Translation* translation,
917  LOperand* op,
918  bool is_tagged,
919  bool is_uint32,
920  int* object_index_pointer,
921  int* dematerialized_index_pointer) {
922  if (op == LEnvironment::materialization_marker()) {
923  int object_index = (*object_index_pointer)++;
924  if (environment->ObjectIsDuplicateAt(object_index)) {
925  int dupe_of = environment->ObjectDuplicateOfAt(object_index);
926  translation->DuplicateObject(dupe_of);
927  return;
928  }
929  int object_length = environment->ObjectLengthAt(object_index);
930  if (environment->ObjectIsArgumentsAt(object_index)) {
931  translation->BeginArgumentsObject(object_length);
932  } else {
933  translation->BeginCapturedObject(object_length);
934  }
935  int dematerialized_index = *dematerialized_index_pointer;
936  int env_offset = environment->translation_size() + dematerialized_index;
937  *dematerialized_index_pointer += object_length;
938  for (int i = 0; i < object_length; ++i) {
939  LOperand* value = environment->values()->at(env_offset + i);
940  AddToTranslation(environment,
941  translation,
942  value,
943  environment->HasTaggedValueAt(env_offset + i),
944  environment->HasUint32ValueAt(env_offset + i),
945  object_index_pointer,
946  dematerialized_index_pointer);
947  }
948  return;
949  }
950 
951  if (op->IsStackSlot()) {
952  if (is_tagged) {
953  translation->StoreStackSlot(op->index());
954  } else if (is_uint32) {
955  translation->StoreUint32StackSlot(op->index());
956  } else {
957  translation->StoreInt32StackSlot(op->index());
958  }
959  } else if (op->IsDoubleStackSlot()) {
960  translation->StoreDoubleStackSlot(op->index());
961  } else if (op->IsRegister()) {
962  Register reg = ToRegister(op);
963  if (is_tagged) {
964  translation->StoreRegister(reg);
965  } else if (is_uint32) {
966  translation->StoreUint32Register(reg);
967  } else {
968  translation->StoreInt32Register(reg);
969  }
970  } else if (op->IsDoubleRegister()) {
971  XMMRegister reg = ToDoubleRegister(op);
972  translation->StoreDoubleRegister(reg);
973  } else if (op->IsConstantOperand()) {
974  HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op));
975  int src_index = DefineDeoptimizationLiteral(constant->handle(isolate()));
976  translation->StoreLiteral(src_index);
977  } else {
978  UNREACHABLE();
979  }
980 }
981 
982 
983 void LCodeGen::CallCodeGeneric(Handle<Code> code,
984  RelocInfo::Mode mode,
985  LInstruction* instr,
986  SafepointMode safepoint_mode) {
987  ASSERT(instr != NULL);
988  __ call(code, mode);
989  RecordSafepointWithLazyDeopt(instr, safepoint_mode);
990 
991  // Signal that we don't inline smi code before these stubs in the
992  // optimizing code generator.
993  if (code->kind() == Code::BINARY_OP_IC ||
994  code->kind() == Code::COMPARE_IC) {
995  __ nop();
996  }
997 }
998 
999 
1000 void LCodeGen::CallCode(Handle<Code> code,
1001  RelocInfo::Mode mode,
1002  LInstruction* instr) {
1003  CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT);
1004 }
1005 
1006 
1007 void LCodeGen::CallRuntime(const Runtime::Function* fun,
1008  int argc,
1009  LInstruction* instr,
1010  SaveFPRegsMode save_doubles) {
1011  ASSERT(instr != NULL);
1012  ASSERT(instr->HasPointerMap());
1013 
1014  __ CallRuntime(fun, argc, save_doubles);
1015 
1016  RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
1017 
1018  ASSERT(info()->is_calling());
1019 }
1020 
1021 
1022 void LCodeGen::LoadContextFromDeferred(LOperand* context) {
1023  if (context->IsRegister()) {
1024  if (!ToRegister(context).is(esi)) {
1025  __ mov(esi, ToRegister(context));
1026  }
1027  } else if (context->IsStackSlot()) {
1028  __ mov(esi, ToOperand(context));
1029  } else if (context->IsConstantOperand()) {
1030  HConstant* constant =
1031  chunk_->LookupConstant(LConstantOperand::cast(context));
1032  __ LoadObject(esi, Handle<Object>::cast(constant->handle(isolate())));
1033  } else {
1034  UNREACHABLE();
1035  }
1036 }
1037 
1038 void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
1039  int argc,
1040  LInstruction* instr,
1041  LOperand* context) {
1042  LoadContextFromDeferred(context);
1043 
1044  __ CallRuntimeSaveDoubles(id);
1045  RecordSafepointWithRegisters(
1046  instr->pointer_map(), argc, Safepoint::kNoLazyDeopt);
1047 
1048  ASSERT(info()->is_calling());
1049 }
1050 
1051 
1052 void LCodeGen::RegisterEnvironmentForDeoptimization(
1053  LEnvironment* environment, Safepoint::DeoptMode mode) {
1054  if (!environment->HasBeenRegistered()) {
1055  // Physical stack frame layout:
1056  // -x ............. -4 0 ..................................... y
1057  // [incoming arguments] [spill slots] [pushed outgoing arguments]
1058 
1059  // Layout of the environment:
1060  // 0 ..................................................... size-1
1061  // [parameters] [locals] [expression stack including arguments]
1062 
1063  // Layout of the translation:
1064  // 0 ........................................................ size - 1 + 4
1065  // [expression stack including arguments] [locals] [4 words] [parameters]
1066  // |>------------ translation_size ------------<|
1067 
1068  int frame_count = 0;
1069  int jsframe_count = 0;
1070  for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
1071  ++frame_count;
1072  if (e->frame_type() == JS_FUNCTION) {
1073  ++jsframe_count;
1074  }
1075  }
1076  Translation translation(&translations_, frame_count, jsframe_count, zone());
1077  WriteTranslation(environment, &translation);
1078  int deoptimization_index = deoptimizations_.length();
1079  int pc_offset = masm()->pc_offset();
1080  environment->Register(deoptimization_index,
1081  translation.index(),
1082  (mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
1083  deoptimizations_.Add(environment, zone());
1084  }
1085 }
1086 
1087 
1088 void LCodeGen::DeoptimizeIf(Condition cc,
1089  LEnvironment* environment,
1090  Deoptimizer::BailoutType bailout_type) {
1091  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
1092  ASSERT(environment->HasBeenRegistered());
1093  int id = environment->deoptimization_index();
1094  ASSERT(info()->IsOptimizing() || info()->IsStub());
1095  Address entry =
1096  Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type);
1097  if (entry == NULL) {
1098  Abort(kBailoutWasNotPrepared);
1099  return;
1100  }
1101 
1102  if (DeoptEveryNTimes()) {
1103  ExternalReference count = ExternalReference::stress_deopt_count(isolate());
1104  Label no_deopt;
1105  __ pushfd();
1106  __ push(eax);
1107  __ mov(eax, Operand::StaticVariable(count));
1108  __ sub(eax, Immediate(1));
1109  __ j(not_zero, &no_deopt, Label::kNear);
1110  if (FLAG_trap_on_deopt) __ int3();
1111  __ mov(eax, Immediate(FLAG_deopt_every_n_times));
1112  __ mov(Operand::StaticVariable(count), eax);
1113  __ pop(eax);
1114  __ popfd();
1115  ASSERT(frame_is_built_);
1116  __ call(entry, RelocInfo::RUNTIME_ENTRY);
1117  __ bind(&no_deopt);
1118  __ mov(Operand::StaticVariable(count), eax);
1119  __ pop(eax);
1120  __ popfd();
1121  }
1122 
1123  // Before Instructions which can deopt, we normally flush the x87 stack. But
1124  // we can have inputs or outputs of the current instruction on the stack,
1125  // thus we need to flush them here from the physical stack to leave it in a
1126  // consistent state.
1127  if (x87_stack_.depth() > 0) {
1128  Label done;
1129  if (cc != no_condition) __ j(NegateCondition(cc), &done, Label::kNear);
1130  EmitFlushX87ForDeopt();
1131  __ bind(&done);
1132  }
1133 
1134  if (info()->ShouldTrapOnDeopt()) {
1135  Label done;
1136  if (cc != no_condition) __ j(NegateCondition(cc), &done, Label::kNear);
1137  __ int3();
1138  __ bind(&done);
1139  }
1140 
1141  ASSERT(info()->IsStub() || frame_is_built_);
1142  if (cc == no_condition && frame_is_built_) {
1143  __ call(entry, RelocInfo::RUNTIME_ENTRY);
1144  } else {
1145  // We often have several deopts to the same entry, reuse the last
1146  // jump entry if this is the case.
1147  if (jump_table_.is_empty() ||
1148  jump_table_.last().address != entry ||
1149  jump_table_.last().needs_frame != !frame_is_built_ ||
1150  jump_table_.last().bailout_type != bailout_type) {
1151  Deoptimizer::JumpTableEntry table_entry(entry,
1152  bailout_type,
1153  !frame_is_built_);
1154  jump_table_.Add(table_entry, zone());
1155  }
1156  if (cc == no_condition) {
1157  __ jmp(&jump_table_.last().label);
1158  } else {
1159  __ j(cc, &jump_table_.last().label);
1160  }
1161  }
1162 }
1163 
1164 
1165 void LCodeGen::DeoptimizeIf(Condition cc,
1166  LEnvironment* environment) {
1167  Deoptimizer::BailoutType bailout_type = info()->IsStub()
1168  ? Deoptimizer::LAZY
1169  : Deoptimizer::EAGER;
1170  DeoptimizeIf(cc, environment, bailout_type);
1171 }
1172 
1173 
1174 void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
1175  int length = deoptimizations_.length();
1176  if (length == 0) return;
1177  Handle<DeoptimizationInputData> data =
1178  factory()->NewDeoptimizationInputData(length, TENURED);
1179 
1180  Handle<ByteArray> translations =
1181  translations_.CreateByteArray(isolate()->factory());
1182  data->SetTranslationByteArray(*translations);
1183  data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));
1184  data->SetOptimizationId(Smi::FromInt(info_->optimization_id()));
1185  if (info_->IsOptimizing()) {
1186  // Reference to shared function info does not change between phases.
1187  AllowDeferredHandleDereference allow_handle_dereference;
1188  data->SetSharedFunctionInfo(*info_->shared_info());
1189  } else {
1190  data->SetSharedFunctionInfo(Smi::FromInt(0));
1191  }
1192 
1193  Handle<FixedArray> literals =
1194  factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
1195  { AllowDeferredHandleDereference copy_handles;
1196  for (int i = 0; i < deoptimization_literals_.length(); i++) {
1197  literals->set(i, *deoptimization_literals_[i]);
1198  }
1199  data->SetLiteralArray(*literals);
1200  }
1201 
1202  data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id().ToInt()));
1203  data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_));
1204 
1205  // Populate the deoptimization entries.
1206  for (int i = 0; i < length; i++) {
1207  LEnvironment* env = deoptimizations_[i];
1208  data->SetAstId(i, env->ast_id());
1209  data->SetTranslationIndex(i, Smi::FromInt(env->translation_index()));
1210  data->SetArgumentsStackHeight(i,
1211  Smi::FromInt(env->arguments_stack_height()));
1212  data->SetPc(i, Smi::FromInt(env->pc_offset()));
1213  }
1214  code->set_deoptimization_data(*data);
1215 }
1216 
1217 
1218 int LCodeGen::DefineDeoptimizationLiteral(Handle<Object> literal) {
1219  int result = deoptimization_literals_.length();
1220  for (int i = 0; i < deoptimization_literals_.length(); ++i) {
1221  if (deoptimization_literals_[i].is_identical_to(literal)) return i;
1222  }
1223  deoptimization_literals_.Add(literal, zone());
1224  return result;
1225 }
1226 
1227 
1228 void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
1229  ASSERT(deoptimization_literals_.length() == 0);
1230 
1231  const ZoneList<Handle<JSFunction> >* inlined_closures =
1232  chunk()->inlined_closures();
1233 
1234  for (int i = 0, length = inlined_closures->length();
1235  i < length;
1236  i++) {
1237  DefineDeoptimizationLiteral(inlined_closures->at(i));
1238  }
1239 
1240  inlined_function_count_ = deoptimization_literals_.length();
1241 }
1242 
1243 
1244 void LCodeGen::RecordSafepointWithLazyDeopt(
1245  LInstruction* instr, SafepointMode safepoint_mode) {
1246  if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
1247  RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt);
1248  } else {
1249  ASSERT(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
1250  RecordSafepointWithRegisters(
1251  instr->pointer_map(), 0, Safepoint::kLazyDeopt);
1252  }
1253 }
1254 
1255 
1256 void LCodeGen::RecordSafepoint(
1257  LPointerMap* pointers,
1258  Safepoint::Kind kind,
1259  int arguments,
1260  Safepoint::DeoptMode deopt_mode) {
1261  ASSERT(kind == expected_safepoint_kind_);
1262  const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();
1263  Safepoint safepoint =
1264  safepoints_.DefineSafepoint(masm(), kind, arguments, deopt_mode);
1265  for (int i = 0; i < operands->length(); i++) {
1266  LOperand* pointer = operands->at(i);
1267  if (pointer->IsStackSlot()) {
1268  safepoint.DefinePointerSlot(pointer->index(), zone());
1269  } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
1270  safepoint.DefinePointerRegister(ToRegister(pointer), zone());
1271  }
1272  }
1273 }
1274 
1275 
1276 void LCodeGen::RecordSafepoint(LPointerMap* pointers,
1277  Safepoint::DeoptMode mode) {
1278  RecordSafepoint(pointers, Safepoint::kSimple, 0, mode);
1279 }
1280 
1281 
1282 void LCodeGen::RecordSafepoint(Safepoint::DeoptMode mode) {
1283  LPointerMap empty_pointers(zone());
1284  RecordSafepoint(&empty_pointers, mode);
1285 }
1286 
1287 
1288 void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
1289  int arguments,
1290  Safepoint::DeoptMode mode) {
1291  RecordSafepoint(pointers, Safepoint::kWithRegisters, arguments, mode);
1292 }
1293 
1294 
1295 void LCodeGen::RecordAndWritePosition(int position) {
1296  if (position == RelocInfo::kNoPosition) return;
1297  masm()->positions_recorder()->RecordPosition(position);
1298  masm()->positions_recorder()->WriteRecordedPositions();
1299 }
1300 
1301 
1302 static const char* LabelType(LLabel* label) {
1303  if (label->is_loop_header()) return " (loop header)";
1304  if (label->is_osr_entry()) return " (OSR entry)";
1305  return "";
1306 }
1307 
1308 
1309 void LCodeGen::DoLabel(LLabel* label) {
1310  Comment(";;; <@%d,#%d> -------------------- B%d%s --------------------",
1311  current_instruction_,
1312  label->hydrogen_value()->id(),
1313  label->block_id(),
1314  LabelType(label));
1315  __ bind(label->label());
1316  current_block_ = label->block_id();
1317  DoGap(label);
1318 }
1319 
1320 
1321 void LCodeGen::DoParallelMove(LParallelMove* move) {
1322  resolver_.Resolve(move);
1323 }
1324 
1325 
1326 void LCodeGen::DoGap(LGap* gap) {
1327  for (int i = LGap::FIRST_INNER_POSITION;
1328  i <= LGap::LAST_INNER_POSITION;
1329  i++) {
1330  LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i);
1331  LParallelMove* move = gap->GetParallelMove(inner_pos);
1332  if (move != NULL) DoParallelMove(move);
1333  }
1334 }
1335 
1336 
1337 void LCodeGen::DoInstructionGap(LInstructionGap* instr) {
1338  DoGap(instr);
1339 }
1340 
1341 
1342 void LCodeGen::DoParameter(LParameter* instr) {
1343  // Nothing to do.
1344 }
1345 
1346 
1347 void LCodeGen::DoCallStub(LCallStub* instr) {
1348  ASSERT(ToRegister(instr->context()).is(esi));
1349  ASSERT(ToRegister(instr->result()).is(eax));
1350  switch (instr->hydrogen()->major_key()) {
1351  case CodeStub::RegExpExec: {
1352  RegExpExecStub stub;
1353  CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
1354  break;
1355  }
1356  case CodeStub::SubString: {
1357  SubStringStub stub;
1358  CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
1359  break;
1360  }
1361  case CodeStub::StringCompare: {
1362  StringCompareStub stub;
1363  CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
1364  break;
1365  }
1366  default:
1367  UNREACHABLE();
1368  }
1369 }
1370 
1371 
1372 void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
1373  GenerateOsrPrologue();
1374 }
1375 
1376 
1377 void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) {
1378  Register dividend = ToRegister(instr->dividend());
1379  int32_t divisor = instr->divisor();
1380  ASSERT(dividend.is(ToRegister(instr->result())));
1381 
1382  // Theoretically, a variation of the branch-free code for integer division by
1383  // a power of 2 (calculating the remainder via an additional multiplication
1384  // (which gets simplified to an 'and') and subtraction) should be faster, and
1385  // this is exactly what GCC and clang emit. Nevertheless, benchmarks seem to
1386  // indicate that positive dividends are heavily favored, so the branching
1387  // version performs better.
1388  HMod* hmod = instr->hydrogen();
1389  int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
1390  Label dividend_is_not_negative, done;
1391  if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) {
1392  __ test(dividend, dividend);
1393  __ j(not_sign, &dividend_is_not_negative, Label::kNear);
1394  // Note that this is correct even for kMinInt operands.
1395  __ neg(dividend);
1396  __ and_(dividend, mask);
1397  __ neg(dividend);
1398  if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
1399  DeoptimizeIf(zero, instr->environment());
1400  }
1401  __ jmp(&done, Label::kNear);
1402  }
1403 
1404  __ bind(&dividend_is_not_negative);
1405  __ and_(dividend, mask);
1406  __ bind(&done);
1407 }
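// Worked example: dividend = -7, divisor = 4 (mask = 3). The negative path
// computes neg(-7) = 7, 7 & 3 = 3, neg(3) = -3, so -7 % 4 == -3; the
// remainder keeps the sign of the dividend, as JS requires. If the final neg
// produces 0 the mathematical result is -0, hence the minus-zero deopt.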
1408 
1409 
1410 void LCodeGen::DoModByConstI(LModByConstI* instr) {
1411  Register dividend = ToRegister(instr->dividend());
1412  int32_t divisor = instr->divisor();
1413  ASSERT(ToRegister(instr->result()).is(eax));
1414 
1415  if (divisor == 0) {
1416  DeoptimizeIf(no_condition, instr->environment());
1417  return;
1418  }
1419 
1420  __ TruncatingDiv(dividend, Abs(divisor));
1421  __ imul(edx, edx, Abs(divisor));
1422  __ mov(eax, dividend);
1423  __ sub(eax, edx);
1424 
1425  // Check for negative zero.
1426  HMod* hmod = instr->hydrogen();
1427  if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
1428  Label remainder_not_zero;
1429  __ j(not_zero, &remainder_not_zero, Label::kNear);
1430  __ cmp(dividend, Immediate(0));
1431  DeoptimizeIf(less, instr->environment());
1432  __ bind(&remainder_not_zero);
1433  }
1434 }
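// Sketch of the remainder recovery above: TruncatingDiv leaves the truncated
// quotient dividend / abs(divisor) in edx (a multiply-by-reciprocal sequence
// rather than idiv), and the remainder is dividend - quotient * abs(divisor).
// E.g. dividend = 13, divisor = -5: quotient 2, remainder 13 - 10 = 3, and
// indeed 13 % -5 == 3 in JS.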
1435 
1436 
1437 void LCodeGen::DoModI(LModI* instr) {
1438  HMod* hmod = instr->hydrogen();
1439 
1440  Register left_reg = ToRegister(instr->left());
1441  ASSERT(left_reg.is(eax));
1442  Register right_reg = ToRegister(instr->right());
1443  ASSERT(!right_reg.is(eax));
1444  ASSERT(!right_reg.is(edx));
1445  Register result_reg = ToRegister(instr->result());
1446  ASSERT(result_reg.is(edx));
1447 
1448  Label done;
1449  // Check for x % 0, idiv would signal a divide error. We have to
1450  // deopt in this case because we can't return a NaN.
1451  if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
1452  __ test(right_reg, Operand(right_reg));
1453  DeoptimizeIf(zero, instr->environment());
1454  }
1455 
1456  // Check for kMinInt % -1, idiv would signal a divide error. We
1457  // have to deopt if we care about -0, because we can't return that.
1458  if (hmod->CheckFlag(HValue::kCanOverflow)) {
1459  Label no_overflow_possible;
1460  __ cmp(left_reg, kMinInt);
1461  __ j(not_equal, &no_overflow_possible, Label::kNear);
1462  __ cmp(right_reg, -1);
1463  if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
1464  DeoptimizeIf(equal, instr->environment());
1465  } else {
1466  __ j(not_equal, &no_overflow_possible, Label::kNear);
1467  __ Move(result_reg, Immediate(0));
1468  __ jmp(&done, Label::kNear);
1469  }
1470  __ bind(&no_overflow_possible);
1471  }
1472 
1473  // Sign extend dividend in eax into edx:eax.
1474  __ cdq();
1475 
1476  // If we care about -0, test if the dividend is <0 and the result is 0.
1477  if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
1478  Label positive_left;
1479  __ test(left_reg, Operand(left_reg));
1480  __ j(not_sign, &positive_left, Label::kNear);
1481  __ idiv(right_reg);
1482  __ test(result_reg, Operand(result_reg));
1483  DeoptimizeIf(zero, instr->environment());
1484  __ jmp(&done, Label::kNear);
1485  __ bind(&positive_left);
1486  }
1487  __ idiv(right_reg);
1488  __ bind(&done);
1489 }
1490 
1491 
1492 void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
1493  Register dividend = ToRegister(instr->dividend());
1494  int32_t divisor = instr->divisor();
1495  Register result = ToRegister(instr->result());
1496  ASSERT(divisor == kMinInt || (divisor != 0 && IsPowerOf2(Abs(divisor))));
1497  ASSERT(!result.is(dividend));
1498 
1499  // Check for (0 / -x) that will produce negative zero.
1500  HDiv* hdiv = instr->hydrogen();
1501  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
1502  __ test(dividend, dividend);
1503  DeoptimizeIf(zero, instr->environment());
1504  }
1505  // Check for (kMinInt / -1).
1506  if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
1507  __ cmp(dividend, kMinInt);
1508  DeoptimizeIf(zero, instr->environment());
1509  }
1510  // Deoptimize if remainder will not be 0.
1511  if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
1512  divisor != 1 && divisor != -1) {
1513  int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
1514  __ test(dividend, Immediate(mask));
1515  DeoptimizeIf(not_zero, instr->environment());
1516  }
1517  __ Move(result, dividend);
1518  int32_t shift = WhichPowerOf2Abs(divisor);
1519  if (shift > 0) {
1520  // The arithmetic shift is always OK, the 'if' is an optimization only.
1521  if (shift > 1) __ sar(result, 31);
1522  __ shr(result, 32 - shift);
1523  __ add(result, dividend);
1524  __ sar(result, shift);
1525  }
1526  if (divisor < 0) __ neg(result);
1527 }
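// Worked example of the rounding fix-up: dividend = -7, divisor = 4
// (shift = 2). sar(result, 31) gives -1, shr by 32 - 2 = 30 leaves the bias
// 3, adding the dividend gives -4, and sar by 2 yields -1, i.e. truncation
// toward zero; a plain arithmetic shift of -7 would have produced -2.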
1528 
1529 
1530 void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
1531  Register dividend = ToRegister(instr->dividend());
1532  int32_t divisor = instr->divisor();
1533  ASSERT(ToRegister(instr->result()).is(edx));
1534 
1535  if (divisor == 0) {
1536  DeoptimizeIf(no_condition, instr->environment());
1537  return;
1538  }
1539 
1540  // Check for (0 / -x) that will produce negative zero.
1541  HDiv* hdiv = instr->hydrogen();
1542  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
1543  __ test(dividend, dividend);
1544  DeoptimizeIf(zero, instr->environment());
1545  }
1546 
1547  __ TruncatingDiv(dividend, Abs(divisor));
1548  if (divisor < 0) __ neg(edx);
1549 
1550  if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
1551  __ mov(eax, edx);
1552  __ imul(eax, eax, divisor);
1553  __ sub(eax, dividend);
1554  DeoptimizeIf(not_equal, instr->environment());
1555  }
1556 }
1557 
1558 
1559 void LCodeGen::DoDivI(LDivI* instr) {
1560  HBinaryOperation* hdiv = instr->hydrogen();
1561  Register dividend = ToRegister(instr->left());
1562  Register divisor = ToRegister(instr->right());
1563  Register remainder = ToRegister(instr->temp());
1564  Register result = ToRegister(instr->result());
1565  ASSERT(dividend.is(eax));
1566  ASSERT(remainder.is(edx));
1567  ASSERT(result.is(eax));
1568  ASSERT(!divisor.is(eax));
1569  ASSERT(!divisor.is(edx));
1570 
1571  // Check for x / 0.
1572  if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
1573  __ test(divisor, divisor);
1574  DeoptimizeIf(zero, instr->environment());
1575  }
1576 
1577  // Check for (0 / -x) that will produce negative zero.
1578  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
1579  Label dividend_not_zero;
1580  __ test(dividend, dividend);
1581  __ j(not_zero, &dividend_not_zero, Label::kNear);
1582  __ test(divisor, divisor);
1583  DeoptimizeIf(sign, instr->environment());
1584  __ bind(&dividend_not_zero);
1585  }
1586 
1587  // Check for (kMinInt / -1).
1588  if (hdiv->CheckFlag(HValue::kCanOverflow)) {
1589  Label dividend_not_min_int;
1590  __ cmp(dividend, kMinInt);
1591  __ j(not_zero, &dividend_not_min_int, Label::kNear);
1592  __ cmp(divisor, -1);
1593  DeoptimizeIf(zero, instr->environment());
1594  __ bind(&dividend_not_min_int);
1595  }
1596 
1597  // Sign extend to edx (= remainder).
1598  __ cdq();
1599  __ idiv(divisor);
1600 
1601  if (hdiv->IsMathFloorOfDiv()) {
1602  Label done;
1603  __ test(remainder, remainder);
1604  __ j(zero, &done, Label::kNear);
1605  __ xor_(remainder, divisor);
1606  __ sar(remainder, 31);
1607  __ add(result, remainder);
1608  __ bind(&done);
1609  } else if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
1610  // Deoptimize if remainder is not 0.
1611  __ test(remainder, remainder);
1612  DeoptimizeIf(not_zero, instr->environment());
1613  }
1614 }
1615 
1616 
1617 void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
1618  Register dividend = ToRegister(instr->dividend());
1619  int32_t divisor = instr->divisor();
1620  ASSERT(dividend.is(ToRegister(instr->result())));
1621 
1622  // If the divisor is positive, things are easy: There can be no deopts and we
1623  // can simply do an arithmetic right shift.
1624  if (divisor == 1) return;
1625  int32_t shift = WhichPowerOf2Abs(divisor);
1626  if (divisor > 1) {
1627  __ sar(dividend, shift);
1628  return;
1629  }
1630 
1631  // If the divisor is negative, we have to negate and handle edge cases.
1632  Label not_kmin_int, done;
1633  __ neg(dividend);
1634  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
1635  DeoptimizeIf(zero, instr->environment());
1636  }
1637  if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
1638  // Note that we could emit branch-free code, but that would need one more
1639  // register.
1640  if (divisor == -1) {
1641  DeoptimizeIf(overflow, instr->environment());
1642  } else {
1643  __ j(no_overflow, &not_kmin_int, Label::kNear);
1644  __ mov(dividend, Immediate(kMinInt / divisor));
1645  __ jmp(&done, Label::kNear);
1646  }
1647  }
1648  __ bind(&not_kmin_int);
1649  __ sar(dividend, shift);
1650  __ bind(&done);
1651 }
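// An arithmetic right shift by k already rounds toward minus infinity, so a
// single sar implements flooring division by a positive power of two. For a
// negative divisor the dividend is negated first, using
// floor(x / -(2^k)) == floor(-x / 2^k), with deopts guarding the -0 and
// kMinInt-negation edge cases.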
1652 
1653 
1654 void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
1655  Register dividend = ToRegister(instr->dividend());
1656  int32_t divisor = instr->divisor();
1657  ASSERT(ToRegister(instr->result()).is(edx));
1658 
1659  if (divisor == 0) {
1660  DeoptimizeIf(no_condition, instr->environment());
1661  return;
1662  }
1663 
1664  // Check for (0 / -x) that will produce negative zero.
1665  HMathFloorOfDiv* hdiv = instr->hydrogen();
1666  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
1667  __ test(dividend, dividend);
1668  DeoptimizeIf(zero, instr->environment());
1669  }
1670 
1671  // Easy case: We need no dynamic check for the dividend and the flooring
1672  // division is the same as the truncating division.
1673  if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) ||
1674  (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) {
1675  __ TruncatingDiv(dividend, Abs(divisor));
1676  if (divisor < 0) __ neg(edx);
1677  return;
1678  }
1679 
1680  // In the general case we may need to adjust before and after the truncating
1681  // division to get a flooring division.
1682  Register temp = ToRegister(instr->temp3());
1683  ASSERT(!temp.is(dividend) && !temp.is(eax) && !temp.is(edx));
1684  Label needs_adjustment, done;
1685  __ cmp(dividend, Immediate(0));
1686  __ j(divisor > 0 ? less : greater, &needs_adjustment, Label::kNear);
1687  __ TruncatingDiv(dividend, Abs(divisor));
1688  if (divisor < 0) __ neg(edx);
1689  __ jmp(&done, Label::kNear);
1690  __ bind(&needs_adjustment);
1691  __ lea(temp, Operand(dividend, divisor > 0 ? 1 : -1));
1692  __ TruncatingDiv(temp, Abs(divisor));
1693  if (divisor < 0) __ neg(edx);
1694  __ dec(edx);
1695  __ bind(&done);
1696 }
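// Flooring vs. truncating: when the operands can have different signs the
// two differ by one whenever the division is inexact, so the slow path above
// divides dividend +/- 1 and then decrements. E.g. dividend = -7,
// divisor = 2: temp = -6, trunc(-6 / 2) = -3, dec gives -4 == floor(-3.5).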
1697 
1698 
1699 void LCodeGen::DoMulI(LMulI* instr) {
1700  Register left = ToRegister(instr->left());
1701  LOperand* right = instr->right();
1702 
1703  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
1704  __ mov(ToRegister(instr->temp()), left);
1705  }
1706 
1707  if (right->IsConstantOperand()) {
1708  // Try strength reductions on the multiplication.
1709  // All replacement instructions are at most as long as the imul
1710  // and have better latency.
1711  int constant = ToInteger32(LConstantOperand::cast(right));
1712  if (constant == -1) {
1713  __ neg(left);
1714  } else if (constant == 0) {
1715  __ xor_(left, Operand(left));
1716  } else if (constant == 2) {
1717  __ add(left, Operand(left));
1718  } else if (!instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
1719  // If we know that the multiplication can't overflow, it's safe to
1720  // use instructions that don't set the overflow flag for the
1721  // multiplication.
1722  switch (constant) {
1723  case 1:
1724  // Do nothing.
1725  break;
1726  case 3:
1727  __ lea(left, Operand(left, left, times_2, 0));
1728  break;
1729  case 4:
1730  __ shl(left, 2);
1731  break;
1732  case 5:
1733  __ lea(left, Operand(left, left, times_4, 0));
1734  break;
1735  case 8:
1736  __ shl(left, 3);
1737  break;
1738  case 9:
1739  __ lea(left, Operand(left, left, times_8, 0));
1740  break;
1741  case 16:
1742  __ shl(left, 4);
1743  break;
1744  default:
1745  __ imul(left, left, constant);
1746  break;
1747  }
1748  } else {
1749  __ imul(left, left, constant);
1750  }
1751  } else {
1752  if (instr->hydrogen()->representation().IsSmi()) {
1753  __ SmiUntag(left);
1754  }
1755  __ imul(left, ToOperand(right));
1756  }
1757 
1758  if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
1759  DeoptimizeIf(overflow, instr->environment());
1760  }
1761 
1762  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
1763  // Bail out if the result is supposed to be negative zero.
1764  Label done;
1765  __ test(left, Operand(left));
1766  __ j(not_zero, &done, Label::kNear);
1767  if (right->IsConstantOperand()) {
1768  if (ToInteger32(LConstantOperand::cast(right)) < 0) {
1769  DeoptimizeIf(no_condition, instr->environment());
1770  } else if (ToInteger32(LConstantOperand::cast(right)) == 0) {
1771  __ cmp(ToRegister(instr->temp()), Immediate(0));
1772  DeoptimizeIf(less, instr->environment());
1773  }
1774  } else {
1775  // Test the non-zero operand for negative sign.
1776  __ or_(ToRegister(instr->temp()), ToOperand(right));
1777  DeoptimizeIf(sign, instr->environment());
1778  }
1779  __ bind(&done);
1780  }
1781 }
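// The constant cases in DoMulI are classic strength reduction: a single lea,
// shl, add or neg replaces the imul when the multiplier has a convenient
// form.  Illustrative equivalences (not part of this file); each right-hand
// side is what the corresponding instruction computes:

#include <cstdint>

int32_t TimesMinus1(int32_t x) { return -x; }     // neg reg
int32_t Times2(int32_t x) { return x + x; }       // add reg, reg
int32_t Times3(int32_t x) { return x + x * 2; }   // lea reg, [reg + reg*2]
int32_t Times5(int32_t x) { return x + x * 4; }   // lea reg, [reg + reg*4]
int32_t Times9(int32_t x) { return x + x * 8; }   // lea reg, [reg + reg*8]
int32_t Times16(int32_t x) { return x << 4; }     // shl reg, 4
// None of these set the overflow flag, which is why they are only used when
// the hydrogen instruction is known not to overflow.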
1782 
1783 
1784 void LCodeGen::DoBitI(LBitI* instr) {
1785  LOperand* left = instr->left();
1786  LOperand* right = instr->right();
1787  ASSERT(left->Equals(instr->result()));
1788  ASSERT(left->IsRegister());
1789 
1790  if (right->IsConstantOperand()) {
1791  int32_t right_operand =
1792  ToRepresentation(LConstantOperand::cast(right),
1793  instr->hydrogen()->representation());
1794  switch (instr->op()) {
1795  case Token::BIT_AND:
1796  __ and_(ToRegister(left), right_operand);
1797  break;
1798  case Token::BIT_OR:
1799  __ or_(ToRegister(left), right_operand);
1800  break;
1801  case Token::BIT_XOR:
1802  if (right_operand == int32_t(~0)) {
1803  __ not_(ToRegister(left));
1804  } else {
1805  __ xor_(ToRegister(left), right_operand);
1806  }
1807  break;
1808  default:
1809  UNREACHABLE();
1810  break;
1811  }
1812  } else {
1813  switch (instr->op()) {
1814  case Token::BIT_AND:
1815  __ and_(ToRegister(left), ToOperand(right));
1816  break;
1817  case Token::BIT_OR:
1818  __ or_(ToRegister(left), ToOperand(right));
1819  break;
1820  case Token::BIT_XOR:
1821  __ xor_(ToRegister(left), ToOperand(right));
1822  break;
1823  default:
1824  UNREACHABLE();
1825  break;
1826  }
1827  }
1828 }
1829 
1830 
1831 void LCodeGen::DoShiftI(LShiftI* instr) {
1832  LOperand* left = instr->left();
1833  LOperand* right = instr->right();
1834  ASSERT(left->Equals(instr->result()));
1835  ASSERT(left->IsRegister());
1836  if (right->IsRegister()) {
1837  ASSERT(ToRegister(right).is(ecx));
1838 
1839  switch (instr->op()) {
1840  case Token::ROR:
1841  __ ror_cl(ToRegister(left));
1842  if (instr->can_deopt()) {
1843  __ test(ToRegister(left), ToRegister(left));
1844  DeoptimizeIf(sign, instr->environment());
1845  }
1846  break;
1847  case Token::SAR:
1848  __ sar_cl(ToRegister(left));
1849  break;
1850  case Token::SHR:
1851  __ shr_cl(ToRegister(left));
1852  if (instr->can_deopt()) {
1853  __ test(ToRegister(left), ToRegister(left));
1854  DeoptimizeIf(sign, instr->environment());
1855  }
1856  break;
1857  case Token::SHL:
1858  __ shl_cl(ToRegister(left));
1859  break;
1860  default:
1861  UNREACHABLE();
1862  break;
1863  }
1864  } else {
1865  int value = ToInteger32(LConstantOperand::cast(right));
1866  uint8_t shift_count = static_cast<uint8_t>(value & 0x1F);
1867  switch (instr->op()) {
1868  case Token::ROR:
1869  if (shift_count == 0 && instr->can_deopt()) {
1870  __ test(ToRegister(left), ToRegister(left));
1871  DeoptimizeIf(sign, instr->environment());
1872  } else {
1873  __ ror(ToRegister(left), shift_count);
1874  }
1875  break;
1876  case Token::SAR:
1877  if (shift_count != 0) {
1878  __ sar(ToRegister(left), shift_count);
1879  }
1880  break;
1881  case Token::SHR:
1882  if (shift_count == 0 && instr->can_deopt()) {
1883  __ test(ToRegister(left), ToRegister(left));
1884  DeoptimizeIf(sign, instr->environment());
1885  } else {
1886  __ shr(ToRegister(left), shift_count);
1887  }
1888  break;
1889  case Token::SHL:
1890  if (shift_count != 0) {
1891  if (instr->hydrogen_value()->representation().IsSmi() &&
1892  instr->can_deopt()) {
1893  if (shift_count != 1) {
1894  __ shl(ToRegister(left), shift_count - 1);
1895  }
1896  __ SmiTag(ToRegister(left));
1897  DeoptimizeIf(overflow, instr->environment());
1898  } else {
1899  __ shl(ToRegister(left), shift_count);
1900  }
1901  }
1902  break;
1903  default:
1904  UNREACHABLE();
1905  break;
1906  }
1907  }
1908 }
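// Why SHR (and ROR) with a shift count of zero may deoptimize: JavaScript's
// '>>>' yields an unsigned 32-bit number.  Any nonzero shift clears the top
// bit, so the result always fits in a signed 32-bit register; a zero shift
// leaves the top bit intact and the value may not be representable as an
// int32.  A sketch of the semantics (not part of this file):

#include <cstdint>

uint32_t UnsignedShiftRight(int32_t x, uint8_t count) {
  uint32_t u = static_cast<uint32_t>(x);
  return u >> (count & 0x1F);            // Same masking as the code above.
}
// UnsignedShiftRight(-1, 1) == 0x7FFFFFFF, which fits in an int32.
// UnsignedShiftRight(-1, 0) == 0xFFFFFFFF, which does not; the generated code
// therefore tests the sign bit and deoptimizes on that path.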
1909 
1910 
1911 void LCodeGen::DoSubI(LSubI* instr) {
1912  LOperand* left = instr->left();
1913  LOperand* right = instr->right();
1914  ASSERT(left->Equals(instr->result()));
1915 
1916  if (right->IsConstantOperand()) {
1917  __ sub(ToOperand(left),
1918  ToImmediate(right, instr->hydrogen()->representation()));
1919  } else {
1920  __ sub(ToRegister(left), ToOperand(right));
1921  }
1922  if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
1923  DeoptimizeIf(overflow, instr->environment());
1924  }
1925 }
1926 
1927 
1928 void LCodeGen::DoConstantI(LConstantI* instr) {
1929  __ Move(ToRegister(instr->result()), Immediate(instr->value()));
1930 }
1931 
1932 
1933 void LCodeGen::DoConstantS(LConstantS* instr) {
1934  __ Move(ToRegister(instr->result()), Immediate(instr->value()));
1935 }
1936 
1937 
1938 void LCodeGen::DoConstantD(LConstantD* instr) {
1939  double v = instr->value();
1940  uint64_t int_val = BitCast<uint64_t, double>(v);
1941  int32_t lower = static_cast<int32_t>(int_val);
1942  int32_t upper = static_cast<int32_t>(int_val >> (kBitsPerInt));
1943  ASSERT(instr->result()->IsDoubleRegister());
1944 
1945  if (!CpuFeatures::IsSafeForSnapshot(SSE2)) {
1946  __ push(Immediate(upper));
1947  __ push(Immediate(lower));
1948  X87Register reg = ToX87Register(instr->result());
1949  X87Mov(reg, Operand(esp, 0));
1950  __ add(Operand(esp), Immediate(kDoubleSize));
1951  } else {
1952  CpuFeatureScope scope1(masm(), SSE2);
1953  XMMRegister res = ToDoubleRegister(instr->result());
1954  if (int_val == 0) {
1955  __ xorps(res, res);
1956  } else {
1957  Register temp = ToRegister(instr->temp());
1958  if (CpuFeatures::IsSupported(SSE4_1)) {
1959  CpuFeatureScope scope2(masm(), SSE4_1);
1960  if (lower != 0) {
1961  __ Move(temp, Immediate(lower));
1962  __ movd(res, Operand(temp));
1963  __ Move(temp, Immediate(upper));
1964  __ pinsrd(res, Operand(temp), 1);
1965  } else {
1966  __ xorps(res, res);
1967  __ Move(temp, Immediate(upper));
1968  __ pinsrd(res, Operand(temp), 1);
1969  }
1970  } else {
1971  __ Move(temp, Immediate(upper));
1972  __ movd(res, Operand(temp));
1973  __ psllq(res, 32);
1974  if (lower != 0) {
1975  XMMRegister xmm_scratch = double_scratch0();
1976  __ Move(temp, Immediate(lower));
1977  __ movd(xmm_scratch, Operand(temp));
1978  __ orps(res, xmm_scratch);
1979  }
1980  }
1981  }
1982  }
1983 }
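// DoConstantD materializes a double from the two 32-bit halves of its bit
// pattern.  A sketch of how 'lower' and 'upper' relate to the value (not part
// of this file; this mirrors what BitCast<uint64_t, double> does):

#include <cstdint>
#include <cstring>

void SplitDoubleConstant(double v, int32_t* lower, int32_t* upper) {
  uint64_t bits;
  memcpy(&bits, &v, sizeof(bits));            // Reinterpret, no conversion.
  *lower = static_cast<int32_t>(bits);        // Low word of the IEEE pattern.
  *upper = static_cast<int32_t>(bits >> 32);  // High word: sign and exponent.
}
// Example: 1.0 has the pattern 0x3FF0000000000000, so lower == 0 and
// upper == 0x3FF00000; only an all-zero pattern takes the xorps shortcut.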
1984 
1985 
1986 void LCodeGen::DoConstantE(LConstantE* instr) {
1987  __ lea(ToRegister(instr->result()), Operand::StaticVariable(instr->value()));
1988 }
1989 
1990 
1991 void LCodeGen::DoConstantT(LConstantT* instr) {
1992  Register reg = ToRegister(instr->result());
1993  Handle<Object> handle = instr->value(isolate());
1994  AllowDeferredHandleDereference smi_check;
1995  __ LoadObject(reg, handle);
1996 }
1997 
1998 
1999 void LCodeGen::DoMapEnumLength(LMapEnumLength* instr) {
2000  Register result = ToRegister(instr->result());
2001  Register map = ToRegister(instr->value());
2002  __ EnumLength(result, map);
2003 }
2004 
2005 
2006 void LCodeGen::DoDateField(LDateField* instr) {
2007  Register object = ToRegister(instr->date());
2008  Register result = ToRegister(instr->result());
2009  Register scratch = ToRegister(instr->temp());
2010  Smi* index = instr->index();
2011  Label runtime, done;
2012  ASSERT(object.is(result));
2013  ASSERT(object.is(eax));
2014 
2015  __ test(object, Immediate(kSmiTagMask));
2016  DeoptimizeIf(zero, instr->environment());
2017  __ CmpObjectType(object, JS_DATE_TYPE, scratch);
2018  DeoptimizeIf(not_equal, instr->environment());
2019 
2020  if (index->value() == 0) {
2021  __ mov(result, FieldOperand(object, JSDate::kValueOffset));
2022  } else {
2023  if (index->value() < JSDate::kFirstUncachedField) {
2024  ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
2025  __ mov(scratch, Operand::StaticVariable(stamp));
2026  __ cmp(scratch, FieldOperand(object, JSDate::kCacheStampOffset));
2027  __ j(not_equal, &runtime, Label::kNear);
2028  __ mov(result, FieldOperand(object, JSDate::kValueOffset +
2029  kPointerSize * index->value()));
2030  __ jmp(&done, Label::kNear);
2031  }
2032  __ bind(&runtime);
2033  __ PrepareCallCFunction(2, scratch);
2034  __ mov(Operand(esp, 0), object);
2035  __ mov(Operand(esp, 1 * kPointerSize), Immediate(index));
2036  __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
2037  __ bind(&done);
2038  }
2039 }
2040 
2041 
2042 Operand LCodeGen::BuildSeqStringOperand(Register string,
2043  LOperand* index,
2044  String::Encoding encoding) {
2045  if (index->IsConstantOperand()) {
2046  int offset = ToRepresentation(LConstantOperand::cast(index),
2047  Representation::Integer32());
2048  if (encoding == String::TWO_BYTE_ENCODING) {
2049  offset *= kUC16Size;
2050  }
2051  STATIC_ASSERT(kCharSize == 1);
2052  return FieldOperand(string, SeqString::kHeaderSize + offset);
2053  }
2054  return FieldOperand(
2055  string, ToRegister(index),
2056  encoding == String::ONE_BYTE_ENCODING ? times_1 : times_2,
2057  SeqString::kHeaderSize);
2058 }
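// BuildSeqStringOperand addresses character i of a sequential string either
// with a constant offset or with a scaled index (times_1 / times_2).  The
// byte offset it encodes, as plain arithmetic (not part of this file;
// header_size stands in for SeqString::kHeaderSize):

#include <cstdint>

int32_t SeqStringCharOffset(int32_t index, bool one_byte, int32_t header_size) {
  int32_t char_size = one_byte ? 1 : 2;   // kCharSize vs. kUC16Size.
  return header_size + index * char_size;
}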
2059 
2060 
2061 void LCodeGen::DoSeqStringGetChar(LSeqStringGetChar* instr) {
2062  String::Encoding encoding = instr->hydrogen()->encoding();
2063  Register result = ToRegister(instr->result());
2064  Register string = ToRegister(instr->string());
2065 
2066  if (FLAG_debug_code) {
2067  __ push(string);
2068  __ mov(string, FieldOperand(string, HeapObject::kMapOffset));
2069  __ movzx_b(string, FieldOperand(string, Map::kInstanceTypeOffset));
2070 
2071  __ and_(string, Immediate(kStringRepresentationMask | kStringEncodingMask));
2072  static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
2073  static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
2074  __ cmp(string, Immediate(encoding == String::ONE_BYTE_ENCODING
2075  ? one_byte_seq_type : two_byte_seq_type));
2076  __ Check(equal, kUnexpectedStringType);
2077  __ pop(string);
2078  }
2079 
2080  Operand operand = BuildSeqStringOperand(string, instr->index(), encoding);
2081  if (encoding == String::ONE_BYTE_ENCODING) {
2082  __ movzx_b(result, operand);
2083  } else {
2084  __ movzx_w(result, operand);
2085  }
2086 }
2087 
2088 
2089 void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
2090  String::Encoding encoding = instr->hydrogen()->encoding();
2091  Register string = ToRegister(instr->string());
2092 
2093  if (FLAG_debug_code) {
2094  Register value = ToRegister(instr->value());
2095  Register index = ToRegister(instr->index());
2096  static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
2097  static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
2098  int encoding_mask =
2099  instr->hydrogen()->encoding() == String::ONE_BYTE_ENCODING
2100  ? one_byte_seq_type : two_byte_seq_type;
2101  __ EmitSeqStringSetCharCheck(string, index, value, encoding_mask);
2102  }
2103 
2104  Operand operand = BuildSeqStringOperand(string, instr->index(), encoding);
2105  if (instr->value()->IsConstantOperand()) {
2106  int value = ToRepresentation(LConstantOperand::cast(instr->value()),
2107  Representation::Integer32());
2108  ASSERT_LE(0, value);
2109  if (encoding == String::ONE_BYTE_ENCODING) {
2110  ASSERT_LE(value, String::kMaxOneByteCharCode);
2111  __ mov_b(operand, static_cast<int8_t>(value));
2112  } else {
2113  ASSERT_LE(value, String::kMaxUtf16CodeUnit);
2114  __ mov_w(operand, static_cast<int16_t>(value));
2115  }
2116  } else {
2117  Register value = ToRegister(instr->value());
2118  if (encoding == String::ONE_BYTE_ENCODING) {
2119  __ mov_b(operand, value);
2120  } else {
2121  __ mov_w(operand, value);
2122  }
2123  }
2124 }
2125 
2126 
2127 void LCodeGen::DoAddI(LAddI* instr) {
2128  LOperand* left = instr->left();
2129  LOperand* right = instr->right();
2130 
2131  if (LAddI::UseLea(instr->hydrogen()) && !left->Equals(instr->result())) {
2132  if (right->IsConstantOperand()) {
2133  int32_t offset = ToRepresentation(LConstantOperand::cast(right),
2134  instr->hydrogen()->representation());
2135  __ lea(ToRegister(instr->result()), MemOperand(ToRegister(left), offset));
2136  } else {
2137  Operand address(ToRegister(left), ToRegister(right), times_1, 0);
2138  __ lea(ToRegister(instr->result()), address);
2139  }
2140  } else {
2141  if (right->IsConstantOperand()) {
2142  __ add(ToOperand(left),
2143  ToImmediate(right, instr->hydrogen()->representation()));
2144  } else {
2145  __ add(ToRegister(left), ToOperand(right));
2146  }
2147  if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
2148  DeoptimizeIf(overflow, instr->environment());
2149  }
2150  }
2151 }
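// DoAddI picks between two shapes: a three-operand lea when the result
// register differs from the left input (no flags needed, inputs preserved),
// and a two-operand add when the overflow flag must be checked.  A sketch of
// the difference (not part of this file):

#include <cstdint>

int32_t AddViaLea(int32_t left, int32_t right) {
  return left + right;                   // lea result, [left + right]; no flags.
}

bool AddWithOverflowCheck(int32_t left, int32_t right, int32_t* result) {
  int64_t wide = static_cast<int64_t>(left) + right;
  *result = static_cast<int32_t>(wide);
  return wide == *result;                // 'false' models the deoptimization.
}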
2152 
2153 
2154 void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
2155  CpuFeatureScope scope(masm(), SSE2);
2156  LOperand* left = instr->left();
2157  LOperand* right = instr->right();
2158  ASSERT(left->Equals(instr->result()));
2159  HMathMinMax::Operation operation = instr->hydrogen()->operation();
2160  if (instr->hydrogen()->representation().IsSmiOrInteger32()) {
2161  Label return_left;
2162  Condition condition = (operation == HMathMinMax::kMathMin)
2163  ? less_equal
2164  : greater_equal;
2165  if (right->IsConstantOperand()) {
2166  Operand left_op = ToOperand(left);
2167  Immediate immediate = ToImmediate(LConstantOperand::cast(instr->right()),
2168  instr->hydrogen()->representation());
2169  __ cmp(left_op, immediate);
2170  __ j(condition, &return_left, Label::kNear);
2171  __ mov(left_op, immediate);
2172  } else {
2173  Register left_reg = ToRegister(left);
2174  Operand right_op = ToOperand(right);
2175  __ cmp(left_reg, right_op);
2176  __ j(condition, &return_left, Label::kNear);
2177  __ mov(left_reg, right_op);
2178  }
2179  __ bind(&return_left);
2180  } else {
2181  ASSERT(instr->hydrogen()->representation().IsDouble());
2182  Label check_nan_left, check_zero, return_left, return_right;
2183  Condition condition = (operation == HMathMinMax::kMathMin) ? below : above;
2184  XMMRegister left_reg = ToDoubleRegister(left);
2185  XMMRegister right_reg = ToDoubleRegister(right);
2186  __ ucomisd(left_reg, right_reg);
2187  __ j(parity_even, &check_nan_left, Label::kNear); // At least one NaN.
2188  __ j(equal, &check_zero, Label::kNear); // left == right.
2189  __ j(condition, &return_left, Label::kNear);
2190  __ jmp(&return_right, Label::kNear);
2191 
2192  __ bind(&check_zero);
2193  XMMRegister xmm_scratch = double_scratch0();
2194  __ xorps(xmm_scratch, xmm_scratch);
2195  __ ucomisd(left_reg, xmm_scratch);
2196  __ j(not_equal, &return_left, Label::kNear); // left == right != 0.
2197  // At this point, both left and right are either 0 or -0.
2198  if (operation == HMathMinMax::kMathMin) {
2199  __ orpd(left_reg, right_reg);
2200  } else {
2201  // Since we operate on +0 and/or -0, addsd and andsd have the same effect.
2202  __ addsd(left_reg, right_reg);
2203  }
2204  __ jmp(&return_left, Label::kNear);
2205 
2206  __ bind(&check_nan_left);
2207  __ ucomisd(left_reg, left_reg); // NaN check.
2208  __ j(parity_even, &return_left, Label::kNear); // left == NaN.
2209  __ bind(&return_right);
2210  __ movaps(left_reg, right_reg);
2211 
2212  __ bind(&return_left);
2213  }
2214 }
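// ucomisd alone cannot implement JavaScript's Math.min/Math.max: +0 and -0
// compare equal, and NaN compares unordered.  The extra blocks above resolve
// the zero case with orpd/addsd on the sign bits and propagate NaN.  A sketch
// of the min semantics being implemented (not part of this file):

#include <cmath>

double DoubleMin(double left, double right) {
  if (std::isnan(left) || std::isnan(right)) return NAN;   // Unordered case.
  if (left == 0.0 && right == 0.0) {
    // +0 == -0, so OR the sign bits: min(+0, -0) must be -0.
    return (std::signbit(left) || std::signbit(right)) ? -0.0 : 0.0;
  }
  return left <= right ? left : right;
}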
2215 
2216 
2217 void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
2218  if (CpuFeatures::IsSafeForSnapshot(SSE2)) {
2219  CpuFeatureScope scope(masm(), SSE2);
2220  XMMRegister left = ToDoubleRegister(instr->left());
2221  XMMRegister right = ToDoubleRegister(instr->right());
2222  XMMRegister result = ToDoubleRegister(instr->result());
2223  switch (instr->op()) {
2224  case Token::ADD:
2225  __ addsd(left, right);
2226  break;
2227  case Token::SUB:
2228  __ subsd(left, right);
2229  break;
2230  case Token::MUL:
2231  __ mulsd(left, right);
2232  break;
2233  case Token::DIV:
2234  __ divsd(left, right);
2235  // Don't delete this mov. It may improve performance on some CPUs
2236  // when there is a mulsd depending on the result.
2237  __ movaps(left, left);
2238  break;
2239  case Token::MOD: {
2240  // Pass two doubles as arguments on the stack.
2241  __ PrepareCallCFunction(4, eax);
2242  __ movsd(Operand(esp, 0 * kDoubleSize), left);
2243  __ movsd(Operand(esp, 1 * kDoubleSize), right);
2244  __ CallCFunction(
2245  ExternalReference::mod_two_doubles_operation(isolate()),
2246  4);
2247 
2248  // Return value is in st(0) on ia32.
2249  // Store it into the result register.
2250  __ sub(Operand(esp), Immediate(kDoubleSize));
2251  __ fstp_d(Operand(esp, 0));
2252  __ movsd(result, Operand(esp, 0));
2253  __ add(Operand(esp), Immediate(kDoubleSize));
2254  break;
2255  }
2256  default:
2257  UNREACHABLE();
2258  break;
2259  }
2260  } else {
2261  X87Register left = ToX87Register(instr->left());
2262  X87Register right = ToX87Register(instr->right());
2263  X87Register result = ToX87Register(instr->result());
2264  if (instr->op() != Token::MOD) {
2265  X87PrepareBinaryOp(left, right, result);
2266  }
2267  switch (instr->op()) {
2268  case Token::ADD:
2269  __ fadd_i(1);
2270  break;
2271  case Token::SUB:
2272  __ fsub_i(1);
2273  break;
2274  case Token::MUL:
2275  __ fmul_i(1);
2276  break;
2277  case Token::DIV:
2278  __ fdiv_i(1);
2279  break;
2280  case Token::MOD: {
2281  // Pass two doubles as arguments on the stack.
2282  __ PrepareCallCFunction(4, eax);
2283  X87Mov(Operand(esp, 1 * kDoubleSize), right);
2284  X87Mov(Operand(esp, 0), left);
2285  X87Free(right);
2286  ASSERT(left.is(result));
2287  X87PrepareToWrite(result);
2288  __ CallCFunction(
2289  ExternalReference::mod_two_doubles_operation(isolate()),
2290  4);
2291 
2292  // Return value is in st(0) on ia32.
2293  X87CommitWrite(result);
2294  break;
2295  }
2296  default:
2297  UNREACHABLE();
2298  break;
2299  }
2300  }
2301 }
2302 
2303 
2304 void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
2305  ASSERT(ToRegister(instr->context()).is(esi));
2306  ASSERT(ToRegister(instr->left()).is(edx));
2307  ASSERT(ToRegister(instr->right()).is(eax));
2308  ASSERT(ToRegister(instr->result()).is(eax));
2309 
2310  BinaryOpICStub stub(instr->op(), NO_OVERWRITE);
2311  CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
2312 }
2313 
2314 
2315 template<class InstrType>
2316 void LCodeGen::EmitBranch(InstrType instr, Condition cc) {
2317  int left_block = instr->TrueDestination(chunk_);
2318  int right_block = instr->FalseDestination(chunk_);
2319 
2320  int next_block = GetNextEmittedBlock();
2321 
2322  if (right_block == left_block || cc == no_condition) {
2323  EmitGoto(left_block);
2324  } else if (left_block == next_block) {
2325  __ j(NegateCondition(cc), chunk_->GetAssemblyLabel(right_block));
2326  } else if (right_block == next_block) {
2327  __ j(cc, chunk_->GetAssemblyLabel(left_block));
2328  } else {
2329  __ j(cc, chunk_->GetAssemblyLabel(left_block));
2330  __ jmp(chunk_->GetAssemblyLabel(right_block));
2331  }
2332 }
2333 
2334 
2335 template<class InstrType>
2336 void LCodeGen::EmitFalseBranch(InstrType instr, Condition cc) {
2337  int false_block = instr->FalseDestination(chunk_);
2338  if (cc == no_condition) {
2339  __ jmp(chunk_->GetAssemblyLabel(false_block));
2340  } else {
2341  __ j(cc, chunk_->GetAssemblyLabel(false_block));
2342  }
2343 }
2344 
2345 
2346 void LCodeGen::DoBranch(LBranch* instr) {
2347  Representation r = instr->hydrogen()->value()->representation();
2348  if (r.IsSmiOrInteger32()) {
2349  Register reg = ToRegister(instr->value());
2350  __ test(reg, Operand(reg));
2351  EmitBranch(instr, not_zero);
2352  } else if (r.IsDouble()) {
2353  ASSERT(!info()->IsStub());
2354  CpuFeatureScope scope(masm(), SSE2);
2355  XMMRegister reg = ToDoubleRegister(instr->value());
2356  XMMRegister xmm_scratch = double_scratch0();
2357  __ xorps(xmm_scratch, xmm_scratch);
2358  __ ucomisd(reg, xmm_scratch);
2359  EmitBranch(instr, not_equal);
2360  } else {
2361  ASSERT(r.IsTagged());
2362  Register reg = ToRegister(instr->value());
2363  HType type = instr->hydrogen()->value()->type();
2364  if (type.IsBoolean()) {
2365  ASSERT(!info()->IsStub());
2366  __ cmp(reg, factory()->true_value());
2367  EmitBranch(instr, equal);
2368  } else if (type.IsSmi()) {
2369  ASSERT(!info()->IsStub());
2370  __ test(reg, Operand(reg));
2371  EmitBranch(instr, not_equal);
2372  } else if (type.IsJSArray()) {
2373  ASSERT(!info()->IsStub());
2374  EmitBranch(instr, no_condition);
2375  } else if (type.IsHeapNumber()) {
2376  ASSERT(!info()->IsStub());
2377  CpuFeatureScope scope(masm(), SSE2);
2378  XMMRegister xmm_scratch = double_scratch0();
2379  __ xorps(xmm_scratch, xmm_scratch);
2380  __ ucomisd(xmm_scratch, FieldOperand(reg, HeapNumber::kValueOffset));
2381  EmitBranch(instr, not_equal);
2382  } else if (type.IsString()) {
2383  ASSERT(!info()->IsStub());
2384  __ cmp(FieldOperand(reg, String::kLengthOffset), Immediate(0));
2385  EmitBranch(instr, not_equal);
2386  } else {
2387  ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types();
2388  if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic();
2389 
2390  if (expected.Contains(ToBooleanStub::UNDEFINED)) {
2391  // undefined -> false.
2392  __ cmp(reg, factory()->undefined_value());
2393  __ j(equal, instr->FalseLabel(chunk_));
2394  }
2395  if (expected.Contains(ToBooleanStub::BOOLEAN)) {
2396  // true -> true.
2397  __ cmp(reg, factory()->true_value());
2398  __ j(equal, instr->TrueLabel(chunk_));
2399  // false -> false.
2400  __ cmp(reg, factory()->false_value());
2401  __ j(equal, instr->FalseLabel(chunk_));
2402  }
2403  if (expected.Contains(ToBooleanStub::NULL_TYPE)) {
2404  // 'null' -> false.
2405  __ cmp(reg, factory()->null_value());
2406  __ j(equal, instr->FalseLabel(chunk_));
2407  }
2408 
2409  if (expected.Contains(ToBooleanStub::SMI)) {
2410  // Smis: 0 -> false, all others -> true.
2411  __ test(reg, Operand(reg));
2412  __ j(equal, instr->FalseLabel(chunk_));
2413  __ JumpIfSmi(reg, instr->TrueLabel(chunk_));
2414  } else if (expected.NeedsMap()) {
2415  // If we need a map later and have a Smi -> deopt.
2416  __ test(reg, Immediate(kSmiTagMask));
2417  DeoptimizeIf(zero, instr->environment());
2418  }
2419 
2420  Register map = no_reg; // Keep the compiler happy.
2421  if (expected.NeedsMap()) {
2422  map = ToRegister(instr->temp());
2423  ASSERT(!map.is(reg));
2424  __ mov(map, FieldOperand(reg, HeapObject::kMapOffset));
2425 
2426  if (expected.CanBeUndetectable()) {
2427  // Undetectable -> false.
2428  __ test_b(FieldOperand(map, Map::kBitFieldOffset),
2429  1 << Map::kIsUndetectable);
2430  __ j(not_zero, instr->FalseLabel(chunk_));
2431  }
2432  }
2433 
2434  if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) {
2435  // spec object -> true.
2436  __ CmpInstanceType(map, FIRST_SPEC_OBJECT_TYPE);
2437  __ j(above_equal, instr->TrueLabel(chunk_));
2438  }
2439 
2440  if (expected.Contains(ToBooleanStub::STRING)) {
2441  // String value -> false iff empty.
2442  Label not_string;
2443  __ CmpInstanceType(map, FIRST_NONSTRING_TYPE);
2444  __ j(above_equal, &not_string, Label::kNear);
2445  __ cmp(FieldOperand(reg, String::kLengthOffset), Immediate(0));
2446  __ j(not_zero, instr->TrueLabel(chunk_));
2447  __ jmp(instr->FalseLabel(chunk_));
2448  __ bind(&not_string);
2449  }
2450 
2451  if (expected.Contains(ToBooleanStub::SYMBOL)) {
2452  // Symbol value -> true.
2453  __ CmpInstanceType(map, SYMBOL_TYPE);
2454  __ j(equal, instr->TrueLabel(chunk_));
2455  }
2456 
2457  if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
2458  // heap number -> false iff +0, -0, or NaN.
2459  Label not_heap_number;
2460  __ cmp(FieldOperand(reg, HeapObject::kMapOffset),
2461  factory()->heap_number_map());
2462  __ j(not_equal, &not_heap_number, Label::kNear);
2463  if (CpuFeatures::IsSafeForSnapshot(SSE2)) {
2464  CpuFeatureScope scope(masm(), SSE2);
2465  XMMRegister xmm_scratch = double_scratch0();
2466  __ xorps(xmm_scratch, xmm_scratch);
2467  __ ucomisd(xmm_scratch, FieldOperand(reg, HeapNumber::kValueOffset));
2468  } else {
2469  __ fldz();
2470  __ fld_d(FieldOperand(reg, HeapNumber::kValueOffset));
2471  __ FCmp();
2472  }
2473  __ j(zero, instr->FalseLabel(chunk_));
2474  __ jmp(instr->TrueLabel(chunk_));
2475  __ bind(&not_heap_number);
2476  }
2477 
2478  if (!expected.IsGeneric()) {
2479  // We've seen something for the first time -> deopt.
2480  // This can only happen if we are not generic already.
2481  DeoptimizeIf(no_condition, instr->environment());
2482  }
2483  }
2484  }
2485 }
2486 
2487 
2488 void LCodeGen::EmitGoto(int block) {
2489  if (!IsNextEmittedBlock(block)) {
2490  __ jmp(chunk_->GetAssemblyLabel(LookupDestination(block)));
2491  }
2492 }
2493 
2494 
2495 void LCodeGen::DoClobberDoubles(LClobberDoubles* instr) {
2496 }
2497 
2498 
2499 void LCodeGen::DoGoto(LGoto* instr) {
2500  EmitGoto(instr->block_id());
2501 }
2502 
2503 
2504 Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
2505  Condition cond = no_condition;
2506  switch (op) {
2507  case Token::EQ:
2508  case Token::EQ_STRICT:
2509  cond = equal;
2510  break;
2511  case Token::NE:
2512  case Token::NE_STRICT:
2513  cond = not_equal;
2514  break;
2515  case Token::LT:
2516  cond = is_unsigned ? below : less;
2517  break;
2518  case Token::GT:
2519  cond = is_unsigned ? above : greater;
2520  break;
2521  case Token::LTE:
2522  cond = is_unsigned ? below_equal : less_equal;
2523  break;
2524  case Token::GTE:
2525  cond = is_unsigned ? above_equal : greater_equal;
2526  break;
2527  case Token::IN:
2528  case Token::INSTANCEOF:
2529  default:
2530  UNREACHABLE();
2531  }
2532  return cond;
2533 }
2534 
2535 
2536 void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) {
2537  LOperand* left = instr->left();
2538  LOperand* right = instr->right();
2539  Condition cc = TokenToCondition(instr->op(), instr->is_double());
2540 
2541  if (left->IsConstantOperand() && right->IsConstantOperand()) {
2542  // We can statically evaluate the comparison.
2543  double left_val = ToDouble(LConstantOperand::cast(left));
2544  double right_val = ToDouble(LConstantOperand::cast(right));
2545  int next_block = EvalComparison(instr->op(), left_val, right_val) ?
2546  instr->TrueDestination(chunk_) : instr->FalseDestination(chunk_);
2547  EmitGoto(next_block);
2548  } else {
2549  if (instr->is_double()) {
2550  if (CpuFeatures::IsSafeForSnapshot(SSE2)) {
2551  CpuFeatureScope scope(masm(), SSE2);
2552  __ ucomisd(ToDoubleRegister(left), ToDoubleRegister(right));
2553  } else {
2554  X87LoadForUsage(ToX87Register(right), ToX87Register(left));
2555  __ FCmp();
2556  }
2557  // Don't base result on EFLAGS when a NaN is involved. Instead
2558  // jump to the false block.
2559  __ j(parity_even, instr->FalseLabel(chunk_));
2560  } else {
2561  if (right->IsConstantOperand()) {
2562  __ cmp(ToOperand(left),
2563  ToImmediate(right, instr->hydrogen()->representation()));
2564  } else if (left->IsConstantOperand()) {
2565  __ cmp(ToOperand(right),
2566  ToImmediate(left, instr->hydrogen()->representation()));
2567  // We transposed the operands. Reverse the condition.
2568  cc = ReverseCondition(cc);
2569  } else {
2570  __ cmp(ToRegister(left), ToOperand(right));
2571  }
2572  }
2573  EmitBranch(instr, cc);
2574  }
2575 }
2576 
2577 
2578 void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
2579  Register left = ToRegister(instr->left());
2580 
2581  if (instr->right()->IsConstantOperand()) {
2582  Handle<Object> right = ToHandle(LConstantOperand::cast(instr->right()));
2583  __ CmpObject(left, right);
2584  } else {
2585  Operand right = ToOperand(instr->right());
2586  __ cmp(left, right);
2587  }
2588  EmitBranch(instr, equal);
2589 }
2590 
2591 
2592 void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) {
2593  if (instr->hydrogen()->representation().IsTagged()) {
2594  Register input_reg = ToRegister(instr->object());
2595  __ cmp(input_reg, factory()->the_hole_value());
2596  EmitBranch(instr, equal);
2597  return;
2598  }
2599 
2600  bool use_sse2 = CpuFeatures::IsSupported(SSE2);
2601  if (use_sse2) {
2602  CpuFeatureScope scope(masm(), SSE2);
2603  XMMRegister input_reg = ToDoubleRegister(instr->object());
2604  __ ucomisd(input_reg, input_reg);
2605  EmitFalseBranch(instr, parity_odd);
2606  } else {
2607  // Put the value on top of the stack.
2608  X87Register src = ToX87Register(instr->object());
2609  X87LoadForUsage(src);
2610  __ fld(0);
2611  __ fld(0);
2612  __ FCmp();
2613  Label ok;
2614  __ j(parity_even, &ok, Label::kNear);
2615  __ fstp(0);
2616  EmitFalseBranch(instr, no_condition);
2617  __ bind(&ok);
2618  }
2619 
2620 
2621  __ sub(esp, Immediate(kDoubleSize));
2622  if (use_sse2) {
2623  CpuFeatureScope scope(masm(), SSE2);
2624  XMMRegister input_reg = ToDoubleRegister(instr->object());
2625  __ movsd(MemOperand(esp, 0), input_reg);
2626  } else {
2627  __ fstp_d(MemOperand(esp, 0));
2628  }
2629 
2630  __ add(esp, Immediate(kDoubleSize));
2631  int offset = sizeof(kHoleNanUpper32);
2632  __ cmp(MemOperand(esp, -offset), Immediate(kHoleNanUpper32));
2633  EmitBranch(instr, equal);
2634 }
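// "The hole" in a holey double backing store is one specific NaN bit pattern,
// so an ordinary NaN check (ucomisd) is not enough; the code above spills the
// value and compares its upper 32 bits against kHoleNanUpper32.  A sketch of
// that test (not part of this file; hole_upper32 stands in for the constant
// defined in the V8 headers):

#include <cstdint>
#include <cstring>

bool IsHoleNan(double value, uint32_t hole_upper32) {
  uint64_t bits;
  memcpy(&bits, &value, sizeof(bits));
  return static_cast<uint32_t>(bits >> 32) == hole_upper32;
}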
2635 
2636 
2637 void LCodeGen::DoCompareMinusZeroAndBranch(LCompareMinusZeroAndBranch* instr) {
2638  Representation rep = instr->hydrogen()->value()->representation();
2639  ASSERT(!rep.IsInteger32());
2640  Register scratch = ToRegister(instr->temp());
2641 
2642  if (rep.IsDouble()) {
2643  CpuFeatureScope use_sse2(masm(), SSE2);
2644  XMMRegister value = ToDoubleRegister(instr->value());
2645  XMMRegister xmm_scratch = double_scratch0();
2646  __ xorps(xmm_scratch, xmm_scratch);
2647  __ ucomisd(xmm_scratch, value);
2648  EmitFalseBranch(instr, not_equal);
2649  __ movmskpd(scratch, value);
2650  __ test(scratch, Immediate(1));
2651  EmitBranch(instr, not_zero);
2652  } else {
2653  Register value = ToRegister(instr->value());
2654  Handle<Map> map = masm()->isolate()->factory()->heap_number_map();
2655  __ CheckMap(value, map, instr->FalseLabel(chunk()), DO_SMI_CHECK);
2656  __ cmp(FieldOperand(value, HeapNumber::kExponentOffset),
2657  Immediate(0x1));
2658  EmitFalseBranch(instr, no_overflow);
2659  __ cmp(FieldOperand(value, HeapNumber::kMantissaOffset),
2660  Immediate(0x00000000));
2661  EmitBranch(instr, equal);
2662  }
2663 }
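// -0.0 and +0.0 compare equal under ucomisd, so the code above has to look at
// the raw bits: the sign bit via movmskpd in the double case, or the exponent
// and mantissa words of the heap number otherwise.  A sketch of the predicate
// (not part of this file):

#include <cstdint>
#include <cstring>

bool IsMinusZero(double value) {
  uint64_t bits;
  memcpy(&bits, &value, sizeof(bits));
  // Minus zero is exactly: sign bit set, all exponent and mantissa bits clear.
  return bits == UINT64_C(0x8000000000000000);
}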
2664 
2665 
2666 Condition LCodeGen::EmitIsObject(Register input,
2667  Register temp1,
2668  Label* is_not_object,
2669  Label* is_object) {
2670  __ JumpIfSmi(input, is_not_object);
2671 
2672  __ cmp(input, isolate()->factory()->null_value());
2673  __ j(equal, is_object);
2674 
2675  __ mov(temp1, FieldOperand(input, HeapObject::kMapOffset));
2676  // Undetectable objects behave like undefined.
2677  __ test_b(FieldOperand(temp1, Map::kBitFieldOffset),
2678  1 << Map::kIsUndetectable);
2679  __ j(not_zero, is_not_object);
2680 
2681  __ movzx_b(temp1, FieldOperand(temp1, Map::kInstanceTypeOffset));
2682  __ cmp(temp1, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE);
2683  __ j(below, is_not_object);
2684  __ cmp(temp1, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
2685  return below_equal;
2686 }
2687 
2688 
2689 void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
2690  Register reg = ToRegister(instr->value());
2691  Register temp = ToRegister(instr->temp());
2692 
2693  Condition true_cond = EmitIsObject(
2694  reg, temp, instr->FalseLabel(chunk_), instr->TrueLabel(chunk_));
2695 
2696  EmitBranch(instr, true_cond);
2697 }
2698 
2699 
2700 Condition LCodeGen::EmitIsString(Register input,
2701  Register temp1,
2702  Label* is_not_string,
2703  SmiCheck check_needed = INLINE_SMI_CHECK) {
2704  if (check_needed == INLINE_SMI_CHECK) {
2705  __ JumpIfSmi(input, is_not_string);
2706  }
2707 
2708  Condition cond = masm_->IsObjectStringType(input, temp1, temp1);
2709 
2710  return cond;
2711 }
2712 
2713 
2714 void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
2715  Register reg = ToRegister(instr->value());
2716  Register temp = ToRegister(instr->temp());
2717 
2718  SmiCheck check_needed =
2719  instr->hydrogen()->value()->IsHeapObject()
2720  ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
2721 
2722  Condition true_cond = EmitIsString(
2723  reg, temp, instr->FalseLabel(chunk_), check_needed);
2724 
2725  EmitBranch(instr, true_cond);
2726 }
2727 
2728 
2729 void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
2730  Operand input = ToOperand(instr->value());
2731 
2732  __ test(input, Immediate(kSmiTagMask));
2733  EmitBranch(instr, zero);
2734 }
2735 
2736 
2737 void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
2738  Register input = ToRegister(instr->value());
2739  Register temp = ToRegister(instr->temp());
2740 
2741  if (!instr->hydrogen()->value()->IsHeapObject()) {
2742  STATIC_ASSERT(kSmiTag == 0);
2743  __ JumpIfSmi(input, instr->FalseLabel(chunk_));
2744  }
2745  __ mov(temp, FieldOperand(input, HeapObject::kMapOffset));
2746  __ test_b(FieldOperand(temp, Map::kBitFieldOffset),
2747  1 << Map::kIsUndetectable);
2748  EmitBranch(instr, not_zero);
2749 }
2750 
2751 
2752 static Condition ComputeCompareCondition(Token::Value op) {
2753  switch (op) {
2754  case Token::EQ_STRICT:
2755  case Token::EQ:
2756  return equal;
2757  case Token::LT:
2758  return less;
2759  case Token::GT:
2760  return greater;
2761  case Token::LTE:
2762  return less_equal;
2763  case Token::GTE:
2764  return greater_equal;
2765  default:
2766  UNREACHABLE();
2767  return no_condition;
2768  }
2769 }
2770 
2771 
2772 void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
2773  Token::Value op = instr->op();
2774 
2775  Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
2776  CallCode(ic, RelocInfo::CODE_TARGET, instr);
2777 
2778  Condition condition = ComputeCompareCondition(op);
2779  __ test(eax, Operand(eax));
2780 
2781  EmitBranch(instr, condition);
2782 }
2783 
2784 
2785 static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
2786  InstanceType from = instr->from();
2787  InstanceType to = instr->to();
2788  if (from == FIRST_TYPE) return to;
2789  ASSERT(from == to || to == LAST_TYPE);
2790  return from;
2791 }
2792 
2793 
2794 static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
2795  InstanceType from = instr->from();
2796  InstanceType to = instr->to();
2797  if (from == to) return equal;
2798  if (to == LAST_TYPE) return above_equal;
2799  if (from == FIRST_TYPE) return below_equal;
2800  UNREACHABLE();
2801  return equal;
2802 }
2803 
2804 
2805 void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
2806  Register input = ToRegister(instr->value());
2807  Register temp = ToRegister(instr->temp());
2808 
2809  if (!instr->hydrogen()->value()->IsHeapObject()) {
2810  __ JumpIfSmi(input, instr->FalseLabel(chunk_));
2811  }
2812 
2813  __ CmpObjectType(input, TestType(instr->hydrogen()), temp);
2814  EmitBranch(instr, BranchCondition(instr->hydrogen()));
2815 }
2816 
2817 
2818 void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
2819  Register input = ToRegister(instr->value());
2820  Register result = ToRegister(instr->result());
2821 
2822  __ AssertString(input);
2823 
2824  __ mov(result, FieldOperand(input, String::kHashFieldOffset));
2825  __ IndexFromHash(result, result);
2826 }
2827 
2828 
2829 void LCodeGen::DoHasCachedArrayIndexAndBranch(
2830  LHasCachedArrayIndexAndBranch* instr) {
2831  Register input = ToRegister(instr->value());
2832 
2833  __ test(FieldOperand(input, String::kHashFieldOffset),
2834  Immediate(String::kContainsCachedArrayIndexMask));
2835  EmitBranch(instr, equal);
2836 }
2837 
2838 
2839 // Branches to a label or falls through with the answer in the z flag. Trashes
2840 // the temp registers, but not the input.
2841 void LCodeGen::EmitClassOfTest(Label* is_true,
2842  Label* is_false,
2843  Handle<String>class_name,
2844  Register input,
2845  Register temp,
2846  Register temp2) {
2847  ASSERT(!input.is(temp));
2848  ASSERT(!input.is(temp2));
2849  ASSERT(!temp.is(temp2));
2850  __ JumpIfSmi(input, is_false);
2851 
2852  if (class_name->IsOneByteEqualTo(STATIC_ASCII_VECTOR("Function"))) {
2853  // Assuming the following assertions, we can use the same compares to test
2854  // for both being a function type and being in the object type range.
2855  STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
2856  STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
2857  FIRST_SPEC_OBJECT_TYPE + 1);
2858  STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
2859  LAST_SPEC_OBJECT_TYPE - 1);
2860  STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
2861  __ CmpObjectType(input, FIRST_SPEC_OBJECT_TYPE, temp);
2862  __ j(below, is_false);
2863  __ j(equal, is_true);
2864  __ CmpInstanceType(temp, LAST_SPEC_OBJECT_TYPE);
2865  __ j(equal, is_true);
2866  } else {
2867  // Faster code path to avoid two compares: subtract lower bound from the
2868  // actual type and do a signed compare with the width of the type range.
2869  __ mov(temp, FieldOperand(input, HeapObject::kMapOffset));
2870  __ movzx_b(temp2, FieldOperand(temp, Map::kInstanceTypeOffset));
2871  __ sub(Operand(temp2), Immediate(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
2872  __ cmp(Operand(temp2), Immediate(LAST_NONCALLABLE_SPEC_OBJECT_TYPE -
2873  FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
2874  __ j(above, is_false);
2875  }
2876 
2877  // Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range.
2878  // Check if the constructor in the map is a function.
2879  __ mov(temp, FieldOperand(temp, Map::kConstructorOffset));
2880  // Objects with a non-function constructor have class 'Object'.
2881  __ CmpObjectType(temp, JS_FUNCTION_TYPE, temp2);
2882  if (class_name->IsOneByteEqualTo(STATIC_ASCII_VECTOR("Object"))) {
2883  __ j(not_equal, is_true);
2884  } else {
2885  __ j(not_equal, is_false);
2886  }
2887 
2888  // temp now contains the constructor function. Grab the
2889  // instance class name from there.
2890  __ mov(temp, FieldOperand(temp, JSFunction::kSharedFunctionInfoOffset));
2891  __ mov(temp, FieldOperand(temp,
2892  SharedFunctionInfo::kInstanceClassNameOffset));
2893  // The class name we are testing against is internalized since it's a literal.
2894  // The name in the constructor is internalized because of the way the context
2895  // is booted. This routine isn't expected to work for random API-created
2896  // classes and it doesn't have to because you can't access it with natives
2897  // syntax. Since both sides are internalized it is sufficient to use an
2898  // identity comparison.
2899  __ cmp(temp, class_name);
2900  // End with the answer in the z flag.
2901 }
2902 
2903 
2904 void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
2905  Register input = ToRegister(instr->value());
2906  Register temp = ToRegister(instr->temp());
2907  Register temp2 = ToRegister(instr->temp2());
2908 
2909  Handle<String> class_name = instr->hydrogen()->class_name();
2910 
2911  EmitClassOfTest(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_),
2912  class_name, input, temp, temp2);
2913 
2914  EmitBranch(instr, equal);
2915 }
2916 
2917 
2918 void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
2919  Register reg = ToRegister(instr->value());
2920  __ cmp(FieldOperand(reg, HeapObject::kMapOffset), instr->map());
2921  EmitBranch(instr, equal);
2922 }
2923 
2924 
2925 void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
2926  // Object and function are in fixed registers defined by the stub.
2927  ASSERT(ToRegister(instr->context()).is(esi));
2928  InstanceofStub stub(InstanceofStub::kArgsInRegisters);
2929  CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
2930 
2931  Label true_value, done;
2932  __ test(eax, Operand(eax));
2933  __ j(zero, &true_value, Label::kNear);
2934  __ mov(ToRegister(instr->result()), factory()->false_value());
2935  __ jmp(&done, Label::kNear);
2936  __ bind(&true_value);
2937  __ mov(ToRegister(instr->result()), factory()->true_value());
2938  __ bind(&done);
2939 }
2940 
2941 
2942 void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
2943  class DeferredInstanceOfKnownGlobal V8_FINAL : public LDeferredCode {
2944  public:
2945  DeferredInstanceOfKnownGlobal(LCodeGen* codegen,
2946  LInstanceOfKnownGlobal* instr,
2947  const X87Stack& x87_stack)
2948  : LDeferredCode(codegen, x87_stack), instr_(instr) { }
2949  virtual void Generate() V8_OVERRIDE {
2950  codegen()->DoDeferredInstanceOfKnownGlobal(instr_, &map_check_);
2951  }
2952  virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
2953  Label* map_check() { return &map_check_; }
2954  private:
2955  LInstanceOfKnownGlobal* instr_;
2956  Label map_check_;
2957  };
2958 
2959  DeferredInstanceOfKnownGlobal* deferred;
2960  deferred = new(zone()) DeferredInstanceOfKnownGlobal(this, instr, x87_stack_);
2961 
2962  Label done, false_result;
2963  Register object = ToRegister(instr->value());
2964  Register temp = ToRegister(instr->temp());
2965 
2966  // A Smi is not an instance of anything.
2967  __ JumpIfSmi(object, &false_result, Label::kNear);
2968 
2969  // This is the inlined call site instanceof cache. The two occurrences of the
2970  // hole value will be patched to the last map/result pair generated by the
2971  // instanceof stub.
2972  Label cache_miss;
2973  Register map = ToRegister(instr->temp());
2974  __ mov(map, FieldOperand(object, HeapObject::kMapOffset));
2975  __ bind(deferred->map_check()); // Label for calculating code patching.
2976  Handle<Cell> cache_cell = factory()->NewCell(factory()->the_hole_value());
2977  __ cmp(map, Operand::ForCell(cache_cell)); // Patched to cached map.
2978  __ j(not_equal, &cache_miss, Label::kNear);
2979  __ mov(eax, factory()->the_hole_value()); // Patched to either true or false.
2980  __ jmp(&done, Label::kNear);
2981 
2982  // The inlined call site cache did not match. Check for null and string
2983  // before calling the deferred code.
2984  __ bind(&cache_miss);
2985  // Null is not an instance of anything.
2986  __ cmp(object, factory()->null_value());
2987  __ j(equal, &false_result, Label::kNear);
2988 
2989  // String values are not instances of anything.
2990  Condition is_string = masm_->IsObjectStringType(object, temp, temp);
2991  __ j(is_string, &false_result, Label::kNear);
2992 
2993  // Go to the deferred code.
2994  __ jmp(deferred->entry());
2995 
2996  __ bind(&false_result);
2997  __ mov(ToRegister(instr->result()), factory()->false_value());
2998 
2999  // Here result has either true or false. Deferred code also produces true or
3000  // false object.
3001  __ bind(deferred->exit());
3002  __ bind(&done);
3003 }
3004 
3005 
3006 void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
3007  Label* map_check) {
3008  PushSafepointRegistersScope scope(this);
3009 
3010  InstanceofStub::Flags flags = InstanceofStub::kNoFlags;
3011  flags = static_cast<InstanceofStub::Flags>(
3012  flags | InstanceofStub::kArgsInRegisters);
3013  flags = static_cast<InstanceofStub::Flags>(
3014  flags | InstanceofStub::kCallSiteInlineCheck);
3015  flags = static_cast<InstanceofStub::Flags>(
3016  flags | InstanceofStub::kReturnTrueFalseObject);
3017  InstanceofStub stub(flags);
3018 
3019  // Get the temp register reserved by the instruction. This needs to be a
3020  // register which is pushed last by PushSafepointRegisters as top of the
3021  // stack is used to pass the offset to the location of the map check to
3022  // the stub.
3023  Register temp = ToRegister(instr->temp());
3024  ASSERT(MacroAssembler::SafepointRegisterStackIndex(temp) == 0);
3025  __ LoadHeapObject(InstanceofStub::right(), instr->function());
3026  static const int kAdditionalDelta = 13;
3027  int delta = masm_->SizeOfCodeGeneratedSince(map_check) + kAdditionalDelta;
3028  __ mov(temp, Immediate(delta));
3029  __ StoreToSafepointRegisterSlot(temp, temp);
3030  CallCodeGeneric(stub.GetCode(isolate()),
3031  RelocInfo::CODE_TARGET,
3032  instr,
3033  RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
3034  // Get the deoptimization index of the LLazyBailout-environment that
3035  // corresponds to this instruction.
3036  LEnvironment* env = instr->GetDeferredLazyDeoptimizationEnvironment();
3037  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
3038 
3039  // Put the result value into the eax slot and restore all registers.
3040  __ StoreToSafepointRegisterSlot(eax, eax);
3041 }
3042 
3043 
3044 void LCodeGen::DoCmpT(LCmpT* instr) {
3045  Token::Value op = instr->op();
3046 
3047  Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
3048  CallCode(ic, RelocInfo::CODE_TARGET, instr);
3049 
3050  Condition condition = ComputeCompareCondition(op);
3051  Label true_value, done;
3052  __ test(eax, Operand(eax));
3053  __ j(condition, &true_value, Label::kNear);
3054  __ mov(ToRegister(instr->result()), factory()->false_value());
3055  __ jmp(&done, Label::kNear);
3056  __ bind(&true_value);
3057  __ mov(ToRegister(instr->result()), factory()->true_value());
3058  __ bind(&done);
3059 }
3060 
3061 
3062 void LCodeGen::EmitReturn(LReturn* instr, bool dynamic_frame_alignment) {
3063  int extra_value_count = dynamic_frame_alignment ? 2 : 1;
3064 
3065  if (instr->has_constant_parameter_count()) {
3066  int parameter_count = ToInteger32(instr->constant_parameter_count());
3067  if (dynamic_frame_alignment && FLAG_debug_code) {
3068  __ cmp(Operand(esp,
3069  (parameter_count + extra_value_count) * kPointerSize),
3070  Immediate(kAlignmentZapValue));
3071  __ Assert(equal, kExpectedAlignmentMarker);
3072  }
3073  __ Ret((parameter_count + extra_value_count) * kPointerSize, ecx);
3074  } else {
3075  Register reg = ToRegister(instr->parameter_count());
3076  // The argument count parameter is a smi
3077  __ SmiUntag(reg);
3078  Register return_addr_reg = reg.is(ecx) ? ebx : ecx;
3079  if (dynamic_frame_alignment && FLAG_debug_code) {
3080  ASSERT(extra_value_count == 2);
3081  __ cmp(Operand(esp, reg, times_pointer_size,
3082  extra_value_count * kPointerSize),
3083  Immediate(kAlignmentZapValue));
3084  __ Assert(equal, kExpectedAlignmentMarker);
3085  }
3086 
3087  // emit code to restore stack based on instr->parameter_count()
3088  __ pop(return_addr_reg); // save return address
3089  if (dynamic_frame_alignment) {
3090  __ inc(reg); // 1 more for alignment
3091  }
3092  __ shl(reg, kPointerSizeLog2);
3093  __ add(esp, reg);
3094  __ jmp(return_addr_reg);
3095  }
3096 }
3097 
3098 
3099 void LCodeGen::DoReturn(LReturn* instr) {
3100  if (FLAG_trace && info()->IsOptimizing()) {
3101  // Preserve the return value on the stack and rely on the runtime call
3102  // to return the value in the same register. We're leaving the code
3103  // managed by the register allocator and tearing down the frame, it's
3104  // safe to write to the context register.
3105  __ push(eax);
3106  __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
3107  __ CallRuntime(Runtime::kTraceExit, 1);
3108  }
3109  if (info()->saves_caller_doubles() && CpuFeatures::IsSupported(SSE2)) {
3110  RestoreCallerDoubles();
3111  }
3112  if (dynamic_frame_alignment_) {
3113  // Fetch the state of the dynamic frame alignment.
3114  __ mov(edx, Operand(ebp,
3115  JavaScriptFrameConstants::kDynamicAlignmentStateOffset));
3116  }
3117  int no_frame_start = -1;
3118  if (NeedsEagerFrame()) {
3119  __ mov(esp, ebp);
3120  __ pop(ebp);
3121  no_frame_start = masm_->pc_offset();
3122  }
3123  if (dynamic_frame_alignment_) {
3124  Label no_padding;
3125  __ cmp(edx, Immediate(kNoAlignmentPadding));
3126  __ j(equal, &no_padding, Label::kNear);
3127 
3128  EmitReturn(instr, true);
3129  __ bind(&no_padding);
3130  }
3131 
3132  EmitReturn(instr, false);
3133  if (no_frame_start != -1) {
3134  info()->AddNoFrameRange(no_frame_start, masm_->pc_offset());
3135  }
3136 }
3137 
3138 
3139 void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
3140  Register result = ToRegister(instr->result());
3141  __ mov(result, Operand::ForCell(instr->hydrogen()->cell().handle()));
3142  if (instr->hydrogen()->RequiresHoleCheck()) {
3143  __ cmp(result, factory()->the_hole_value());
3144  DeoptimizeIf(equal, instr->environment());
3145  }
3146 }
3147 
3148 
3149 void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
3150  ASSERT(ToRegister(instr->context()).is(esi));
3151  ASSERT(ToRegister(instr->global_object()).is(edx));
3152  ASSERT(ToRegister(instr->result()).is(eax));
3153 
3154  __ mov(ecx, instr->name());
3155  ContextualMode mode = instr->for_typeof() ? NOT_CONTEXTUAL : CONTEXTUAL;
3156  Handle<Code> ic = LoadIC::initialize_stub(isolate(), mode);
3157  CallCode(ic, RelocInfo::CODE_TARGET, instr);
3158 }
3159 
3160 
3161 void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
3162  Register value = ToRegister(instr->value());
3163  Handle<PropertyCell> cell_handle = instr->hydrogen()->cell().handle();
3164 
3165  // If the cell we are storing to contains the hole it could have
3166  // been deleted from the property dictionary. In that case, we need
3167  // to update the property details in the property dictionary to mark
3168  // it as no longer deleted. We deoptimize in that case.
3169  if (instr->hydrogen()->RequiresHoleCheck()) {
3170  __ cmp(Operand::ForCell(cell_handle), factory()->the_hole_value());
3171  DeoptimizeIf(equal, instr->environment());
3172  }
3173 
3174  // Store the value.
3175  __ mov(Operand::ForCell(cell_handle), value);
3176  // Cells are always rescanned, so no write barrier here.
3177 }
3178 
3179 
3180 void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
3181  Register context = ToRegister(instr->context());
3182  Register result = ToRegister(instr->result());
3183  __ mov(result, ContextOperand(context, instr->slot_index()));
3184 
3185  if (instr->hydrogen()->RequiresHoleCheck()) {
3186  __ cmp(result, factory()->the_hole_value());
3187  if (instr->hydrogen()->DeoptimizesOnHole()) {
3188  DeoptimizeIf(equal, instr->environment());
3189  } else {
3190  Label is_not_hole;
3191  __ j(not_equal, &is_not_hole, Label::kNear);
3192  __ mov(result, factory()->undefined_value());
3193  __ bind(&is_not_hole);
3194  }
3195  }
3196 }
3197 
3198 
3199 void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
3200  Register context = ToRegister(instr->context());
3201  Register value = ToRegister(instr->value());
3202 
3203  Label skip_assignment;
3204 
3205  Operand target = ContextOperand(context, instr->slot_index());
3206  if (instr->hydrogen()->RequiresHoleCheck()) {
3207  __ cmp(target, factory()->the_hole_value());
3208  if (instr->hydrogen()->DeoptimizesOnHole()) {
3209  DeoptimizeIf(equal, instr->environment());
3210  } else {
3211  __ j(not_equal, &skip_assignment, Label::kNear);
3212  }
3213  }
3214 
3215  __ mov(target, value);
3216  if (instr->hydrogen()->NeedsWriteBarrier()) {
3217  SmiCheck check_needed =
3218  instr->hydrogen()->value()->IsHeapObject()
3219  ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
3220  Register temp = ToRegister(instr->temp());
3221  int offset = Context::SlotOffset(instr->slot_index());
3222  __ RecordWriteContextSlot(context,
3223  offset,
3224  value,
3225  temp,
3226  GetSaveFPRegsMode(),
3227  EMIT_REMEMBERED_SET,
3228  check_needed);
3229  }
3230 
3231  __ bind(&skip_assignment);
3232 }
3233 
3234 
3235 void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
3236  HObjectAccess access = instr->hydrogen()->access();
3237  int offset = access.offset();
3238 
3239  if (access.IsExternalMemory()) {
3240  Register result = ToRegister(instr->result());
3241  MemOperand operand = instr->object()->IsConstantOperand()
3242  ? MemOperand::StaticVariable(ToExternalReference(
3243  LConstantOperand::cast(instr->object())))
3244  : MemOperand(ToRegister(instr->object()), offset);
3245  __ Load(result, operand, access.representation());
3246  return;
3247  }
3248 
3249  Register object = ToRegister(instr->object());
3250  if (instr->hydrogen()->representation().IsDouble()) {
3251  if (CpuFeatures::IsSafeForSnapshot(SSE2)) {
3252  CpuFeatureScope scope(masm(), SSE2);
3253  XMMRegister result = ToDoubleRegister(instr->result());
3254  __ movsd(result, FieldOperand(object, offset));
3255  } else {
3256  X87Mov(ToX87Register(instr->result()), FieldOperand(object, offset));
3257  }
3258  return;
3259  }
3260 
3261  Register result = ToRegister(instr->result());
3262  if (!access.IsInobject()) {
3263  __ mov(result, FieldOperand(object, JSObject::kPropertiesOffset));
3264  object = result;
3265  }
3266  __ Load(result, FieldOperand(object, offset), access.representation());
3267 }
3268 
3269 
3270 void LCodeGen::EmitPushTaggedOperand(LOperand* operand) {
3271  ASSERT(!operand->IsDoubleRegister());
3272  if (operand->IsConstantOperand()) {
3273  Handle<Object> object = ToHandle(LConstantOperand::cast(operand));
3274  AllowDeferredHandleDereference smi_check;
3275  if (object->IsSmi()) {
3276  __ Push(Handle<Smi>::cast(object));
3277  } else {
3278  __ PushHeapObject(Handle<HeapObject>::cast(object));
3279  }
3280  } else if (operand->IsRegister()) {
3281  __ push(ToRegister(operand));
3282  } else {
3283  __ push(ToOperand(operand));
3284  }
3285 }
3286 
3287 
3288 void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
3289  ASSERT(ToRegister(instr->context()).is(esi));
3290  ASSERT(ToRegister(instr->object()).is(edx));
3291  ASSERT(ToRegister(instr->result()).is(eax));
3292 
3293  __ mov(ecx, instr->name());
3294  Handle<Code> ic = LoadIC::initialize_stub(isolate(), NOT_CONTEXTUAL);
3295  CallCode(ic, RelocInfo::CODE_TARGET, instr);
3296 }
3297 
3298 
3299 void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
3300  Register function = ToRegister(instr->function());
3301  Register temp = ToRegister(instr->temp());
3302  Register result = ToRegister(instr->result());
3303 
3304  // Check that the function really is a function.
3305  __ CmpObjectType(function, JS_FUNCTION_TYPE, result);
3306  DeoptimizeIf(not_equal, instr->environment());
3307 
3308  // Check whether the function has an instance prototype.
3309  Label non_instance;
3310  __ test_b(FieldOperand(result, Map::kBitFieldOffset),
3311  1 << Map::kHasNonInstancePrototype);
3312  __ j(not_zero, &non_instance, Label::kNear);
3313 
3314  // Get the prototype or initial map from the function.
3315  __ mov(result,
3316  FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
3317 
3318  // Check that the function has a prototype or an initial map.
3319  __ cmp(Operand(result), Immediate(factory()->the_hole_value()));
3320  DeoptimizeIf(equal, instr->environment());
3321 
3322  // If the function does not have an initial map, we're done.
3323  Label done;
3324  __ CmpObjectType(result, MAP_TYPE, temp);
3325  __ j(not_equal, &done, Label::kNear);
3326 
3327  // Get the prototype from the initial map.
3328  __ mov(result, FieldOperand(result, Map::kPrototypeOffset));
3329  __ jmp(&done, Label::kNear);
3330 
3331  // Non-instance prototype: Fetch prototype from constructor field
3332  // in the function's map.
3333  __ bind(&non_instance);
3334  __ mov(result, FieldOperand(result, Map::kConstructorOffset));
3335 
3336  // All done.
3337  __ bind(&done);
3338 }
3339 
3340 
3341 void LCodeGen::DoLoadRoot(LLoadRoot* instr) {
3342  Register result = ToRegister(instr->result());
3343  __ LoadRoot(result, instr->index());
3344 }
3345 
3346 
3347 void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
3348  Register arguments = ToRegister(instr->arguments());
3349  Register result = ToRegister(instr->result());
3350  if (instr->length()->IsConstantOperand() &&
3351  instr->index()->IsConstantOperand()) {
3352  int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
3353  int const_length = ToInteger32(LConstantOperand::cast(instr->length()));
3354  int index = (const_length - const_index) + 1;
3355  __ mov(result, Operand(arguments, index * kPointerSize));
3356  } else {
3357  Register length = ToRegister(instr->length());
3358  Operand index = ToOperand(instr->index());
3359  // There are two words between the frame pointer and the last argument.
3360  // Subtracting from length accounts for one of them; add one more.
3361  __ sub(length, index);
3362  __ mov(result, Operand(arguments, length, times_4, kPointerSize));
3363  }
3364 }
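// The index arithmetic in DoAccessArgumentsAt: with 'length' formal
// arguments, argument i is (length - i) slots above the arguments pointer,
// plus one extra word for the second of the two words mentioned in the
// comment (the first is folded into the subtraction).  As plain arithmetic
// (not part of this file):

#include <cstdint>

int32_t ArgumentByteOffset(int32_t length, int32_t index) {
  const int32_t kPointerSizeBytes = 4;   // ia32 pointer size.
  return (length - index + 1) * kPointerSizeBytes;
}
// Matches both paths above: the constant case computes (length - index) + 1
// directly, the register case folds the +1 into the kPointerSize displacement.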
3365 
3366 
3367 void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
3368  ElementsKind elements_kind = instr->elements_kind();
3369  LOperand* key = instr->key();
3370  if (!key->IsConstantOperand() &&
3371  ExternalArrayOpRequiresTemp(instr->hydrogen()->key()->representation(),
3372  elements_kind)) {
3373  __ SmiUntag(ToRegister(key));
3374  }
3375  Operand operand(BuildFastArrayOperand(
3376  instr->elements(),
3377  key,
3378  instr->hydrogen()->key()->representation(),
3379  elements_kind,
3380  0,
3381  instr->additional_index()));
3382  if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
3383  elements_kind == FLOAT32_ELEMENTS) {
3384  if (CpuFeatures::IsSafeForSnapshot(SSE2)) {
3385  CpuFeatureScope scope(masm(), SSE2);
3386  XMMRegister result(ToDoubleRegister(instr->result()));
3387  __ movss(result, operand);
3388  __ cvtss2sd(result, result);
3389  } else {
3390  X87Mov(ToX87Register(instr->result()), operand, kX87FloatOperand);
3391  }
3392  } else if (elements_kind == EXTERNAL_FLOAT64_ELEMENTS ||
3393  elements_kind == FLOAT64_ELEMENTS) {
3394  if (CpuFeatures::IsSafeForSnapshot(SSE2)) {
3395  CpuFeatureScope scope(masm(), SSE2);
3396  __ movsd(ToDoubleRegister(instr->result()), operand);
3397  } else {
3398  X87Mov(ToX87Register(instr->result()), operand);
3399  }
3400  } else {
3401  Register result(ToRegister(instr->result()));
3402  switch (elements_kind) {
3403  case EXTERNAL_INT8_ELEMENTS:
3404  case INT8_ELEMENTS:
3405  __ movsx_b(result, operand);
3406  break;
3407  case EXTERNAL_UINT8_CLAMPED_ELEMENTS:
3408  case EXTERNAL_UINT8_ELEMENTS:
3409  case UINT8_ELEMENTS:
3410  case UINT8_CLAMPED_ELEMENTS:
3411  __ movzx_b(result, operand);
3412  break;
3413  case EXTERNAL_INT16_ELEMENTS:
3414  case INT16_ELEMENTS:
3415  __ movsx_w(result, operand);
3416  break;
3417  case EXTERNAL_UINT16_ELEMENTS:
3418  case UINT16_ELEMENTS:
3419  __ movzx_w(result, operand);
3420  break;
3421  case EXTERNAL_INT32_ELEMENTS:
3422  case INT32_ELEMENTS:
3423  __ mov(result, operand);
3424  break;
3425  case EXTERNAL_UINT32_ELEMENTS:
3426  case UINT32_ELEMENTS:
3427  __ mov(result, operand);
3428  if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
3429  __ test(result, Operand(result));
3430  DeoptimizeIf(negative, instr->environment());
3431  }
3432  break;
3433  case EXTERNAL_FLOAT32_ELEMENTS:
3434  case EXTERNAL_FLOAT64_ELEMENTS:
3435  case FLOAT32_ELEMENTS:
3436  case FLOAT64_ELEMENTS:
3437  case FAST_SMI_ELEMENTS:
3438  case FAST_ELEMENTS:
3439  case FAST_DOUBLE_ELEMENTS:
3440  case FAST_HOLEY_SMI_ELEMENTS:
3441  case FAST_HOLEY_ELEMENTS:
3442  case FAST_HOLEY_DOUBLE_ELEMENTS:
3443  case DICTIONARY_ELEMENTS:
3444  case SLOPPY_ARGUMENTS_ELEMENTS:
3445  UNREACHABLE();
3446  break;
3447  }
3448  }
3449 }
3450 
3451 
3452 void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
3453  if (instr->hydrogen()->RequiresHoleCheck()) {
3454  int offset = FixedDoubleArray::kHeaderSize - kHeapObjectTag +
3455  sizeof(kHoleNanLower32);
3456  Operand hole_check_operand = BuildFastArrayOperand(
3457  instr->elements(), instr->key(),
3458  instr->hydrogen()->key()->representation(),
3459  FAST_DOUBLE_ELEMENTS,
3460  offset,
3461  instr->additional_index());
3462  __ cmp(hole_check_operand, Immediate(kHoleNanUpper32));
3463  DeoptimizeIf(equal, instr->environment());
3464  }
3465 
3466  Operand double_load_operand = BuildFastArrayOperand(
3467  instr->elements(),
3468  instr->key(),
3469  instr->hydrogen()->key()->representation(),
3470  FAST_DOUBLE_ELEMENTS,
3471  FixedDoubleArray::kHeaderSize - kHeapObjectTag,
3472  instr->additional_index());
3473  if (CpuFeatures::IsSupported(SSE2)) {
3474  CpuFeatureScope scope(masm(), SSE2);
3475  XMMRegister result = ToDoubleRegister(instr->result());
3476  __ movsd(result, double_load_operand);
3477  } else {
3478  X87Mov(ToX87Register(instr->result()), double_load_operand);
3479  }
3480 }
3481 
3482 
3483 void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
3484  Register result = ToRegister(instr->result());
3485 
3486  // Load the result.
3487  __ mov(result,
3488  BuildFastArrayOperand(instr->elements(),
3489  instr->key(),
3490  instr->hydrogen()->key()->representation(),
3491  FAST_ELEMENTS,
3492  FixedArray::kHeaderSize - kHeapObjectTag,
3493  instr->additional_index()));
3494 
3495  // Check for the hole value.
3496  if (instr->hydrogen()->RequiresHoleCheck()) {
3497  if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
3498  __ test(result, Immediate(kSmiTagMask));
3499  DeoptimizeIf(not_equal, instr->environment());
3500  } else {
3501  __ cmp(result, factory()->the_hole_value());
3502  DeoptimizeIf(equal, instr->environment());
3503  }
3504  }
3505 }
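// Note on the hole check above: for FAST_SMI elements every valid value is a
// smi, so a failed kSmiTagMask test is enough to detect the hole; for the
// other fast kinds the loaded word is compared against the_hole_value
// directly before deoptimizing.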
3506 
3507 
3508 void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) {
3509  if (instr->is_typed_elements()) {
3510  DoLoadKeyedExternalArray(instr);
3511  } else if (instr->hydrogen()->representation().IsDouble()) {
3512  DoLoadKeyedFixedDoubleArray(instr);
3513  } else {
3514  DoLoadKeyedFixedArray(instr);
3515  }
3516 }
3517 
3518 
3519 Operand LCodeGen::BuildFastArrayOperand(
3520  LOperand* elements_pointer,
3521  LOperand* key,
3522  Representation key_representation,
3523  ElementsKind elements_kind,
3524  uint32_t offset,
3525  uint32_t additional_index) {
3526  Register elements_pointer_reg = ToRegister(elements_pointer);
3527  int element_shift_size = ElementsKindToShiftSize(elements_kind);
3528  if (IsFixedTypedArrayElementsKind(elements_kind)) {
3529  offset += FixedTypedArrayBase::kDataOffset - kHeapObjectTag;
3530  }
3531  int shift_size = element_shift_size;
3532  if (key->IsConstantOperand()) {
3533  int constant_value = ToInteger32(LConstantOperand::cast(key));
3534  if (constant_value & 0xF0000000) {
3535  Abort(kArrayIndexConstantValueTooBig);
3536  }
3537  return Operand(elements_pointer_reg,
3538  ((constant_value + additional_index) << shift_size)
3539  + offset);
3540  } else {
3541  // Take the tag bit into account while computing the shift size.
3542  if (key_representation.IsSmi() && (shift_size >= 1)) {
3543  shift_size -= kSmiTagSize;
3544  }
3545  ScaleFactor scale_factor = static_cast<ScaleFactor>(shift_size);
3546  return Operand(elements_pointer_reg,
3547  ToRegister(key),
3548  scale_factor,
3549  offset + (additional_index << element_shift_size));
3550  }
3551 }
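// Illustrative example (annotation, not part of the original source): for
// FAST_ELEMENTS the shift size is 2, so a constant key of 3 with
// additional_index 0 and offset FixedArray::kHeaderSize - kHeapObjectTag
// yields Operand(elements_pointer_reg, (3 << 2) + offset), while a register
// key in smi representation uses scale factor times_2 (shift 2 minus the smi
// tag bit) with the same displacement folded into the operand.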
3552 
3553 
3554 void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
3555  ASSERT(ToRegister(instr->context()).is(esi));
3556  ASSERT(ToRegister(instr->object()).is(edx));
3557  ASSERT(ToRegister(instr->key()).is(ecx));
3558 
3559  Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
3560  CallCode(ic, RelocInfo::CODE_TARGET, instr);
3561 }
3562 
3563 
3564 void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
3565  Register result = ToRegister(instr->result());
3566 
3567  if (instr->hydrogen()->from_inlined()) {
3568  __ lea(result, Operand(esp, -2 * kPointerSize));
3569  } else {
3570  // Check for an arguments adaptor frame.
3571  Label done, adapted;
3572  __ mov(result, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
3573  __ mov(result, Operand(result, StandardFrameConstants::kContextOffset));
3574  __ cmp(Operand(result),
3575         Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
3576  __ j(equal, &adapted, Label::kNear);
3577 
3578  // No arguments adaptor frame.
3579  __ mov(result, Operand(ebp));
3580  __ jmp(&done, Label::kNear);
3581 
3582  // Arguments adaptor frame present.
3583  __ bind(&adapted);
3584  __ mov(result, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
3585 
3586  // Result is the frame pointer for the frame if not adapted and for the real
3587  // frame below the adaptor frame if adapted.
3588  __ bind(&done);
3589  }
3590 }
3591 
3592 
3593 void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
3594  Operand elem = ToOperand(instr->elements());
3595  Register result = ToRegister(instr->result());
3596 
3597  Label done;
3598 
3599  // If no arguments adaptor frame the number of arguments is fixed.
3600  __ cmp(ebp, elem);
3601  __ mov(result, Immediate(scope()->num_parameters()));
3602  __ j(equal, &done, Label::kNear);
3603 
3604  // Arguments adaptor frame present. Get argument length from there.
3605  __ mov(result, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
3606  __ mov(result, Operand(result,
3607                         ArgumentsAdaptorFrameConstants::kLengthOffset));
3608  __ SmiUntag(result);
3609 
3610  // Argument length is in result register.
3611  __ bind(&done);
3612 }
3613 
3614 
3615 void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
3616  Register receiver = ToRegister(instr->receiver());
3617  Register function = ToRegister(instr->function());
3618 
3619  // If the receiver is null or undefined, we have to pass the global
3620  // object as a receiver to normal functions. Values have to be
3621  // passed unchanged to builtins and strict-mode functions.
3622  Label receiver_ok, global_object;
3623  Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear;
3624  Register scratch = ToRegister(instr->temp());
3625 
3626  if (!instr->hydrogen()->known_function()) {
3627  // Do not transform the receiver to object for strict mode
3628  // functions.
3629  __ mov(scratch,
3630         FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
3631  __ test_b(FieldOperand(scratch, SharedFunctionInfo::kStrictModeByteOffset),
3632            1 << SharedFunctionInfo::kStrictModeBitWithinByte);
3633  __ j(not_equal, &receiver_ok, dist);
3634 
3635  // Do not transform the receiver to object for builtins.
3636  __ test_b(FieldOperand(scratch, SharedFunctionInfo::kNativeByteOffset),
3637            1 << SharedFunctionInfo::kNativeBitWithinByte);
3638  __ j(not_equal, &receiver_ok, dist);
3639  }
3640 
3641  // Normal function. Replace undefined or null with global receiver.
3642  __ cmp(receiver, factory()->null_value());
3643  __ j(equal, &global_object, Label::kNear);
3644  __ cmp(receiver, factory()->undefined_value());
3645  __ j(equal, &global_object, Label::kNear);
3646 
3647  // The receiver should be a JS object.
3648  __ test(receiver, Immediate(kSmiTagMask));
3649  DeoptimizeIf(equal, instr->environment());
3650  __ CmpObjectType(receiver, FIRST_SPEC_OBJECT_TYPE, scratch);
3651  DeoptimizeIf(below, instr->environment());
3652 
3653  __ jmp(&receiver_ok, Label::kNear);
3654  __ bind(&global_object);
3655  __ mov(receiver, FieldOperand(function, JSFunction::kContextOffset));
3656  const int global_offset = Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX);
3657  __ mov(receiver, Operand(receiver, global_offset));
3658  const int receiver_offset = GlobalObject::kGlobalReceiverOffset;
3659  __ mov(receiver, FieldOperand(receiver, receiver_offset));
3660  __ bind(&receiver_ok);
3661 }
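// Summary of the receiver rewrite above: strict-mode and native (builtin)
// functions receive the value unchanged; for ordinary sloppy-mode functions
// null and undefined are replaced by the global receiver, and any remaining
// smi or non-spec-object receiver triggers a deoptimization.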
3662 
3663 
3664 void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
3665  Register receiver = ToRegister(instr->receiver());
3666  Register function = ToRegister(instr->function());
3667  Register length = ToRegister(instr->length());
3668  Register elements = ToRegister(instr->elements());
3669  ASSERT(receiver.is(eax)); // Used for parameter count.
3670  ASSERT(function.is(edi)); // Required by InvokeFunction.
3671  ASSERT(ToRegister(instr->result()).is(eax));
3672 
3673  // Copy the arguments to this function possibly from the
3674  // adaptor frame below it.
3675  const uint32_t kArgumentsLimit = 1 * KB;
3676  __ cmp(length, kArgumentsLimit);
3677  DeoptimizeIf(above, instr->environment());
3678 
3679  __ push(receiver);
3680  __ mov(receiver, length);
3681 
3682  // Loop through the arguments pushing them onto the execution
3683  // stack.
3684  Label invoke, loop;
3685  // length is a small non-negative integer, due to the test above.
3686  __ test(length, Operand(length));
3687  __ j(zero, &invoke, Label::kNear);
3688  __ bind(&loop);
3689  __ push(Operand(elements, length, times_pointer_size, 1 * kPointerSize));
3690  __ dec(length);
3691  __ j(not_zero, &loop);
3692 
3693  // Invoke the function.
3694  __ bind(&invoke);
3695  ASSERT(instr->HasPointerMap());
3696  LPointerMap* pointers = instr->pointer_map();
3697  SafepointGenerator safepoint_generator(
3698  this, pointers, Safepoint::kLazyDeopt);
3699  ParameterCount actual(eax);
3700  __ InvokeFunction(function, actual, CALL_FUNCTION, safepoint_generator);
3701 }
3702 
3703 
3704 void LCodeGen::DoDebugBreak(LDebugBreak* instr) {
3705  __ int3();
3706 }
3707 
3708 
3709 void LCodeGen::DoPushArgument(LPushArgument* instr) {
3710  LOperand* argument = instr->value();
3711  EmitPushTaggedOperand(argument);
3712 }
3713 
3714 
3715 void LCodeGen::DoDrop(LDrop* instr) {
3716  __ Drop(instr->count());
3717 }
3718 
3719 
3720 void LCodeGen::DoThisFunction(LThisFunction* instr) {
3721  Register result = ToRegister(instr->result());
3722  __ mov(result, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
3723 }
3724 
3725 
3726 void LCodeGen::DoContext(LContext* instr) {
3727  Register result = ToRegister(instr->result());
3728  if (info()->IsOptimizing()) {
3729  __ mov(result, Operand(ebp, StandardFrameConstants::kContextOffset));
3730  } else {
3731  // If there is no frame, the context must be in esi.
3732  ASSERT(result.is(esi));
3733  }
3734 }
3735 
3736 
3737 void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
3738  ASSERT(ToRegister(instr->context()).is(esi));
3739  __ push(esi); // The context is the first argument.
3740  __ push(Immediate(instr->hydrogen()->pairs()));
3741  __ push(Immediate(Smi::FromInt(instr->hydrogen()->flags())));
3742  CallRuntime(Runtime::kHiddenDeclareGlobals, 3, instr);
3743 }
3744 
3745 
3746 void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
3747  int formal_parameter_count,
3748  int arity,
3749  LInstruction* instr,
3750  EDIState edi_state) {
3751  bool dont_adapt_arguments =
3752  formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel;
3753  bool can_invoke_directly =
3754  dont_adapt_arguments || formal_parameter_count == arity;
3755 
3756  if (can_invoke_directly) {
3757  if (edi_state == EDI_UNINITIALIZED) {
3758  __ LoadHeapObject(edi, function);
3759  }
3760 
3761  // Change context.
3762  __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
3763 
3764  // Set eax to arguments count if adaption is not needed. Assumes that eax
3765  // is available to write to at this point.
3766  if (dont_adapt_arguments) {
3767  __ mov(eax, arity);
3768  }
3769 
3770  // Invoke function directly.
3771  if (function.is_identical_to(info()->closure())) {
3772  __ CallSelf();
3773  } else {
3774  __ call(FieldOperand(edi, JSFunction::kCodeEntryOffset));
3775  }
3776  RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
3777  } else {
3778  // We need to adapt arguments.
3779  LPointerMap* pointers = instr->pointer_map();
3780  SafepointGenerator generator(
3781  this, pointers, Safepoint::kLazyDeopt);
3782  ParameterCount count(arity);
3783  ParameterCount expected(formal_parameter_count);
3784  __ InvokeFunction(function, expected, count, CALL_FUNCTION, generator);
3785  }
3786 }
3787 
3788 
3789 void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
3790  ASSERT(ToRegister(instr->result()).is(eax));
3791 
3792  LPointerMap* pointers = instr->pointer_map();
3793  SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
3794 
3795  if (instr->target()->IsConstantOperand()) {
3796  LConstantOperand* target = LConstantOperand::cast(instr->target());
3797  Handle<Code> code = Handle<Code>::cast(ToHandle(target));
3798  generator.BeforeCall(__ CallSize(code, RelocInfo::CODE_TARGET));
3799  __ call(code, RelocInfo::CODE_TARGET);
3800  } else {
3801  ASSERT(instr->target()->IsRegister());
3802  Register target = ToRegister(instr->target());
3803  generator.BeforeCall(__ CallSize(Operand(target)));
3804  __ add(target, Immediate(Code::kHeaderSize - kHeapObjectTag));
3805  __ call(target);
3806  }
3807  generator.AfterCall();
3808 }
3809 
3810 
3811 void LCodeGen::DoCallJSFunction(LCallJSFunction* instr) {
3812  ASSERT(ToRegister(instr->function()).is(edi));
3813  ASSERT(ToRegister(instr->result()).is(eax));
3814 
3815  if (instr->hydrogen()->pass_argument_count()) {
3816  __ mov(eax, instr->arity());
3817  }
3818 
3819  // Change context.
3820  __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
3821 
3822  bool is_self_call = false;
3823  if (instr->hydrogen()->function()->IsConstant()) {
3824  HConstant* fun_const = HConstant::cast(instr->hydrogen()->function());
3825  Handle<JSFunction> jsfun =
3826  Handle<JSFunction>::cast(fun_const->handle(isolate()));
3827  is_self_call = jsfun.is_identical_to(info()->closure());
3828  }
3829 
3830  if (is_self_call) {
3831  __ CallSelf();
3832  } else {
3833  __ call(FieldOperand(edi, JSFunction::kCodeEntryOffset));
3834  }
3835 
3836  RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
3837 }
3838 
3839 
3840 void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
3841  Register input_reg = ToRegister(instr->value());
3842  __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
3843  factory()->heap_number_map());
3844  DeoptimizeIf(not_equal, instr->environment());
3845 
3846  Label slow, allocated, done;
3847  Register tmp = input_reg.is(eax) ? ecx : eax;
3848  Register tmp2 = tmp.is(ecx) ? edx : input_reg.is(ecx) ? edx : ecx;
3849 
3850  // Preserve the value of all registers.
3851  PushSafepointRegistersScope scope(this);
3852 
3853  __ mov(tmp, FieldOperand(input_reg, HeapNumber::kExponentOffset));
3854  // Check the sign of the argument. If the argument is positive, just
3855  // return it. We do not need to patch the stack since |input| and
3856  // |result| are the same register and |input| will be restored
3857  // unchanged by popping safepoint registers.
3858  __ test(tmp, Immediate(HeapNumber::kSignMask));
3859  __ j(zero, &done, Label::kNear);
3860 
3861  __ AllocateHeapNumber(tmp, tmp2, no_reg, &slow);
3862  __ jmp(&allocated, Label::kNear);
3863 
3864  // Slow case: Call the runtime system to do the number allocation.
3865  __ bind(&slow);
3866  CallRuntimeFromDeferred(Runtime::kHiddenAllocateHeapNumber, 0,
3867  instr, instr->context());
3868  // Set the pointer to the new heap number in tmp.
3869  if (!tmp.is(eax)) __ mov(tmp, eax);
3870  // Restore input_reg after call to runtime.
3871  __ LoadFromSafepointRegisterSlot(input_reg, input_reg);
3872 
3873  __ bind(&allocated);
3874  __ mov(tmp2, FieldOperand(input_reg, HeapNumber::kExponentOffset));
3875  __ and_(tmp2, ~HeapNumber::kSignMask);
3876  __ mov(FieldOperand(tmp, HeapNumber::kExponentOffset), tmp2);
3877  __ mov(tmp2, FieldOperand(input_reg, HeapNumber::kMantissaOffset));
3878  __ mov(FieldOperand(tmp, HeapNumber::kMantissaOffset), tmp2);
3879  __ StoreToSafepointRegisterSlot(input_reg, tmp);
3880 
3881  __ bind(&done);
3882 }
3883 
3884 
3885 void LCodeGen::EmitIntegerMathAbs(LMathAbs* instr) {
3886  Register input_reg = ToRegister(instr->value());
3887  __ test(input_reg, Operand(input_reg));
3888  Label is_positive;
3889  __ j(not_sign, &is_positive, Label::kNear);
3890  __ neg(input_reg); // Sets flags.
3891  DeoptimizeIf(negative, instr->environment());
3892  __ bind(&is_positive);
3893 }
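// The neg above also covers the overflow case: negating kMinInt leaves the
// value unchanged and keeps the sign flag set, so the following
// DeoptimizeIf(negative, ...) bails out instead of returning a wrong result.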
3894 
3895 
3896 void LCodeGen::DoMathAbs(LMathAbs* instr) {
3897  // Class for deferred case.
3898  class DeferredMathAbsTaggedHeapNumber V8_FINAL : public LDeferredCode {
3899  public:
3900  DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen,
3901  LMathAbs* instr,
3902  const X87Stack& x87_stack)
3903  : LDeferredCode(codegen, x87_stack), instr_(instr) { }
3904  virtual void Generate() V8_OVERRIDE {
3905  codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
3906  }
3907  virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
3908  private:
3909  LMathAbs* instr_;
3910  };
3911 
3912  ASSERT(instr->value()->Equals(instr->result()));
3913  Representation r = instr->hydrogen()->value()->representation();
3914 
3915  CpuFeatureScope scope(masm(), SSE2);
3916  if (r.IsDouble()) {
3917  XMMRegister scratch = double_scratch0();
3918  XMMRegister input_reg = ToDoubleRegister(instr->value());
3919  __ xorps(scratch, scratch);
3920  __ subsd(scratch, input_reg);
3921  __ andps(input_reg, scratch);
3922  } else if (r.IsSmiOrInteger32()) {
3923  EmitIntegerMathAbs(instr);
3924  } else { // Tagged case.
3925  DeferredMathAbsTaggedHeapNumber* deferred =
3926  new(zone()) DeferredMathAbsTaggedHeapNumber(this, instr, x87_stack_);
3927  Register input_reg = ToRegister(instr->value());
3928  // Smi check.
3929  __ JumpIfNotSmi(input_reg, deferred->entry());
3930  EmitIntegerMathAbs(instr);
3931  __ bind(deferred->exit());
3932  }
3933 }
3934 
3935 
3936 void LCodeGen::DoMathFloor(LMathFloor* instr) {
3937  CpuFeatureScope scope(masm(), SSE2);
3938  XMMRegister xmm_scratch = double_scratch0();
3939  Register output_reg = ToRegister(instr->result());
3940  XMMRegister input_reg = ToDoubleRegister(instr->value());
3941 
3942  if (CpuFeatures::IsSupported(SSE4_1)) {
3943  CpuFeatureScope scope(masm(), SSE4_1);
3944  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3945  // Deoptimize on negative zero.
3946  Label non_zero;
3947  __ xorps(xmm_scratch, xmm_scratch); // Zero the register.
3948  __ ucomisd(input_reg, xmm_scratch);
3949  __ j(not_equal, &non_zero, Label::kNear);
3950  __ movmskpd(output_reg, input_reg);
3951  __ test(output_reg, Immediate(1));
3952  DeoptimizeIf(not_zero, instr->environment());
3953  __ bind(&non_zero);
3954  }
3955  __ roundsd(xmm_scratch, input_reg, Assembler::kRoundDown);
3956  __ cvttsd2si(output_reg, Operand(xmm_scratch));
3957  // Overflow is signalled with minint.
3958  __ cmp(output_reg, 0x1);
3959  DeoptimizeIf(overflow, instr->environment());
3960  } else {
3961  Label negative_sign, done;
3962  // Deoptimize on unordered.
3963  __ xorps(xmm_scratch, xmm_scratch); // Zero the register.
3964  __ ucomisd(input_reg, xmm_scratch);
3965  DeoptimizeIf(parity_even, instr->environment());
3966  __ j(below, &negative_sign, Label::kNear);
3967 
3968  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3969  // Check for negative zero.
3970  Label positive_sign;
3971  __ j(above, &positive_sign, Label::kNear);
3972  __ movmskpd(output_reg, input_reg);
3973  __ test(output_reg, Immediate(1));
3974  DeoptimizeIf(not_zero, instr->environment());
3975  __ Move(output_reg, Immediate(0));
3976  __ jmp(&done, Label::kNear);
3977  __ bind(&positive_sign);
3978  }
3979 
3980  // Use truncating instruction (OK because input is positive).
3981  __ cvttsd2si(output_reg, Operand(input_reg));
3982  // Overflow is signalled with minint.
3983  __ cmp(output_reg, 0x1);
3984  DeoptimizeIf(overflow, instr->environment());
3985  __ jmp(&done, Label::kNear);
3986 
3987  // Non-zero negative reaches here.
3988  __ bind(&negative_sign);
3989  // Truncate, then compare and compensate.
3990  __ cvttsd2si(output_reg, Operand(input_reg));
3991  __ Cvtsi2sd(xmm_scratch, output_reg);
3992  __ ucomisd(input_reg, xmm_scratch);
3993  __ j(equal, &done, Label::kNear);
3994  __ sub(output_reg, Immediate(1));
3995  DeoptimizeIf(overflow, instr->environment());
3996 
3997  __ bind(&done);
3998  }
3999 }
4000 
4001 
4002 void LCodeGen::DoMathRound(LMathRound* instr) {
4003  CpuFeatureScope scope(masm(), SSE2);
4004  Register output_reg = ToRegister(instr->result());
4005  XMMRegister input_reg = ToDoubleRegister(instr->value());
4006  XMMRegister xmm_scratch = double_scratch0();
4007  XMMRegister input_temp = ToDoubleRegister(instr->temp());
4008  ExternalReference one_half = ExternalReference::address_of_one_half();
4009  ExternalReference minus_one_half =
4010  ExternalReference::address_of_minus_one_half();
4011 
4012  Label done, round_to_zero, below_one_half, do_not_compensate;
4013  Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear;
4014 
4015  __ movsd(xmm_scratch, Operand::StaticVariable(one_half));
4016  __ ucomisd(xmm_scratch, input_reg);
4017  __ j(above, &below_one_half, Label::kNear);
4018 
4019  // CVTTSD2SI rounds towards zero, since 0.5 <= x, we use floor(0.5 + x).
4020  __ addsd(xmm_scratch, input_reg);
4021  __ cvttsd2si(output_reg, Operand(xmm_scratch));
4022  // Overflow is signalled with minint.
4023  __ cmp(output_reg, 0x1);
4024  __ RecordComment("D2I conversion overflow");
4025  DeoptimizeIf(overflow, instr->environment());
4026  __ jmp(&done, dist);
4027 
4028  __ bind(&below_one_half);
4029  __ movsd(xmm_scratch, Operand::StaticVariable(minus_one_half));
4030  __ ucomisd(xmm_scratch, input_reg);
4031  __ j(below_equal, &round_to_zero, Label::kNear);
4032 
4033  // CVTTSD2SI rounds towards zero, we use ceil(x - (-0.5)) and then
4034  // compare and compensate.
4035  __ movaps(input_temp, input_reg); // Do not alter input_reg.
4036  __ subsd(input_temp, xmm_scratch);
4037  __ cvttsd2si(output_reg, Operand(input_temp));
4038  // Catch minint due to overflow, and to prevent overflow when compensating.
4039  __ cmp(output_reg, 0x1);
4040  __ RecordComment("D2I conversion overflow");
4041  DeoptimizeIf(overflow, instr->environment());
4042 
4043  __ Cvtsi2sd(xmm_scratch, output_reg);
4044  __ ucomisd(xmm_scratch, input_temp);
4045  __ j(equal, &done, dist);
4046  __ sub(output_reg, Immediate(1));
4047  // No overflow because we already ruled out minint.
4048  __ jmp(&done, dist);
4049 
4050  __ bind(&round_to_zero);
4051  // We return 0 for the input range [+0, 0.5[, or [-0.5, 0.5[ if
4052  // we can ignore the difference between a result of -0 and +0.
4053  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
4054  // If the sign is positive, we return +0.
4055  __ movmskpd(output_reg, input_reg);
4056  __ test(output_reg, Immediate(1));
4057  __ RecordComment("Minus zero");
4058  DeoptimizeIf(not_zero, instr->environment());
4059  }
4060  __ Move(output_reg, Immediate(0));
4061  __ bind(&done);
4062 }
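// Net effect of the paths above: inputs >= 0.5 round via floor(x + 0.5) using
// the truncating cvttsd2si; inputs in [-0.5, 0.5) produce 0 (deoptimizing
// when a -0 result would be observable); and inputs below -0.5 truncate
// x + 0.5 toward zero, then subtract one when that truncation was inexact,
// which again yields floor(x + 0.5).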
4063 
4064 
4065 void LCodeGen::DoMathSqrt(LMathSqrt* instr) {
4066  CpuFeatureScope scope(masm(), SSE2);
4067  XMMRegister input_reg = ToDoubleRegister(instr->value());
4068  ASSERT(ToDoubleRegister(instr->result()).is(input_reg));
4069  __ sqrtsd(input_reg, input_reg);
4070 }
4071 
4072 
4073 void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
4074  CpuFeatureScope scope(masm(), SSE2);
4075  XMMRegister xmm_scratch = double_scratch0();
4076  XMMRegister input_reg = ToDoubleRegister(instr->value());
4077  Register scratch = ToRegister(instr->temp());
4078  ASSERT(ToDoubleRegister(instr->result()).is(input_reg));
4079 
4080  // Note that according to ECMA-262 15.8.2.13:
4081  // Math.pow(-Infinity, 0.5) == Infinity
4082  // Math.sqrt(-Infinity) == NaN
4083  Label done, sqrt;
4084  // Check base for -Infinity. According to IEEE-754, single-precision
4085  // -Infinity has the highest 9 bits set and the lowest 23 bits cleared.
4086  __ mov(scratch, 0xFF800000);
4087  __ movd(xmm_scratch, scratch);
4088  __ cvtss2sd(xmm_scratch, xmm_scratch);
4089  __ ucomisd(input_reg, xmm_scratch);
4090  // Comparing -Infinity with NaN results in "unordered", which sets the
4091  // zero flag as if both were equal. However, it also sets the carry flag.
4092  __ j(not_equal, &sqrt, Label::kNear);
4093  __ j(carry, &sqrt, Label::kNear);
4094  // If input is -Infinity, return Infinity.
4095  __ xorps(input_reg, input_reg);
4096  __ subsd(input_reg, xmm_scratch);
4097  __ jmp(&done, Label::kNear);
4098 
4099  // Square root.
4100  __ bind(&sqrt);
4101  __ xorps(xmm_scratch, xmm_scratch);
4102  __ addsd(input_reg, xmm_scratch); // Convert -0 to +0.
4103  __ sqrtsd(input_reg, input_reg);
4104  __ bind(&done);
4105 }
4106 
4107 
4108 void LCodeGen::DoPower(LPower* instr) {
4109  Representation exponent_type = instr->hydrogen()->right()->representation();
4110  // Having marked this as a call, we can use any registers.
4111  // Just make sure that the input/output registers are the expected ones.
4112  ASSERT(!instr->right()->IsDoubleRegister() ||
4113  ToDoubleRegister(instr->right()).is(xmm1));
4114  ASSERT(!instr->right()->IsRegister() ||
4115  ToRegister(instr->right()).is(eax));
4116  ASSERT(ToDoubleRegister(instr->left()).is(xmm2));
4117  ASSERT(ToDoubleRegister(instr->result()).is(xmm3));
4118 
4119  if (exponent_type.IsSmi()) {
4120  MathPowStub stub(MathPowStub::TAGGED);
4121  __ CallStub(&stub);
4122  } else if (exponent_type.IsTagged()) {
4123  Label no_deopt;
4124  __ JumpIfSmi(eax, &no_deopt);
4125  __ CmpObjectType(eax, HEAP_NUMBER_TYPE, ecx);
4126  DeoptimizeIf(not_equal, instr->environment());
4127  __ bind(&no_deopt);
4128  MathPowStub stub(MathPowStub::TAGGED);
4129  __ CallStub(&stub);
4130  } else if (exponent_type.IsInteger32()) {
4131  MathPowStub stub(MathPowStub::INTEGER);
4132  __ CallStub(&stub);
4133  } else {
4134  ASSERT(exponent_type.IsDouble());
4135  MathPowStub stub(MathPowStub::DOUBLE);
4136  __ CallStub(&stub);
4137  }
4138 }
4139 
4140 
4141 void LCodeGen::DoMathLog(LMathLog* instr) {
4142  CpuFeatureScope scope(masm(), SSE2);
4143  ASSERT(instr->value()->Equals(instr->result()));
4144  XMMRegister input_reg = ToDoubleRegister(instr->value());
4145  XMMRegister xmm_scratch = double_scratch0();
4146  Label positive, done, zero;
4147  __ xorps(xmm_scratch, xmm_scratch);
4148  __ ucomisd(input_reg, xmm_scratch);
4149  __ j(above, &positive, Label::kNear);
4150  __ j(not_carry, &zero, Label::kNear);
4151  ExternalReference nan =
4152  ExternalReference::address_of_canonical_non_hole_nan();
4153  __ movsd(input_reg, Operand::StaticVariable(nan));
4154  __ jmp(&done, Label::kNear);
4155  __ bind(&zero);
4156  ExternalReference ninf =
4157  ExternalReference::address_of_negative_infinity();
4158  __ movsd(input_reg, Operand::StaticVariable(ninf));
4159  __ jmp(&done, Label::kNear);
4160  __ bind(&positive);
4161  __ fldln2();
4162  __ sub(Operand(esp), Immediate(kDoubleSize));
4163  __ movsd(Operand(esp, 0), input_reg);
4164  __ fld_d(Operand(esp, 0));
4165  __ fyl2x();
4166  __ fstp_d(Operand(esp, 0));
4167  __ movsd(input_reg, Operand(esp, 0));
4168  __ add(Operand(esp), Immediate(kDoubleSize));
4169  __ bind(&done);
4170 }
4171 
4172 
4173 void LCodeGen::DoMathClz32(LMathClz32* instr) {
4174  CpuFeatureScope scope(masm(), SSE2);
4175  Register input = ToRegister(instr->value());
4176  Register result = ToRegister(instr->result());
4177  Label not_zero_input;
4178  __ bsr(result, input);
4179 
4180  __ j(not_zero, &not_zero_input);
4181  __ Move(result, Immediate(63)); // 63^31 == 32
4182 
4183  __ bind(&not_zero_input);
4184  __ xor_(result, Immediate(31)); // for x in [0..31], 31^x == 31-x.
4185 }
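// bsr returns the bit index of the most significant set bit, so xor-ing with
// 31 turns that index into the count of leading zeros (e.g. input 0x00010000
// gives bsr = 16 and 16 ^ 31 = 15). For a zero input bsr leaves the
// destination undefined, hence the explicit 63 fallback (63 ^ 31 == 32).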
4186 
4187 
4188 void LCodeGen::DoMathExp(LMathExp* instr) {
4189  CpuFeatureScope scope(masm(), SSE2);
4190  XMMRegister input = ToDoubleRegister(instr->value());
4191  XMMRegister result = ToDoubleRegister(instr->result());
4192  XMMRegister temp0 = double_scratch0();
4193  Register temp1 = ToRegister(instr->temp1());
4194  Register temp2 = ToRegister(instr->temp2());
4195 
4196  MathExpGenerator::EmitMathExp(masm(), input, result, temp0, temp1, temp2);
4197 }
4198 
4199 
4200 void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
4201  ASSERT(ToRegister(instr->context()).is(esi));
4202  ASSERT(ToRegister(instr->function()).is(edi));
4203  ASSERT(instr->HasPointerMap());
4204 
4205  Handle<JSFunction> known_function = instr->hydrogen()->known_function();
4206  if (known_function.is_null()) {
4207  LPointerMap* pointers = instr->pointer_map();
4208  SafepointGenerator generator(
4209  this, pointers, Safepoint::kLazyDeopt);
4210  ParameterCount count(instr->arity());
4211  __ InvokeFunction(edi, count, CALL_FUNCTION, generator);
4212  } else {
4213  CallKnownFunction(known_function,
4214  instr->hydrogen()->formal_parameter_count(),
4215  instr->arity(),
4216  instr,
4217  EDI_CONTAINS_TARGET);
4218  }
4219 }
4220 
4221 
4222 void LCodeGen::DoCallFunction(LCallFunction* instr) {
4223  ASSERT(ToRegister(instr->context()).is(esi));
4224  ASSERT(ToRegister(instr->function()).is(edi));
4225  ASSERT(ToRegister(instr->result()).is(eax));
4226 
4227  int arity = instr->arity();
4228  CallFunctionStub stub(arity, instr->hydrogen()->function_flags());
4229  CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
4230 }
4231 
4232 
4233 void LCodeGen::DoCallNew(LCallNew* instr) {
4234  ASSERT(ToRegister(instr->context()).is(esi));
4235  ASSERT(ToRegister(instr->constructor()).is(edi));
4236  ASSERT(ToRegister(instr->result()).is(eax));
4237 
4238  // No cell in ebx for construct type feedback in optimized code
4239  __ mov(ebx, isolate()->factory()->undefined_value());
4240  CallConstructStub stub(NO_CALL_FUNCTION_FLAGS);
4241  __ Move(eax, Immediate(instr->arity()));
4242  CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
4243 }
4244 
4245 
4246 void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
4247  ASSERT(ToRegister(instr->context()).is(esi));
4248  ASSERT(ToRegister(instr->constructor()).is(edi));
4249  ASSERT(ToRegister(instr->result()).is(eax));
4250 
4251  __ Move(eax, Immediate(instr->arity()));
4252  __ mov(ebx, isolate()->factory()->undefined_value());
4253  ElementsKind kind = instr->hydrogen()->elements_kind();
4254  AllocationSiteOverrideMode override_mode =
4255  (AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE)
4256  ? DISABLE_ALLOCATION_SITES
4257  : DONT_OVERRIDE;
4258 
4259  if (instr->arity() == 0) {
4260  ArrayNoArgumentConstructorStub stub(kind, override_mode);
4261  CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
4262  } else if (instr->arity() == 1) {
4263  Label done;
4264  if (IsFastPackedElementsKind(kind)) {
4265  Label packed_case;
4266  // We might need a change here
4267  // look at the first argument
4268  __ mov(ecx, Operand(esp, 0));
4269  __ test(ecx, ecx);
4270  __ j(zero, &packed_case, Label::kNear);
4271 
4272  ElementsKind holey_kind = GetHoleyElementsKind(kind);
4273  ArraySingleArgumentConstructorStub stub(holey_kind, override_mode);
4274  CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
4275  __ jmp(&done, Label::kNear);
4276  __ bind(&packed_case);
4277  }
4278 
4279  ArraySingleArgumentConstructorStub stub(kind, override_mode);
4280  CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
4281  __ bind(&done);
4282  } else {
4283  ArrayNArgumentsConstructorStub stub(kind, override_mode);
4284  CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
4285  }
4286 }
4287 
4288 
4289 void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
4290  ASSERT(ToRegister(instr->context()).is(esi));
4291  CallRuntime(instr->function(), instr->arity(), instr, instr->save_doubles());
4292 }
4293 
4294 
4295 void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) {
4296  Register function = ToRegister(instr->function());
4297  Register code_object = ToRegister(instr->code_object());
4298  __ lea(code_object, FieldOperand(code_object, Code::kHeaderSize));
4299  __ mov(FieldOperand(function, JSFunction::kCodeEntryOffset), code_object);
4300 }
4301 
4302 
4303 void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) {
4304  Register result = ToRegister(instr->result());
4305  Register base = ToRegister(instr->base_object());
4306  if (instr->offset()->IsConstantOperand()) {
4307  LConstantOperand* offset = LConstantOperand::cast(instr->offset());
4308  __ lea(result, Operand(base, ToInteger32(offset)));
4309  } else {
4310  Register offset = ToRegister(instr->offset());
4311  __ lea(result, Operand(base, offset, times_1, 0));
4312  }
4313 }
4314 
4315 
4316 void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
4317  Representation representation = instr->representation();
4318 
4319  HObjectAccess access = instr->hydrogen()->access();
4320  int offset = access.offset();
4321 
4322  if (access.IsExternalMemory()) {
4323  ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
4324  MemOperand operand = instr->object()->IsConstantOperand()
4325  ? MemOperand::StaticVariable(
4326  ToExternalReference(LConstantOperand::cast(instr->object())))
4327  : MemOperand(ToRegister(instr->object()), offset);
4328  if (instr->value()->IsConstantOperand()) {
4329  LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
4330  __ mov(operand, Immediate(ToInteger32(operand_value)));
4331  } else {
4332  Register value = ToRegister(instr->value());
4333  __ Store(value, operand, representation);
4334  }
4335  return;
4336  }
4337 
4338  Register object = ToRegister(instr->object());
4339  Handle<Map> transition = instr->transition();
4340  SmiCheck check_needed =
4341  instr->hydrogen()->value()->IsHeapObject()
4342  ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
4343 
4344  ASSERT(!(representation.IsSmi() &&
4345  instr->value()->IsConstantOperand() &&
4346  !IsSmi(LConstantOperand::cast(instr->value()))));
4347  if (representation.IsHeapObject()) {
4348  if (instr->value()->IsConstantOperand()) {
4349  LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
4350  if (chunk_->LookupConstant(operand_value)->HasSmiValue()) {
4351  DeoptimizeIf(no_condition, instr->environment());
4352  }
4353  } else {
4354  if (!instr->hydrogen()->value()->type().IsHeapObject()) {
4355  Register value = ToRegister(instr->value());
4356  __ test(value, Immediate(kSmiTagMask));
4357  DeoptimizeIf(zero, instr->environment());
4358 
4359  // We know that value is a smi now, so we can omit the check below.
4360  check_needed = OMIT_SMI_CHECK;
4361  }
4362  }
4363  } else if (representation.IsDouble()) {
4364  ASSERT(transition.is_null());
4365  ASSERT(access.IsInobject());
4366  ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
4367  if (CpuFeatures::IsSupported(SSE2)) {
4368  CpuFeatureScope scope(masm(), SSE2);
4369  XMMRegister value = ToDoubleRegister(instr->value());
4370  __ movsd(FieldOperand(object, offset), value);
4371  } else {
4372  X87Register value = ToX87Register(instr->value());
4373  X87Mov(FieldOperand(object, offset), value);
4374  }
4375  return;
4376  }
4377 
4378  if (!transition.is_null()) {
4379  if (!instr->hydrogen()->NeedsWriteBarrierForMap()) {
4380  __ mov(FieldOperand(object, HeapObject::kMapOffset), transition);
4381  } else {
4382  Register temp = ToRegister(instr->temp());
4383  Register temp_map = ToRegister(instr->temp_map());
4384  __ mov(temp_map, transition);
4385  __ mov(FieldOperand(object, HeapObject::kMapOffset), temp_map);
4386  // Update the write barrier for the map field.
4387  __ RecordWriteField(object,
4388  HeapObject::kMapOffset,
4389  temp_map,
4390  temp,
4391  GetSaveFPRegsMode(),
4392  OMIT_REMEMBERED_SET,
4393  OMIT_SMI_CHECK);
4394  }
4395  }
4396 
4397  // Do the store.
4398  Register write_register = object;
4399  if (!access.IsInobject()) {
4400  write_register = ToRegister(instr->temp());
4401  __ mov(write_register, FieldOperand(object, JSObject::kPropertiesOffset));
4402  }
4403 
4404  MemOperand operand = FieldOperand(write_register, offset);
4405  if (instr->value()->IsConstantOperand()) {
4406  LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
4407  if (operand_value->IsRegister()) {
4408  Register value = ToRegister(operand_value);
4409  __ Store(value, operand, representation);
4410  } else if (representation.IsInteger32()) {
4411  Immediate immediate = ToImmediate(operand_value, representation);
4412  ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
4413  __ mov(operand, immediate);
4414  } else {
4415  Handle<Object> handle_value = ToHandle(operand_value);
4416  ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
4417  __ mov(operand, handle_value);
4418  }
4419  } else {
4420  Register value = ToRegister(instr->value());
4421  __ Store(value, operand, representation);
4422  }
4423 
4424  if (instr->hydrogen()->NeedsWriteBarrier()) {
4425  Register value = ToRegister(instr->value());
4426  Register temp = access.IsInobject() ? ToRegister(instr->temp()) : object;
4427  // Update the write barrier for the object for in-object properties.
4428  __ RecordWriteField(write_register,
4429  offset,
4430  value,
4431  temp,
4432  GetSaveFPRegsMode(),
4433  EMIT_REMEMBERED_SET,
4434  check_needed);
4435  }
4436 }
4437 
4438 
4439 void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
4440  ASSERT(ToRegister(instr->context()).is(esi));
4441  ASSERT(ToRegister(instr->object()).is(edx));
4442  ASSERT(ToRegister(instr->value()).is(eax));
4443 
4444  __ mov(ecx, instr->name());
4445  Handle<Code> ic = StoreIC::initialize_stub(isolate(), instr->strict_mode());
4446  CallCode(ic, RelocInfo::CODE_TARGET, instr);
4447 }
4448 
4449 
4450 void LCodeGen::ApplyCheckIf(Condition cc, LBoundsCheck* check) {
4451  if (FLAG_debug_code && check->hydrogen()->skip_check()) {
4452  Label done;
4453  __ j(NegateCondition(cc), &done, Label::kNear);
4454  __ int3();
4455  __ bind(&done);
4456  } else {
4457  DeoptimizeIf(cc, check->environment());
4458  }
4459 }
4460 
4461 
4462 void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
4463  if (instr->hydrogen()->skip_check() && !FLAG_debug_code) return;
4464 
4465  if (instr->index()->IsConstantOperand()) {
4466  Immediate immediate =
4467  ToImmediate(LConstantOperand::cast(instr->index()),
4468  instr->hydrogen()->length()->representation());
4469  __ cmp(ToOperand(instr->length()), immediate);
4470  Condition condition =
4471  instr->hydrogen()->allow_equality() ? below : below_equal;
4472  ApplyCheckIf(condition, instr);
4473  } else {
4474  __ cmp(ToRegister(instr->index()), ToOperand(instr->length()));
4475  Condition condition =
4476  instr->hydrogen()->allow_equality() ? above : above_equal;
4477  ApplyCheckIf(condition, instr);
4478  }
4479 }
4480 
4481 
4482 void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
4483  ElementsKind elements_kind = instr->elements_kind();
4484  LOperand* key = instr->key();
4485  if (!key->IsConstantOperand() &&
4486  ExternalArrayOpRequiresTemp(instr->hydrogen()->key()->representation(),
4487  elements_kind)) {
4488  __ SmiUntag(ToRegister(key));
4489  }
4490  Operand operand(BuildFastArrayOperand(
4491  instr->elements(),
4492  key,
4493  instr->hydrogen()->key()->representation(),
4494  elements_kind,
4495  0,
4496  instr->additional_index()));
4497  if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
4498  elements_kind == FLOAT32_ELEMENTS) {
4499  if (CpuFeatures::IsSupported(SSE2)) {
4500  CpuFeatureScope scope(masm(), SSE2);
4501  XMMRegister xmm_scratch = double_scratch0();
4502  __ cvtsd2ss(xmm_scratch, ToDoubleRegister(instr->value()));
4503  __ movss(operand, xmm_scratch);
4504  } else {
4505  __ fld(0);
4506  __ fstp_s(operand);
4507  }
4508  } else if (elements_kind == EXTERNAL_FLOAT64_ELEMENTS ||
4509  elements_kind == FLOAT64_ELEMENTS) {
4510  if (CpuFeatures::IsSupported(SSE2)) {
4511  CpuFeatureScope scope(masm(), SSE2);
4512  __ movsd(operand, ToDoubleRegister(instr->value()));
4513  } else {
4514  X87Mov(operand, ToX87Register(instr->value()));
4515  }
4516  } else {
4517  Register value = ToRegister(instr->value());
4518  switch (elements_kind) {
4519  case EXTERNAL_UINT8_CLAMPED_ELEMENTS:
4520  case EXTERNAL_UINT8_ELEMENTS:
4521  case EXTERNAL_INT8_ELEMENTS:
4522  case UINT8_ELEMENTS:
4523  case INT8_ELEMENTS:
4524  case UINT8_CLAMPED_ELEMENTS:
4525  __ mov_b(operand, value);
4526  break;
4527  case EXTERNAL_INT16_ELEMENTS:
4528  case EXTERNAL_UINT16_ELEMENTS:
4529  case UINT16_ELEMENTS:
4530  case INT16_ELEMENTS:
4531  __ mov_w(operand, value);
4532  break;
4533  case EXTERNAL_INT32_ELEMENTS:
4534  case EXTERNAL_UINT32_ELEMENTS:
4535  case UINT32_ELEMENTS:
4536  case INT32_ELEMENTS:
4537  __ mov(operand, value);
4538  break;
4539  case EXTERNAL_FLOAT32_ELEMENTS:
4540  case EXTERNAL_FLOAT64_ELEMENTS:
4541  case FLOAT32_ELEMENTS:
4542  case FLOAT64_ELEMENTS:
4543  case FAST_SMI_ELEMENTS:
4544  case FAST_ELEMENTS:
4545  case FAST_DOUBLE_ELEMENTS:
4546  case FAST_HOLEY_SMI_ELEMENTS:
4547  case FAST_HOLEY_ELEMENTS:
4548  case FAST_HOLEY_DOUBLE_ELEMENTS:
4549  case DICTIONARY_ELEMENTS:
4550  case SLOPPY_ARGUMENTS_ELEMENTS:
4551  UNREACHABLE();
4552  break;
4553  }
4554  }
4555 }
4556 
4557 
4558 void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
4559  ExternalReference canonical_nan_reference =
4560  ExternalReference::address_of_canonical_non_hole_nan();
4561  Operand double_store_operand = BuildFastArrayOperand(
4562  instr->elements(),
4563  instr->key(),
4564  instr->hydrogen()->key()->representation(),
4565  FAST_DOUBLE_ELEMENTS,
4566  FixedDoubleArray::kHeaderSize - kHeapObjectTag,
4567  instr->additional_index());
4568 
4569  if (CpuFeatures::IsSupported(SSE2)) {
4570  CpuFeatureScope scope(masm(), SSE2);
4571  XMMRegister value = ToDoubleRegister(instr->value());
4572 
4573  if (instr->NeedsCanonicalization()) {
4574  Label have_value;
4575 
4576  __ ucomisd(value, value);
4577  __ j(parity_odd, &have_value, Label::kNear); // NaN.
4578 
4579  __ movsd(value, Operand::StaticVariable(canonical_nan_reference));
4580  __ bind(&have_value);
4581  }
4582 
4583  __ movsd(double_store_operand, value);
4584  } else {
4585  // Can't use SSE2 in the serializer
4586  if (instr->hydrogen()->IsConstantHoleStore()) {
4587  // This means we should store the (double) hole. No floating point
4588  // registers required.
4589  double nan_double = FixedDoubleArray::hole_nan_as_double();
4590  uint64_t int_val = BitCast<uint64_t, double>(nan_double);
4591  int32_t lower = static_cast<int32_t>(int_val);
4592  int32_t upper = static_cast<int32_t>(int_val >> (kBitsPerInt));
4593 
4594  __ mov(double_store_operand, Immediate(lower));
4595  Operand double_store_operand2 = BuildFastArrayOperand(
4596  instr->elements(),
4597  instr->key(),
4598  instr->hydrogen()->key()->representation(),
4599  FAST_DOUBLE_ELEMENTS,
4600  FixedDoubleArray::kHeaderSize - kHeapObjectTag + kPointerSize,
4601  instr->additional_index());
4602  __ mov(double_store_operand2, Immediate(upper));
4603  } else {
4604  Label no_special_nan_handling;
4605  X87Register value = ToX87Register(instr->value());
4606  X87Fxch(value);
4607 
4608  if (instr->NeedsCanonicalization()) {
4609  __ fld(0);
4610  __ fld(0);
4611  __ FCmp();
4612 
4613  __ j(parity_odd, &no_special_nan_handling, Label::kNear);
4614  __ sub(esp, Immediate(kDoubleSize));
4615  __ fst_d(MemOperand(esp, 0));
4616  __ cmp(MemOperand(esp, sizeof(kHoleNanLower32)),
4617  Immediate(kHoleNanUpper32));
4618  __ add(esp, Immediate(kDoubleSize));
4619  Label canonicalize;
4620  __ j(not_equal, &canonicalize, Label::kNear);
4621  __ jmp(&no_special_nan_handling, Label::kNear);
4622  __ bind(&canonicalize);
4623  __ fstp(0);
4624  __ fld_d(Operand::StaticVariable(canonical_nan_reference));
4625  }
4626 
4627  __ bind(&no_special_nan_handling);
4628  __ fst_d(double_store_operand);
4629  }
4630  }
4631 }
4632 
4633 
4634 void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
4635  Register elements = ToRegister(instr->elements());
4636  Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) : no_reg;
4637 
4638  Operand operand = BuildFastArrayOperand(
4639  instr->elements(),
4640  instr->key(),
4641  instr->hydrogen()->key()->representation(),
4642  FAST_ELEMENTS,
4643  FixedArray::kHeaderSize - kHeapObjectTag,
4644  instr->additional_index());
4645  if (instr->value()->IsRegister()) {
4646  __ mov(operand, ToRegister(instr->value()));
4647  } else {
4648  LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
4649  if (IsSmi(operand_value)) {
4650  Immediate immediate = ToImmediate(operand_value, Representation::Smi());
4651  __ mov(operand, immediate);
4652  } else {
4653  ASSERT(!IsInteger32(operand_value));
4654  Handle<Object> handle_value = ToHandle(operand_value);
4655  __ mov(operand, handle_value);
4656  }
4657  }
4658 
4659  if (instr->hydrogen()->NeedsWriteBarrier()) {
4660  ASSERT(instr->value()->IsRegister());
4661  Register value = ToRegister(instr->value());
4662  ASSERT(!instr->key()->IsConstantOperand());
4663  SmiCheck check_needed =
4664  instr->hydrogen()->value()->IsHeapObject()
4665  ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
4666  // Compute address of modified element and store it into key register.
4667  __ lea(key, operand);
4668  __ RecordWrite(elements,
4669  key,
4670  value,
4671  GetSaveFPRegsMode(),
4672  EMIT_REMEMBERED_SET,
4673  check_needed);
4674  }
4675 }
4676 
4677 
4678 void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) {
4679  // By cases...external, fast-double, fast
4680  if (instr->is_typed_elements()) {
4681  DoStoreKeyedExternalArray(instr);
4682  } else if (instr->hydrogen()->value()->representation().IsDouble()) {
4683  DoStoreKeyedFixedDoubleArray(instr);
4684  } else {
4685  DoStoreKeyedFixedArray(instr);
4686  }
4687 }
4688 
4689 
4690 void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
4691  ASSERT(ToRegister(instr->context()).is(esi));
4692  ASSERT(ToRegister(instr->object()).is(edx));
4693  ASSERT(ToRegister(instr->key()).is(ecx));
4694  ASSERT(ToRegister(instr->value()).is(eax));
4695 
4696  Handle<Code> ic = instr->strict_mode() == STRICT
4697  ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
4698  : isolate()->builtins()->KeyedStoreIC_Initialize();
4699  CallCode(ic, RelocInfo::CODE_TARGET, instr);
4700 }
4701 
4702 
4703 void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
4704  Register object = ToRegister(instr->object());
4705  Register temp = ToRegister(instr->temp());
4706  Label no_memento_found;
4707  __ TestJSArrayForAllocationMemento(object, temp, &no_memento_found);
4708  DeoptimizeIf(equal, instr->environment());
4709  __ bind(&no_memento_found);
4710 }
4711 
4712 
4713 void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
4714  Register object_reg = ToRegister(instr->object());
4715 
4716  Handle<Map> from_map = instr->original_map();
4717  Handle<Map> to_map = instr->transitioned_map();
4718  ElementsKind from_kind = instr->from_kind();
4719  ElementsKind to_kind = instr->to_kind();
4720 
4721  Label not_applicable;
4722  bool is_simple_map_transition =
4723  IsSimpleMapChangeTransition(from_kind, to_kind);
4724  Label::Distance branch_distance =
4725  is_simple_map_transition ? Label::kNear : Label::kFar;
4726  __ cmp(FieldOperand(object_reg, HeapObject::kMapOffset), from_map);
4727  __ j(not_equal, &not_applicable, branch_distance);
4728  if (is_simple_map_transition) {
4729  Register new_map_reg = ToRegister(instr->new_map_temp());
4730  __ mov(FieldOperand(object_reg, HeapObject::kMapOffset),
4731  Immediate(to_map));
4732  // Write barrier.
4733  ASSERT_NE(instr->temp(), NULL);
4734  __ RecordWriteForMap(object_reg, to_map, new_map_reg,
4735  ToRegister(instr->temp()),
4736  kDontSaveFPRegs);
4737  } else {
4738  ASSERT(ToRegister(instr->context()).is(esi));
4739  PushSafepointRegistersScope scope(this);
4740  if (!object_reg.is(eax)) {
4741  __ mov(eax, object_reg);
4742  }
4743  __ mov(ebx, to_map);
4744  bool is_js_array = from_map->instance_type() == JS_ARRAY_TYPE;
4745  TransitionElementsKindStub stub(from_kind, to_kind, is_js_array);
4746  __ CallStub(&stub);
4747  RecordSafepointWithRegisters(
4748  instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
4749  }
4750  __ bind(&not_applicable);
4751 }
4752 
4753 
4754 void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
4755  class DeferredStringCharCodeAt V8_FINAL : public LDeferredCode {
4756  public:
4757  DeferredStringCharCodeAt(LCodeGen* codegen,
4758  LStringCharCodeAt* instr,
4759  const X87Stack& x87_stack)
4760  : LDeferredCode(codegen, x87_stack), instr_(instr) { }
4761  virtual void Generate() V8_OVERRIDE {
4762  codegen()->DoDeferredStringCharCodeAt(instr_);
4763  }
4764  virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
4765  private:
4766  LStringCharCodeAt* instr_;
4767  };
4768 
4769  DeferredStringCharCodeAt* deferred =
4770  new(zone()) DeferredStringCharCodeAt(this, instr, x87_stack_);
4771 
4772  StringCharLoadGenerator::Generate(masm(),
4773  factory(),
4774  ToRegister(instr->string()),
4775  ToRegister(instr->index()),
4776  ToRegister(instr->result()),
4777  deferred->entry());
4778  __ bind(deferred->exit());
4779 }
4780 
4781 
4782 void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
4783  Register string = ToRegister(instr->string());
4784  Register result = ToRegister(instr->result());
4785 
4786  // TODO(3095996): Get rid of this. For now, we need to make the
4787  // result register contain a valid pointer because it is already
4788  // contained in the register pointer map.
4789  __ Move(result, Immediate(0));
4790 
4791  PushSafepointRegistersScope scope(this);
4792  __ push(string);
4793  // Push the index as a smi. This is safe because of the checks in
4794  // DoStringCharCodeAt above.
4795  STATIC_ASSERT(String::kMaxLength <= Smi::kMaxValue);
4796  if (instr->index()->IsConstantOperand()) {
4797  Immediate immediate = ToImmediate(LConstantOperand::cast(instr->index()),
4798                                    Representation::Smi());
4799  __ push(immediate);
4800  } else {
4801  Register index = ToRegister(instr->index());
4802  __ SmiTag(index);
4803  __ push(index);
4804  }
4805  CallRuntimeFromDeferred(Runtime::kHiddenStringCharCodeAt, 2,
4806  instr, instr->context());
4807  __ AssertSmi(eax);
4808  __ SmiUntag(eax);
4809  __ StoreToSafepointRegisterSlot(result, eax);
4810 }
4811 
4812 
4813 void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
4814  class DeferredStringCharFromCode V8_FINAL : public LDeferredCode {
4815  public:
4816  DeferredStringCharFromCode(LCodeGen* codegen,
4817  LStringCharFromCode* instr,
4818  const X87Stack& x87_stack)
4819  : LDeferredCode(codegen, x87_stack), instr_(instr) { }
4820  virtual void Generate() V8_OVERRIDE {
4821  codegen()->DoDeferredStringCharFromCode(instr_);
4822  }
4823  virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
4824  private:
4825  LStringCharFromCode* instr_;
4826  };
4827 
4828  DeferredStringCharFromCode* deferred =
4829  new(zone()) DeferredStringCharFromCode(this, instr, x87_stack_);
4830 
4831  ASSERT(instr->hydrogen()->value()->representation().IsInteger32());
4832  Register char_code = ToRegister(instr->char_code());
4833  Register result = ToRegister(instr->result());
4834  ASSERT(!char_code.is(result));
4835 
4836  __ cmp(char_code, String::kMaxOneByteCharCode);
4837  __ j(above, deferred->entry());
4838  __ Move(result, Immediate(factory()->single_character_string_cache()));
4839  __ mov(result, FieldOperand(result,
4840  char_code, times_pointer_size,
4841  FixedArray::kHeaderSize));
4842  __ cmp(result, factory()->undefined_value());
4843  __ j(equal, deferred->entry());
4844  __ bind(deferred->exit());
4845 }
4846 
4847 
4848 void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
4849  Register char_code = ToRegister(instr->char_code());
4850  Register result = ToRegister(instr->result());
4851 
4852  // TODO(3095996): Get rid of this. For now, we need to make the
4853  // result register contain a valid pointer because it is already
4854  // contained in the register pointer map.
4855  __ Move(result, Immediate(0));
4856 
4857  PushSafepointRegistersScope scope(this);
4858  __ SmiTag(char_code);
4859  __ push(char_code);
4860  CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr, instr->context());
4861  __ StoreToSafepointRegisterSlot(result, eax);
4862 }
4863 
4864 
4865 void LCodeGen::DoStringAdd(LStringAdd* instr) {
4866  ASSERT(ToRegister(instr->context()).is(esi));
4867  ASSERT(ToRegister(instr->left()).is(edx));
4868  ASSERT(ToRegister(instr->right()).is(eax));
4869  StringAddStub stub(instr->hydrogen()->flags(),
4870  instr->hydrogen()->pretenure_flag());
4871  CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
4872 }
4873 
4874 
4875 void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
4876  LOperand* input = instr->value();
4877  LOperand* output = instr->result();
4878  ASSERT(input->IsRegister() || input->IsStackSlot());
4879  ASSERT(output->IsDoubleRegister());
4880  if (CpuFeatures::IsSupported(SSE2)) {
4881  CpuFeatureScope scope(masm(), SSE2);
4882  __ Cvtsi2sd(ToDoubleRegister(output), ToOperand(input));
4883  } else if (input->IsRegister()) {
4884  Register input_reg = ToRegister(input);
4885  __ push(input_reg);
4886  X87Mov(ToX87Register(output), Operand(esp, 0), kX87IntOperand);
4887  __ pop(input_reg);
4888  } else {
4889  X87Mov(ToX87Register(output), ToOperand(input), kX87IntOperand);
4890  }
4891 }
4892 
4893 
4894 void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
4895  LOperand* input = instr->value();
4896  LOperand* output = instr->result();
4897  if (CpuFeatures::IsSupported(SSE2)) {
4898  CpuFeatureScope scope(masm(), SSE2);
4899  LOperand* temp = instr->temp();
4900 
4901  __ LoadUint32(ToDoubleRegister(output),
4902  ToRegister(input),
4903  ToDoubleRegister(temp));
4904  } else {
4905  X87Register res = ToX87Register(output);
4906  X87PrepareToWrite(res);
4907  __ LoadUint32NoSSE2(ToRegister(input));
4908  X87CommitWrite(res);
4909  }
4910 }
4911 
4912 
4913 void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
4914  class DeferredNumberTagI V8_FINAL : public LDeferredCode {
4915  public:
4916  DeferredNumberTagI(LCodeGen* codegen,
4917  LNumberTagI* instr,
4918  const X87Stack& x87_stack)
4919  : LDeferredCode(codegen, x87_stack), instr_(instr) { }
4920  virtual void Generate() V8_OVERRIDE {
4921  codegen()->DoDeferredNumberTagIU(instr_, instr_->value(), instr_->temp(),
4922  NULL, SIGNED_INT32);
4923  }
4924  virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
4925  private:
4926  LNumberTagI* instr_;
4927  };
4928 
4929  LOperand* input = instr->value();
4930  ASSERT(input->IsRegister() && input->Equals(instr->result()));
4931  Register reg = ToRegister(input);
4932 
4933  DeferredNumberTagI* deferred =
4934  new(zone()) DeferredNumberTagI(this, instr, x87_stack_);
4935  __ SmiTag(reg);
4936  __ j(overflow, deferred->entry());
4937  __ bind(deferred->exit());
4938 }
4939 
4940 
4941 void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
4942  class DeferredNumberTagU V8_FINAL : public LDeferredCode {
4943  public:
4944  DeferredNumberTagU(LCodeGen* codegen,
4945  LNumberTagU* instr,
4946  const X87Stack& x87_stack)
4947  : LDeferredCode(codegen, x87_stack), instr_(instr) { }
4948  virtual void Generate() V8_OVERRIDE {
4949  codegen()->DoDeferredNumberTagIU(instr_, instr_->value(), instr_->temp1(),
4950  instr_->temp2(), UNSIGNED_INT32);
4951  }
4952  virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
4953  private:
4954  LNumberTagU* instr_;
4955  };
4956 
4957  LOperand* input = instr->value();
4958  ASSERT(input->IsRegister() && input->Equals(instr->result()));
4959  Register reg = ToRegister(input);
4960 
4961  DeferredNumberTagU* deferred =
4962  new(zone()) DeferredNumberTagU(this, instr, x87_stack_);
4963  __ cmp(reg, Immediate(Smi::kMaxValue));
4964  __ j(above, deferred->entry());
4965  __ SmiTag(reg);
4966  __ bind(deferred->exit());
4967 }
4968 
4969 
4970 void LCodeGen::DoDeferredNumberTagIU(LInstruction* instr,
4971  LOperand* value,
4972  LOperand* temp1,
4973  LOperand* temp2,
4974  IntegerSignedness signedness) {
4975  Label done, slow;
4976  Register reg = ToRegister(value);
4977  Register tmp = ToRegister(temp1);
4978  XMMRegister xmm_scratch = double_scratch0();
4979 
4980  if (signedness == SIGNED_INT32) {
4981  // There was overflow, so bits 30 and 31 of the original integer
4982  // disagree. Try to allocate a heap number in new space and store
4983  // the value in there. If that fails, call the runtime system.
4984  __ SmiUntag(reg);
4985  __ xor_(reg, 0x80000000);
4986  if (CpuFeatures::IsSupported(SSE2)) {
4987  CpuFeatureScope feature_scope(masm(), SSE2);
4988  __ Cvtsi2sd(xmm_scratch, Operand(reg));
4989  } else {
4990  __ push(reg);
4991  __ fild_s(Operand(esp, 0));
4992  __ pop(reg);
4993  }
4994  } else {
4995  if (CpuFeatures::IsSupported(SSE2)) {
4996  CpuFeatureScope feature_scope(masm(), SSE2);
4997  __ LoadUint32(xmm_scratch, reg, ToDoubleRegister(temp2));
4998  } else {
4999  // There's no fild variant for unsigned values, so zero-extend to a 64-bit
5000  // int manually.
5001  __ push(Immediate(0));
5002  __ push(reg);
5003  __ fild_d(Operand(esp, 0));
5004  __ pop(reg);
5005  __ pop(reg);
5006  }
5007  }
5008 
5009  if (FLAG_inline_new) {
5010  __ AllocateHeapNumber(reg, tmp, no_reg, &slow);
5011  __ jmp(&done, Label::kNear);
5012  }
5013 
5014  // Slow case: Call the runtime system to do the number allocation.
5015  __ bind(&slow);
5016  {
5017  // TODO(3095996): Put a valid pointer value in the stack slot where the
5018  // result register is stored, as this register is in the pointer map, but
5019  // contains an integer value.
5020  __ Move(reg, Immediate(0));
5021 
5022  // Preserve the value of all registers.
5023  PushSafepointRegistersScope scope(this);
5024 
5025  // NumberTagI and NumberTagD use the context from the frame, rather than
5026  // the environment's HContext or HInlinedContext value.
5027  // They only call Runtime::kHiddenAllocateHeapNumber.
5028  // The corresponding HChange instructions are added in a phase that does
5029  // not have easy access to the local context.
5030  __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
5031  __ CallRuntimeSaveDoubles(Runtime::kHiddenAllocateHeapNumber);
5032  RecordSafepointWithRegisters(
5033  instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
5034  __ StoreToSafepointRegisterSlot(reg, eax);
5035  }
5036 
5037  // Done. Store the value from xmm_scratch into the value field of the
5038  // allocated heap number.
5039  __ bind(&done);
5041  CpuFeatureScope feature_scope(masm(), SSE2);
5042  __ movsd(FieldOperand(reg, HeapNumber::kValueOffset), xmm_scratch);
5043  } else {
5045  }
5046 }
5047 
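// [Illustrative sketch -- not part of lithium-codegen-ia32.cc.]
// The x87 path above has no "load unsigned int" instruction, so it builds a
// 64-bit integer on the stack (high word 0, low word the uint32) and uses
// fild_d: the signed 64-bit value is then numerically equal to the unsigned
// 32-bit input. A plain C++ restatement of that identity:
#include <cassert>
#include <cstdint>

int main() {
  uint32_t value = 0xFFFFFFFFu;                     // would be negative as int32
  int64_t widened = static_cast<int64_t>(value);    // zero-extended, like the pushed pair
  double as_double = static_cast<double>(widened);  // what fild_d produces
  assert(as_double == 4294967295.0);
  return 0;
}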
5048 
5049 void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
5050  class DeferredNumberTagD V8_FINAL : public LDeferredCode {
5051  public:
5052  DeferredNumberTagD(LCodeGen* codegen,
5053  LNumberTagD* instr,
5054  const X87Stack& x87_stack)
5055  : LDeferredCode(codegen, x87_stack), instr_(instr) { }
5056  virtual void Generate() V8_OVERRIDE {
5057  codegen()->DoDeferredNumberTagD(instr_);
5058  }
5059  virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
5060  private:
5061  LNumberTagD* instr_;
5062  };
5063 
5064  Register reg = ToRegister(instr->result());
5065 
5066  bool use_sse2 = CpuFeatures::IsSupported(SSE2);
5067  if (!use_sse2) {
5068  // Put the value on top of the x87 stack.
5069  X87Register src = ToX87Register(instr->value());
5070  X87LoadForUsage(src);
5071  }
5072 
5073  DeferredNumberTagD* deferred =
5074  new(zone()) DeferredNumberTagD(this, instr, x87_stack_);
5075  if (FLAG_inline_new) {
5076  Register tmp = ToRegister(instr->temp());
5077  __ AllocateHeapNumber(reg, tmp, no_reg, deferred->entry());
5078  } else {
5079  __ jmp(deferred->entry());
5080  }
5081  __ bind(deferred->exit());
5082  if (use_sse2) {
5083  CpuFeatureScope scope(masm(), SSE2);
5084  XMMRegister input_reg = ToDoubleRegister(instr->value());
5085  __ movsd(FieldOperand(reg, HeapNumber::kValueOffset), input_reg);
5086  } else {
5088  }
5089 }
5090 
5091 
5092 void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
5093  // TODO(3095996): Get rid of this. For now, we need to make the
5094  // result register contain a valid pointer because it is already
5095  // contained in the register pointer map.
5096  Register reg = ToRegister(instr->result());
5097  __ Move(reg, Immediate(0));
5098 
5099  PushSafepointRegistersScope scope(this);
5100  // NumberTagI and NumberTagD use the context from the frame, rather than
5101  // the environment's HContext or HInlinedContext value.
5102  // They only call Runtime::kHiddenAllocateHeapNumber.
5103  // The corresponding HChange instructions are added in a phase that does
5104  // not have easy access to the local context.
5105  __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
5106  __ CallRuntimeSaveDoubles(Runtime::kHiddenAllocateHeapNumber);
5107  RecordSafepointWithRegisters(
5108  instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
5109  __ StoreToSafepointRegisterSlot(reg, eax);
5110 }
5111 
5112 
5113 void LCodeGen::DoSmiTag(LSmiTag* instr) {
5114  HChange* hchange = instr->hydrogen();
5115  Register input = ToRegister(instr->value());
5116  if (hchange->CheckFlag(HValue::kCanOverflow) &&
5117  hchange->value()->CheckFlag(HValue::kUint32)) {
5118  __ test(input, Immediate(0xc0000000));
5119  DeoptimizeIf(not_zero, instr->environment());
5120  }
5121  __ SmiTag(input);
5122  if (hchange->CheckFlag(HValue::kCanOverflow) &&
5123  !hchange->value()->CheckFlag(HValue::kUint32)) {
5124  DeoptimizeIf(overflow, instr->environment());
5125  }
5126 }
5127 
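// [Illustrative sketch -- not part of lithium-codegen-ia32.cc.]
// For a uint32 input, DoSmiTag above tests the value against 0xc0000000: if
// either of the two top bits is set the value is >= 2^30 and cannot be
// represented as a (non-negative) smi after the 1-bit tag shift, so the code
// deoptimizes. The helper name is hypothetical; it restates that predicate:
#include <cassert>
#include <cstdint>

static bool Uint32FitsInSmi(uint32_t value) {
  return (value & 0xc0000000u) == 0;  // equivalent to value <= 2^30 - 1
}

int main() {
  assert(Uint32FitsInSmi(0x3fffffffu));
  assert(!Uint32FitsInSmi(0x40000000u));
  assert(!Uint32FitsInSmi(0x80000000u));
  return 0;
}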
5128 
5129 void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
5130  LOperand* input = instr->value();
5131  Register result = ToRegister(input);
5132  ASSERT(input->IsRegister() && input->Equals(instr->result()));
5133  if (instr->needs_check()) {
5134  __ test(result, Immediate(kSmiTagMask));
5135  DeoptimizeIf(not_zero, instr->environment());
5136  } else {
5137  __ AssertSmi(result);
5138  }
5139  __ SmiUntag(result);
5140 }
5141 
5142 
5143 void LCodeGen::EmitNumberUntagDNoSSE2(Register input_reg,
5144  Register temp_reg,
5145  X87Register res_reg,
5146  bool can_convert_undefined_to_nan,
5147  bool deoptimize_on_minus_zero,
5148  LEnvironment* env,
5149  NumberUntagDMode mode) {
5150  Label load_smi, done;
5151 
5152  X87PrepareToWrite(res_reg);
5153  if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
5154  // Smi check.
5155  __ JumpIfSmi(input_reg, &load_smi, Label::kNear);
5156 
5157  // Heap number map check.
5158  __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
5159  factory()->heap_number_map());
5160  if (!can_convert_undefined_to_nan) {
5161  DeoptimizeIf(not_equal, env);
5162  } else {
5163  Label heap_number, convert;
5164  __ j(equal, &heap_number, Label::kNear);
5165 
5166  // Convert undefined (or hole) to NaN.
5167  __ cmp(input_reg, factory()->undefined_value());
5168  DeoptimizeIf(not_equal, env);
5169 
5170  __ bind(&convert);
5171  ExternalReference nan =
5172  ExternalReference::address_of_canonical_non_hole_nan();
5173  __ fld_d(Operand::StaticVariable(nan));
5174  __ jmp(&done, Label::kNear);
5175 
5176  __ bind(&heap_number);
5177  }
5178  // Heap number to x87 conversion.
5179  __ fld_d(FieldOperand(input_reg, HeapNumber::kValueOffset));
5180  if (deoptimize_on_minus_zero) {
5181  __ fldz();
5182  __ FCmp();
5183  __ fld_d(FieldOperand(input_reg, HeapNumber::kValueOffset));
5184  __ j(not_zero, &done, Label::kNear);
5185 
5186  // Use general purpose registers to check if we have -0.0
5187  __ mov(temp_reg, FieldOperand(input_reg, HeapNumber::kExponentOffset));
5188  __ test(temp_reg, Immediate(HeapNumber::kSignMask));
5189  __ j(zero, &done, Label::kNear);
5190 
5191  // Pop FPU stack before deoptimizing.
5192  __ fstp(0);
5193  DeoptimizeIf(not_zero, env);
5194  }
5195  __ jmp(&done, Label::kNear);
5196  } else {
5197  ASSERT(mode == NUMBER_CANDIDATE_IS_SMI);
5198  }
5199 
5200  __ bind(&load_smi);
5201  // Clobbering a temp is faster than re-tagging the
5202  // input register since we avoid dependencies.
5203  __ mov(temp_reg, input_reg);
5204  __ SmiUntag(temp_reg); // Untag smi before converting to float.
5205  __ push(temp_reg);
5206  __ fild_s(Operand(esp, 0));
5207  __ add(esp, Immediate(kPointerSize));
5208  __ bind(&done);
5209  X87CommitWrite(res_reg);
5210 }
5211 
5212 
5213 void LCodeGen::EmitNumberUntagD(Register input_reg,
5214  Register temp_reg,
5215  XMMRegister result_reg,
5216  bool can_convert_undefined_to_nan,
5217  bool deoptimize_on_minus_zero,
5218  LEnvironment* env,
5219  NumberUntagDMode mode) {
5220  Label convert, load_smi, done;
5221 
5222  if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
5223  // Smi check.
5224  __ JumpIfSmi(input_reg, &load_smi, Label::kNear);
5225 
5226  // Heap number map check.
5227  __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
5228  factory()->heap_number_map());
5229  if (can_convert_undefined_to_nan) {
5230  __ j(not_equal, &convert, Label::kNear);
5231  } else {
5232  DeoptimizeIf(not_equal, env);
5233  }
5234 
5235  // Heap number to XMM conversion.
5236  __ movsd(result_reg, FieldOperand(input_reg, HeapNumber::kValueOffset));
5237 
5238  if (deoptimize_on_minus_zero) {
5239  XMMRegister xmm_scratch = double_scratch0();
5240  __ xorps(xmm_scratch, xmm_scratch);
5241  __ ucomisd(result_reg, xmm_scratch);
5242  __ j(not_zero, &done, Label::kNear);
5243  __ movmskpd(temp_reg, result_reg);
5244  __ test_b(temp_reg, 1);
5245  DeoptimizeIf(not_zero, env);
5246  }
5247  __ jmp(&done, Label::kNear);
5248 
5249  if (can_convert_undefined_to_nan) {
5250  __ bind(&convert);
5251 
5252  // Convert undefined (and hole) to NaN.
5253  __ cmp(input_reg, factory()->undefined_value());
5254  DeoptimizeIf(not_equal, env);
5255 
5256  ExternalReference nan =
5257  ExternalReference::address_of_canonical_non_hole_nan();
5258  __ movsd(result_reg, Operand::StaticVariable(nan));
5259  __ jmp(&done, Label::kNear);
5260  }
5261  } else {
5262  ASSERT(mode == NUMBER_CANDIDATE_IS_SMI);
5263  }
5264 
5265  __ bind(&load_smi);
5266  // Smi to XMM conversion. Clobbering a temp is faster than re-tagging the
5267  // input register since we avoid dependencies.
5268  __ mov(temp_reg, input_reg);
5269  __ SmiUntag(temp_reg); // Untag smi before converting to float.
5270  __ Cvtsi2sd(result_reg, Operand(temp_reg));
5271  __ bind(&done);
5272 }
5273 
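// [Illustrative sketch -- not part of lithium-codegen-ia32.cc.]
// The deoptimize_on_minus_zero path above cannot use a plain comparison,
// because -0.0 == 0.0 under ucomisd. It therefore compares against zero first
// and then inspects the sign bit with movmskpd. The same distinction in
// portable C++:
#include <cassert>
#include <cmath>

int main() {
  double minus_zero = -0.0;
  assert(minus_zero == 0.0);          // comparison alone cannot see the sign
  assert(std::signbit(minus_zero));   // the sign-bit check movmskpd performs
  assert(!std::signbit(0.0));
  return 0;
}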
5274 
5275 void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr, Label* done) {
5276  Register input_reg = ToRegister(instr->value());
5277 
5278  // The input was optimistically untagged; revert it.
5279  STATIC_ASSERT(kSmiTagSize == 1);
5280  __ lea(input_reg, Operand(input_reg, times_2, kHeapObjectTag));
5281 
5282  if (instr->truncating()) {
5283  Label no_heap_number, check_bools, check_false;
5284 
5285  // Heap number map check.
5286  __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
5287  factory()->heap_number_map());
5288  __ j(not_equal, &no_heap_number, Label::kNear);
5289  __ TruncateHeapNumberToI(input_reg, input_reg);
5290  __ jmp(done);
5291 
5292  __ bind(&no_heap_number);
5293  // Check for Oddballs. Undefined/False is converted to zero and True to one
5294  // for truncating conversions.
5295  __ cmp(input_reg, factory()->undefined_value());
5296  __ j(not_equal, &check_bools, Label::kNear);
5297  __ Move(input_reg, Immediate(0));
5298  __ jmp(done);
5299 
5300  __ bind(&check_bools);
5301  __ cmp(input_reg, factory()->true_value());
5302  __ j(not_equal, &check_false, Label::kNear);
5303  __ Move(input_reg, Immediate(1));
5304  __ jmp(done);
5305 
5306  __ bind(&check_false);
5307  __ cmp(input_reg, factory()->false_value());
5308  __ RecordComment("Deferred TaggedToI: cannot truncate");
5309  DeoptimizeIf(not_equal, instr->environment());
5310  __ Move(input_reg, Immediate(0));
5311  } else {
5312  Label bailout;
5313  XMMRegister scratch = (instr->temp() != NULL)
5314  ? ToDoubleRegister(instr->temp())
5315  : no_xmm_reg;
5316  __ TaggedToI(input_reg, input_reg, scratch,
5317  instr->hydrogen()->GetMinusZeroMode(), &bailout);
5318  __ jmp(done);
5319  __ bind(&bailout);
5320  DeoptimizeIf(no_condition, instr->environment());
5321  }
5322 }
5323 
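// [Illustrative sketch -- not part of lithium-codegen-ia32.cc.]
// DoTaggedToI below untags optimistically: SmiUntag is an arithmetic shift
// right by one, and for a heap object (whose tag bit is 1) the shifted-out
// bit sets the carry flag. The deferred path above then rebuilds the pointer
// with lea input_reg, [input_reg * 2 + kHeapObjectTag]. The round trip in
// plain C++, assuming kHeapObjectTag == 1 and kSmiTag == 0 as on ia32:
#include <cassert>
#include <cstdint>

int main() {
  uint32_t heap_object = 0x08041235u;      // some tagged pointer, low bit set
  uint32_t untagged = heap_object >> 1;    // what the optimistic SmiUntag leaves
  bool carry = (heap_object & 1u) != 0;    // the bit shifted into CF
  assert(carry);                           // so we branch to the deferred code
  uint32_t restored = untagged * 2u + 1u;  // lea reg, [reg*2 + kHeapObjectTag]
  assert(restored == heap_object);
  return 0;
}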
5324 
5325 void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
5326  class DeferredTaggedToI V8_FINAL : public LDeferredCode {
5327  public:
5328  DeferredTaggedToI(LCodeGen* codegen,
5329  LTaggedToI* instr,
5330  const X87Stack& x87_stack)
5331  : LDeferredCode(codegen, x87_stack), instr_(instr) { }
5332  virtual void Generate() V8_OVERRIDE {
5333  codegen()->DoDeferredTaggedToI(instr_, done());
5334  }
5335  virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
5336  private:
5337  LTaggedToI* instr_;
5338  };
5339 
5340  LOperand* input = instr->value();
5341  ASSERT(input->IsRegister());
5342  Register input_reg = ToRegister(input);
5343  ASSERT(input_reg.is(ToRegister(instr->result())));
5344 
5345  if (instr->hydrogen()->value()->representation().IsSmi()) {
5346  __ SmiUntag(input_reg);
5347  } else {
5348  DeferredTaggedToI* deferred =
5349  new(zone()) DeferredTaggedToI(this, instr, x87_stack_);
5350  // Optimistically untag the input.
5351  // If the input is a HeapObject, SmiUntag will set the carry flag.
5352  STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
5353  __ SmiUntag(input_reg);
5354  // Branch to deferred code if the input was tagged.
5355  // The deferred code will take care of restoring the tag.
5356  __ j(carry, deferred->entry());
5357  __ bind(deferred->exit());
5358  }
5359 }
5360 
5361 
5362 void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
5363  LOperand* input = instr->value();
5364  ASSERT(input->IsRegister());
5365  LOperand* temp = instr->temp();
5366  ASSERT(temp->IsRegister());
5367  LOperand* result = instr->result();
5368  ASSERT(result->IsDoubleRegister());
5369 
5370  Register input_reg = ToRegister(input);
5371  bool deoptimize_on_minus_zero =
5372  instr->hydrogen()->deoptimize_on_minus_zero();
5373  Register temp_reg = ToRegister(temp);
5374 
5375  HValue* value = instr->hydrogen()->value();
5376  NumberUntagDMode mode = value->representation().IsSmi()
5377  ? NUMBER_CANDIDATE_IS_SMI : NUMBER_CANDIDATE_IS_ANY_TAGGED;
5378 
5380  CpuFeatureScope scope(masm(), SSE2);
5381  XMMRegister result_reg = ToDoubleRegister(result);
5382  EmitNumberUntagD(input_reg,
5383  temp_reg,
5384  result_reg,
5385  instr->hydrogen()->can_convert_undefined_to_nan(),
5386  deoptimize_on_minus_zero,
5387  instr->environment(),
5388  mode);
5389  } else {
5390  EmitNumberUntagDNoSSE2(input_reg,
5391  temp_reg,
5392  ToX87Register(instr->result()),
5393  instr->hydrogen()->can_convert_undefined_to_nan(),
5394  deoptimize_on_minus_zero,
5395  instr->environment(),
5396  mode);
5397  }
5398 }
5399 
5400 
5401 void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
5402  LOperand* input = instr->value();
5403  ASSERT(input->IsDoubleRegister());
5404  LOperand* result = instr->result();
5405  ASSERT(result->IsRegister());
5406  Register result_reg = ToRegister(result);
5407 
5408  if (instr->truncating()) {
5410  CpuFeatureScope scope(masm(), SSE2);
5411  XMMRegister input_reg = ToDoubleRegister(input);
5412  __ TruncateDoubleToI(result_reg, input_reg);
5413  } else {
5414  X87Register input_reg = ToX87Register(input);
5415  X87Fxch(input_reg);
5416  __ TruncateX87TOSToI(result_reg);
5417  }
5418  } else {
5419  Label bailout, done;
5421  CpuFeatureScope scope(masm(), SSE2);
5422  XMMRegister input_reg = ToDoubleRegister(input);
5423  XMMRegister xmm_scratch = double_scratch0();
5424  __ DoubleToI(result_reg, input_reg, xmm_scratch,
5425  instr->hydrogen()->GetMinusZeroMode(), &bailout, Label::kNear);
5426  } else {
5427  X87Register input_reg = ToX87Register(input);
5428  X87Fxch(input_reg);
5429  __ X87TOSToI(result_reg, instr->hydrogen()->GetMinusZeroMode(),
5430  &bailout, Label::kNear);
5431  }
5432  __ jmp(&done, Label::kNear);
5433  __ bind(&bailout);
5434  DeoptimizeIf(no_condition, instr->environment());
5435  __ bind(&done);
5436  }
5437 }
5438 
5439 
5440 void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
5441  LOperand* input = instr->value();
5442  ASSERT(input->IsDoubleRegister());
5443  LOperand* result = instr->result();
5444  ASSERT(result->IsRegister());
5445  Register result_reg = ToRegister(result);
5446 
5447  Label bailout, done;
5449  CpuFeatureScope scope(masm(), SSE2);
5450  XMMRegister input_reg = ToDoubleRegister(input);
5451  XMMRegister xmm_scratch = double_scratch0();
5452  __ DoubleToI(result_reg, input_reg, xmm_scratch,
5453  instr->hydrogen()->GetMinusZeroMode(), &bailout, Label::kNear);
5454  } else {
5455  X87Register input_reg = ToX87Register(input);
5456  X87Fxch(input_reg);
5457  __ X87TOSToI(result_reg, instr->hydrogen()->GetMinusZeroMode(),
5458  &bailout, Label::kNear);
5459  }
5460  __ jmp(&done, Label::kNear);
5461  __ bind(&bailout);
5462  DeoptimizeIf(no_condition, instr->environment());
5463  __ bind(&done);
5464 
5465  __ SmiTag(result_reg);
5466  DeoptimizeIf(overflow, instr->environment());
5467 }
5468 
5469 
5470 void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
5471  LOperand* input = instr->value();
5472  __ test(ToOperand(input), Immediate(kSmiTagMask));
5473  DeoptimizeIf(not_zero, instr->environment());
5474 }
5475 
5476 
5477 void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
5478  if (!instr->hydrogen()->value()->IsHeapObject()) {
5479  LOperand* input = instr->value();
5480  __ test(ToOperand(input), Immediate(kSmiTagMask));
5481  DeoptimizeIf(zero, instr->environment());
5482  }
5483 }
5484 
5485 
5486 void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
5487  Register input = ToRegister(instr->value());
5488  Register temp = ToRegister(instr->temp());
5489 
5490  __ mov(temp, FieldOperand(input, HeapObject::kMapOffset));
5491 
5492  if (instr->hydrogen()->is_interval_check()) {
5493  InstanceType first;
5494  InstanceType last;
5495  instr->hydrogen()->GetCheckInterval(&first, &last);
5496 
5497  __ cmpb(FieldOperand(temp, Map::kInstanceTypeOffset),
5498  static_cast<int8_t>(first));
5499 
5500  // If there is only one type in the interval check for equality.
5501  if (first == last) {
5502  DeoptimizeIf(not_equal, instr->environment());
5503  } else {
5504  DeoptimizeIf(below, instr->environment());
5505  // Omit check for the last type.
5506  if (last != LAST_TYPE) {
5507  __ cmpb(FieldOperand(temp, Map::kInstanceTypeOffset),
5508  static_cast<int8_t>(last));
5509  DeoptimizeIf(above, instr->environment());
5510  }
5511  }
5512  } else {
5513  uint8_t mask;
5514  uint8_t tag;
5515  instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);
5516 
5517  if (IsPowerOf2(mask)) {
5518  ASSERT(tag == 0 || IsPowerOf2(tag));
5519  __ test_b(FieldOperand(temp, Map::kInstanceTypeOffset), mask);
5520  DeoptimizeIf(tag == 0 ? not_zero : zero, instr->environment());
5521  } else {
5522  __ movzx_b(temp, FieldOperand(temp, Map::kInstanceTypeOffset));
5523  __ and_(temp, mask);
5524  __ cmp(temp, tag);
5525  DeoptimizeIf(not_equal, instr->environment());
5526  }
5527  }
5528 }
5529 
5530 
5531 void LCodeGen::DoCheckValue(LCheckValue* instr) {
5532  Handle<HeapObject> object = instr->hydrogen()->object().handle();
5533  if (instr->hydrogen()->object_in_new_space()) {
5534  Register reg = ToRegister(instr->value());
5535  Handle<Cell> cell = isolate()->factory()->NewCell(object);
5536  __ cmp(reg, Operand::ForCell(cell));
5537  } else {
5538  Operand operand = ToOperand(instr->value());
5539  __ cmp(operand, object);
5540  }
5541  DeoptimizeIf(not_equal, instr->environment());
5542 }
5543 
5544 
5545 void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
5546  {
5547  PushSafepointRegistersScope scope(this);
5548  __ push(object);
5549  __ xor_(esi, esi);
5550  __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance);
5551  RecordSafepointWithRegisters(
5552  instr->pointer_map(), 1, Safepoint::kNoLazyDeopt);
5553 
5554  __ test(eax, Immediate(kSmiTagMask));
5555  }
5556  DeoptimizeIf(zero, instr->environment());
5557 }
5558 
5559 
5560 void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
5561  class DeferredCheckMaps V8_FINAL : public LDeferredCode {
5562  public:
5563  DeferredCheckMaps(LCodeGen* codegen,
5564  LCheckMaps* instr,
5565  Register object,
5566  const X87Stack& x87_stack)
5567  : LDeferredCode(codegen, x87_stack), instr_(instr), object_(object) {
5568  SetExit(check_maps());
5569  }
5570  virtual void Generate() V8_OVERRIDE {
5571  codegen()->DoDeferredInstanceMigration(instr_, object_);
5572  }
5573  Label* check_maps() { return &check_maps_; }
5574  virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
5575  private:
5576  LCheckMaps* instr_;
5577  Label check_maps_;
5578  Register object_;
5579  };
5580 
5581  if (instr->hydrogen()->CanOmitMapChecks()) return;
5582 
5583  LOperand* input = instr->value();
5584  ASSERT(input->IsRegister());
5585  Register reg = ToRegister(input);
5586 
5587  DeferredCheckMaps* deferred = NULL;
5588  if (instr->hydrogen()->has_migration_target()) {
5589  deferred = new(zone()) DeferredCheckMaps(this, instr, reg, x87_stack_);
5590  __ bind(deferred->check_maps());
5591  }
5592 
5593  UniqueSet<Map> map_set = instr->hydrogen()->map_set();
5594  Label success;
5595  for (int i = 0; i < map_set.size() - 1; i++) {
5596  Handle<Map> map = map_set.at(i).handle();
5597  __ CompareMap(reg, map);
5598  __ j(equal, &success, Label::kNear);
5599  }
5600 
5601  Handle<Map> map = map_set.at(map_set.size() - 1).handle();
5602  __ CompareMap(reg, map);
5603  if (instr->hydrogen()->has_migration_target()) {
5604  __ j(not_equal, deferred->entry());
5605  } else {
5606  DeoptimizeIf(not_equal, instr->environment());
5607  }
5608 
5609  __ bind(&success);
5610 }
5611 
5612 
5613 void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
5614  CpuFeatureScope scope(masm(), SSE2);
5615  XMMRegister value_reg = ToDoubleRegister(instr->unclamped());
5616  XMMRegister xmm_scratch = double_scratch0();
5617  Register result_reg = ToRegister(instr->result());
5618  __ ClampDoubleToUint8(value_reg, xmm_scratch, result_reg);
5619 }
5620 
5621 
5622 void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
5623  ASSERT(instr->unclamped()->Equals(instr->result()));
5624  Register value_reg = ToRegister(instr->result());
5625  __ ClampUint8(value_reg);
5626 }
5627 
5628 
5629 void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
5630  CpuFeatureScope scope(masm(), SSE2);
5631 
5632  ASSERT(instr->unclamped()->Equals(instr->result()));
5633  Register input_reg = ToRegister(instr->unclamped());
5634  XMMRegister temp_xmm_reg = ToDoubleRegister(instr->temp_xmm());
5635  XMMRegister xmm_scratch = double_scratch0();
5636  Label is_smi, done, heap_number;
5637 
5638  __ JumpIfSmi(input_reg, &is_smi);
5639 
5640  // Check for heap number
5641  __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
5642  factory()->heap_number_map());
5643  __ j(equal, &heap_number, Label::kNear);
5644 
5645  // Check for undefined. Undefined is converted to zero for clamping
5646  // conversions.
5647  __ cmp(input_reg, factory()->undefined_value());
5648  DeoptimizeIf(not_equal, instr->environment());
5649  __ mov(input_reg, 0);
5650  __ jmp(&done, Label::kNear);
5651 
5652  // Heap number
5653  __ bind(&heap_number);
5654  __ movsd(xmm_scratch, FieldOperand(input_reg, HeapNumber::kValueOffset));
5655  __ ClampDoubleToUint8(xmm_scratch, temp_xmm_reg, input_reg);
5656  __ jmp(&done, Label::kNear);
5657 
5658  // smi
5659  __ bind(&is_smi);
5660  __ SmiUntag(input_reg);
5661  __ ClampUint8(input_reg);
5662  __ bind(&done);
5663 }
5664 
5665 
5666 void LCodeGen::DoClampTToUint8NoSSE2(LClampTToUint8NoSSE2* instr) {
5667  Register input_reg = ToRegister(instr->unclamped());
5668  Register result_reg = ToRegister(instr->result());
5669  Register scratch = ToRegister(instr->scratch());
5670  Register scratch2 = ToRegister(instr->scratch2());
5671  Register scratch3 = ToRegister(instr->scratch3());
5672  Label is_smi, done, heap_number, valid_exponent,
5673  largest_value, zero_result, maybe_nan_or_infinity;
5674 
5675  __ JumpIfSmi(input_reg, &is_smi);
5676 
5677  // Check for heap number
5678  __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
5679  factory()->heap_number_map());
5680  __ j(equal, &heap_number, Label::kNear);
5681 
5682  // Check for undefined. Undefined is converted to zero for clamping
5683  // conversions.
5684  __ cmp(input_reg, factory()->undefined_value());
5685  DeoptimizeIf(not_equal, instr->environment());
5686  __ jmp(&zero_result, Label::kNear);
5687 
5688  // Heap number
5689  __ bind(&heap_number);
5690 
5691  // Surprisingly, all of the hand-crafted bit manipulations below are much
5692  // faster than the x86 FPU built-in instruction, especially since "banker's
5693  // rounding" would be very expensive on top of that.
5694 
5695  // Get exponent word.
5696  __ mov(scratch, FieldOperand(input_reg, HeapNumber::kExponentOffset));
5697  __ mov(scratch3, FieldOperand(input_reg, HeapNumber::kMantissaOffset));
5698 
5699  // Test for negative values --> clamp to zero
5700  __ test(scratch, scratch);
5701  __ j(negative, &zero_result, Label::kNear);
5702 
5703  // Get exponent alone in scratch2.
5704  __ mov(scratch2, scratch);
5705  __ and_(scratch2, HeapNumber::kExponentMask);
5706  __ shr(scratch2, HeapNumber::kExponentShift);
5707  __ j(zero, &zero_result, Label::kNear);
5708  __ sub(scratch2, Immediate(HeapNumber::kExponentBias - 1));
5709  __ j(negative, &zero_result, Label::kNear);
5710 
5711  const uint32_t non_int8_exponent = 7;
5712  __ cmp(scratch2, Immediate(non_int8_exponent + 1));
5713  // If the exponent is too big, check for special values.
5714  __ j(greater, &maybe_nan_or_infinity, Label::kNear);
5715 
5716  __ bind(&valid_exponent);
5717  // Exponent word in scratch, exponent in scratch2. We know that 0 <= exponent
5718  // < 7. The shift bias is the number of bits to shift the mantissa so that,
5719  // with an exponent of 7, the top-most one ends up in bit 30, allowing
5720  // detection of the rounding overflow from 255.5 to 256 (bit 31 goes from 0
5721  // to 1).
5722  int shift_bias = (30 - HeapNumber::kExponentShift) - 7 - 1;
5723  __ lea(result_reg, MemOperand(scratch2, shift_bias));
5724  // Here result_reg (ecx) is the shift, scratch is the exponent word. Get the
5725  // top bits of the mantissa.
5726  __ and_(scratch, HeapNumber::kMantissaMask);
5727  // Put back the implicit 1 of the mantissa
5728  __ or_(scratch, 1 << HeapNumber::kExponentShift);
5729  // Shift up to round
5730  __ shl_cl(scratch);
5731  // Use "banker's rounding" to spec: If fractional part of number is 0.5, then
5732  // use the bit in the "ones" place and add it to the "halves" place, which has
5733  // the effect of rounding to even.
5734  __ mov(scratch2, scratch);
5735  const uint32_t one_half_bit_shift = 30 - sizeof(uint8_t) * 8;
5736  const uint32_t one_bit_shift = one_half_bit_shift + 1;
5737  __ and_(scratch2, Immediate((1 << one_bit_shift) - 1));
5738  __ cmp(scratch2, Immediate(1 << one_half_bit_shift));
5739  Label no_round;
5740  __ j(less, &no_round, Label::kNear);
5741  Label round_up;
5742  __ mov(scratch2, Immediate(1 << one_half_bit_shift));
5743  __ j(greater, &round_up, Label::kNear);
5744  __ test(scratch3, scratch3);
5745  __ j(not_zero, &round_up, Label::kNear);
5746  __ mov(scratch2, scratch);
5747  __ and_(scratch2, Immediate(1 << one_bit_shift));
5748  __ shr(scratch2, 1);
5749  __ bind(&round_up);
5750  __ add(scratch, scratch2);
5751  __ j(overflow, &largest_value, Label::kNear);
5752  __ bind(&no_round);
5753  __ shr(scratch, 23);
5754  __ mov(result_reg, scratch);
5755  __ jmp(&done, Label::kNear);
5756 
5757  __ bind(&maybe_nan_or_infinity);
5758  // Check for NaN/Infinity, all other values map to 255
5759  __ cmp(scratch2, Immediate(HeapNumber::kInfinityOrNanExponent + 1));
5760  __ j(not_equal, &largest_value, Label::kNear);
5761 
5762  // Check for NaN, which differs from Infinity in that at least one mantissa
5763  // bit is set.
5764  __ and_(scratch, HeapNumber::kMantissaMask);
5765  __ or_(scratch, FieldOperand(input_reg, HeapNumber::kMantissaOffset));
5766  __ j(not_zero, &zero_result, Label::kNear); // M!=0 --> NaN
5767  // Infinity -> Fall through to map to 255.
5768 
5769  __ bind(&largest_value);
5770  __ mov(result_reg, Immediate(255));
5771  __ jmp(&done, Label::kNear);
5772 
5773  __ bind(&zero_result);
5774  __ xor_(result_reg, result_reg);
5775  __ jmp(&done, Label::kNear);
5776 
5777  // smi
5778  __ bind(&is_smi);
5779  if (!input_reg.is(result_reg)) {
5780  __ mov(result_reg, input_reg);
5781  }
5782  __ SmiUntag(result_reg);
5783  __ ClampUint8(result_reg);
5784  __ bind(&done);
5785 }
5786 
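// [Illustrative sketch -- not part of lithium-codegen-ia32.cc.]
// The bit-twiddling above reproduces, without the FPU, the usual "clamp to
// uint8 with round-half-to-even" behaviour: NaN, negatives and tiny values
// map to 0, values of 255 or more (including +Infinity) map to 255, and ties
// round to the even neighbour. A reference version of that behaviour, using
// the standard library rather than the hand-crafted bit manipulation:
#include <cassert>
#include <cfenv>
#include <cmath>
#include <cstdint>

static uint8_t ClampDoubleToUint8Reference(double value) {
  if (!(value > 0.0)) return 0;        // NaN, -0.0 and negatives clamp to 0
  if (value >= 255.0) return 255;      // includes +Infinity
  std::fesetround(FE_TONEAREST);       // round-half-to-even ("banker's rounding")
  return static_cast<uint8_t>(std::nearbyint(value));
}

int main() {
  assert(ClampDoubleToUint8Reference(-1.5) == 0);
  assert(ClampDoubleToUint8Reference(0.5) == 0);      // tie rounds to even (0)
  assert(ClampDoubleToUint8Reference(1.5) == 2);      // tie rounds to even (2)
  assert(ClampDoubleToUint8Reference(254.5) == 254);  // tie rounds to even (254)
  assert(ClampDoubleToUint8Reference(1000.0) == 255);
  return 0;
}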
5787 
5788 void LCodeGen::DoDoubleBits(LDoubleBits* instr) {
5789  CpuFeatureScope scope(masm(), SSE2);
5790  XMMRegister value_reg = ToDoubleRegister(instr->value());
5791  Register result_reg = ToRegister(instr->result());
5792  if (instr->hydrogen()->bits() == HDoubleBits::HIGH) {
5794  CpuFeatureScope scope2(masm(), SSE4_1);
5795  __ pextrd(result_reg, value_reg, 1);
5796  } else {
5797  XMMRegister xmm_scratch = double_scratch0();
5798  __ pshufd(xmm_scratch, value_reg, 1);
5799  __ movd(result_reg, xmm_scratch);
5800  }
5801  } else {
5802  __ movd(result_reg, value_reg);
5803  }
5804 }
5805 
5806 
5807 void LCodeGen::DoConstructDouble(LConstructDouble* instr) {
5808  Register hi_reg = ToRegister(instr->hi());
5809  Register lo_reg = ToRegister(instr->lo());
5810  XMMRegister result_reg = ToDoubleRegister(instr->result());
5811  CpuFeatureScope scope(masm(), SSE2);
5812 
5814  CpuFeatureScope scope2(masm(), SSE4_1);
5815  __ movd(result_reg, lo_reg);
5816  __ pinsrd(result_reg, hi_reg, 1);
5817  } else {
5818  XMMRegister xmm_scratch = double_scratch0();
5819  __ movd(result_reg, hi_reg);
5820  __ psllq(result_reg, 32);
5821  __ movd(xmm_scratch, lo_reg);
5822  __ orps(result_reg, xmm_scratch);
5823  }
5824 }
5825 
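// [Illustrative sketch -- not part of lithium-codegen-ia32.cc.]
// DoDoubleBits and DoConstructDouble above move between a double and its two
// 32-bit halves (high word = sign, exponent and top of the mantissa; low word
// = the rest). Without SSE4.1 the code shifts and ORs XMM registers; the
// equivalent operation on plain integers:
#include <cassert>
#include <cstdint>
#include <cstring>

static double ConstructDouble(uint32_t hi, uint32_t lo) {
  uint64_t bits = (static_cast<uint64_t>(hi) << 32) | lo;
  double result;
  std::memcpy(&result, &bits, sizeof(result));  // reinterpret the bit pattern
  return result;
}

int main() {
  // 1.0 is 0x3FF0000000000000 in IEEE-754 double format.
  assert(ConstructDouble(0x3FF00000u, 0u) == 1.0);
  // The high word alone distinguishes -0.0 from 0.0 (they still compare equal).
  assert(ConstructDouble(0x80000000u, 0u) == 0.0);
  return 0;
}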
5826 
5827 void LCodeGen::DoAllocate(LAllocate* instr) {
5828  class DeferredAllocate V8_FINAL : public LDeferredCode {
5829  public:
5830  DeferredAllocate(LCodeGen* codegen,
5831  LAllocate* instr,
5832  const X87Stack& x87_stack)
5833  : LDeferredCode(codegen, x87_stack), instr_(instr) { }
5834  virtual void Generate() V8_OVERRIDE {
5835  codegen()->DoDeferredAllocate(instr_);
5836  }
5837  virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
5838  private:
5839  LAllocate* instr_;
5840  };
5841 
5842  DeferredAllocate* deferred =
5843  new(zone()) DeferredAllocate(this, instr, x87_stack_);
5844 
5845  Register result = ToRegister(instr->result());
5846  Register temp = ToRegister(instr->temp());
5847 
5848  // Allocate memory for the object.
5849  AllocationFlags flags = TAG_OBJECT;
5850  if (instr->hydrogen()->MustAllocateDoubleAligned()) {
5851  flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
5852  }
5853  if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
5854  ASSERT(!instr->hydrogen()->IsOldDataSpaceAllocation());
5855  ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
5856  flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_POINTER_SPACE);
5857  } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
5858  ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
5859  flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_DATA_SPACE);
5860  }
5861 
5862  if (instr->size()->IsConstantOperand()) {
5863  int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
5864  if (size <= Page::kMaxRegularHeapObjectSize) {
5865  __ Allocate(size, result, temp, no_reg, deferred->entry(), flags);
5866  } else {
5867  __ jmp(deferred->entry());
5868  }
5869  } else {
5870  Register size = ToRegister(instr->size());
5871  __ Allocate(size, result, temp, no_reg, deferred->entry(), flags);
5872  }
5873 
5874  __ bind(deferred->exit());
5875 
5876  if (instr->hydrogen()->MustPrefillWithFiller()) {
5877  if (instr->size()->IsConstantOperand()) {
5878  int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
5879  __ mov(temp, (size / kPointerSize) - 1);
5880  } else {
5881  temp = ToRegister(instr->size());
5882  __ shr(temp, kPointerSizeLog2);
5883  __ dec(temp);
5884  }
5885  Label loop;
5886  __ bind(&loop);
5887  __ mov(FieldOperand(result, temp, times_pointer_size, 0),
5888  isolate()->factory()->one_pointer_filler_map());
5889  __ dec(temp);
5890  __ j(not_zero, &loop);
5891  }
5892 }
5893 
5894 
5895 void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
5896  Register result = ToRegister(instr->result());
5897 
5898  // TODO(3095996): Get rid of this. For now, we need to make the
5899  // result register contain a valid pointer because it is already
5900  // contained in the register pointer map.
5901  __ Move(result, Immediate(Smi::FromInt(0)));
5902 
5903  PushSafepointRegistersScope scope(this);
5904  if (instr->size()->IsRegister()) {
5905  Register size = ToRegister(instr->size());
5906  ASSERT(!size.is(result));
5907  __ SmiTag(ToRegister(instr->size()));
5908  __ push(size);
5909  } else {
5910  int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
5911  __ push(Immediate(Smi::FromInt(size)));
5912  }
5913 
5914  int flags = AllocateDoubleAlignFlag::encode(
5915  instr->hydrogen()->MustAllocateDoubleAligned());
5916  if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
5917  ASSERT(!instr->hydrogen()->IsOldDataSpaceAllocation());
5918  ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
5919  flags = AllocateTargetSpace::update(flags, OLD_POINTER_SPACE);
5920  } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
5921  ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
5922  flags = AllocateTargetSpace::update(flags, OLD_DATA_SPACE);
5923  } else {
5924  flags = AllocateTargetSpace::update(flags, NEW_SPACE);
5925  }
5926  __ push(Immediate(Smi::FromInt(flags)));
5927 
5928  CallRuntimeFromDeferred(
5929  Runtime::kHiddenAllocateInTargetSpace, 2, instr, instr->context());
5930  __ StoreToSafepointRegisterSlot(result, eax);
5931 }
5932 
5933 
5934 void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
5935  ASSERT(ToRegister(instr->value()).is(eax));
5936  __ push(eax);
5937  CallRuntime(Runtime::kToFastProperties, 1, instr);
5938 }
5939 
5940 
5941 void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
5942  ASSERT(ToRegister(instr->context()).is(esi));
5943  Label materialized;
5944  // Registers will be used as follows:
5945  // ecx = literals array.
5946  // ebx = regexp literal.
5947  // eax = regexp literal clone.
5948  // esi = context.
5949  int literal_offset =
5950  FixedArray::OffsetOfElementAt(instr->hydrogen()->literal_index());
5951  __ LoadHeapObject(ecx, instr->hydrogen()->literals());
5952  __ mov(ebx, FieldOperand(ecx, literal_offset));
5953  __ cmp(ebx, factory()->undefined_value());
5954  __ j(not_equal, &materialized, Label::kNear);
5955 
5956  // Create regexp literal using runtime function
5957  // Result will be in eax.
5958  __ push(ecx);
5959  __ push(Immediate(Smi::FromInt(instr->hydrogen()->literal_index())));
5960  __ push(Immediate(instr->hydrogen()->pattern()));
5961  __ push(Immediate(instr->hydrogen()->flags()));
5962  CallRuntime(Runtime::kHiddenMaterializeRegExpLiteral, 4, instr);
5963  __ mov(ebx, eax);
5964 
5965  __ bind(&materialized);
5966  int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
5967  Label allocated, runtime_allocate;
5968  __ Allocate(size, eax, ecx, edx, &runtime_allocate, TAG_OBJECT);
5969  __ jmp(&allocated, Label::kNear);
5970 
5971  __ bind(&runtime_allocate);
5972  __ push(ebx);
5973  __ push(Immediate(Smi::FromInt(size)));
5974  CallRuntime(Runtime::kHiddenAllocateInNewSpace, 1, instr);
5975  __ pop(ebx);
5976 
5977  __ bind(&allocated);
5978  // Copy the content into the newly allocated memory.
5979  // (Unroll copy loop once for better throughput).
5980  for (int i = 0; i < size - kPointerSize; i += 2 * kPointerSize) {
5981  __ mov(edx, FieldOperand(ebx, i));
5982  __ mov(ecx, FieldOperand(ebx, i + kPointerSize));
5983  __ mov(FieldOperand(eax, i), edx);
5984  __ mov(FieldOperand(eax, i + kPointerSize), ecx);
5985  }
5986  if ((size % (2 * kPointerSize)) != 0) {
5987  __ mov(edx, FieldOperand(ebx, size - kPointerSize));
5988  __ mov(FieldOperand(eax, size - kPointerSize), edx);
5989  }
5990 }
5991 
5992 
5993 void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
5994  ASSERT(ToRegister(instr->context()).is(esi));
5995  // Use the fast case closure allocation code that allocates in new
5996  // space for nested functions that don't need literals cloning.
5997  bool pretenure = instr->hydrogen()->pretenure();
5998  if (!pretenure && instr->hydrogen()->has_no_literals()) {
5999  FastNewClosureStub stub(instr->hydrogen()->strict_mode(),
6000  instr->hydrogen()->is_generator());
6001  __ mov(ebx, Immediate(instr->hydrogen()->shared_info()));
6002  CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
6003  } else {
6004  __ push(esi);
6005  __ push(Immediate(instr->hydrogen()->shared_info()));
6006  __ push(Immediate(pretenure ? factory()->true_value()
6007  : factory()->false_value()));
6008  CallRuntime(Runtime::kHiddenNewClosure, 3, instr);
6009  }
6010 }
6011 
6012 
6013 void LCodeGen::DoTypeof(LTypeof* instr) {
6014  ASSERT(ToRegister(instr->context()).is(esi));
6015  LOperand* input = instr->value();
6016  EmitPushTaggedOperand(input);
6017  CallRuntime(Runtime::kTypeof, 1, instr);
6018 }
6019 
6020 
6021 void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
6022  Register input = ToRegister(instr->value());
6023  Condition final_branch_condition = EmitTypeofIs(instr, input);
6024  if (final_branch_condition != no_condition) {
6025  EmitBranch(instr, final_branch_condition);
6026  }
6027 }
6028 
6029 
6030 Condition LCodeGen::EmitTypeofIs(LTypeofIsAndBranch* instr, Register input) {
6031  Label* true_label = instr->TrueLabel(chunk_);
6032  Label* false_label = instr->FalseLabel(chunk_);
6033  Handle<String> type_name = instr->type_literal();
6034  int left_block = instr->TrueDestination(chunk_);
6035  int right_block = instr->FalseDestination(chunk_);
6036  int next_block = GetNextEmittedBlock();
6037 
6038  Label::Distance true_distance = left_block == next_block ? Label::kNear
6039  : Label::kFar;
6040  Label::Distance false_distance = right_block == next_block ? Label::kNear
6041  : Label::kFar;
6042  Condition final_branch_condition = no_condition;
6043  if (type_name->Equals(heap()->number_string())) {
6044  __ JumpIfSmi(input, true_label, true_distance);
6045  __ cmp(FieldOperand(input, HeapObject::kMapOffset),
6046  factory()->heap_number_map());
6047  final_branch_condition = equal;
6048 
6049  } else if (type_name->Equals(heap()->string_string())) {
6050  __ JumpIfSmi(input, false_label, false_distance);
6051  __ CmpObjectType(input, FIRST_NONSTRING_TYPE, input);
6052  __ j(above_equal, false_label, false_distance);
6053  __ test_b(FieldOperand(input, Map::kBitFieldOffset),
6054  1 << Map::kIsUndetectable);
6055  final_branch_condition = zero;
6056 
6057  } else if (type_name->Equals(heap()->symbol_string())) {
6058  __ JumpIfSmi(input, false_label, false_distance);
6059  __ CmpObjectType(input, SYMBOL_TYPE, input);
6060  final_branch_condition = equal;
6061 
6062  } else if (type_name->Equals(heap()->boolean_string())) {
6063  __ cmp(input, factory()->true_value());
6064  __ j(equal, true_label, true_distance);
6065  __ cmp(input, factory()->false_value());
6066  final_branch_condition = equal;
6067 
6068  } else if (FLAG_harmony_typeof && type_name->Equals(heap()->null_string())) {
6069  __ cmp(input, factory()->null_value());
6070  final_branch_condition = equal;
6071 
6072  } else if (type_name->Equals(heap()->undefined_string())) {
6073  __ cmp(input, factory()->undefined_value());
6074  __ j(equal, true_label, true_distance);
6075  __ JumpIfSmi(input, false_label, false_distance);
6076  // Check for undetectable objects => true.
6077  __ mov(input, FieldOperand(input, HeapObject::kMapOffset));
6078  __ test_b(FieldOperand(input, Map::kBitFieldOffset),
6079  1 << Map::kIsUndetectable);
6080  final_branch_condition = not_zero;
6081 
6082  } else if (type_name->Equals(heap()->function_string())) {
6084  __ JumpIfSmi(input, false_label, false_distance);
6085  __ CmpObjectType(input, JS_FUNCTION_TYPE, input);
6086  __ j(equal, true_label, true_distance);
6087  __ CmpInstanceType(input, JS_FUNCTION_PROXY_TYPE);
6088  final_branch_condition = equal;
6089 
6090  } else if (type_name->Equals(heap()->object_string())) {
6091  __ JumpIfSmi(input, false_label, false_distance);
6092  if (!FLAG_harmony_typeof) {
6093  __ cmp(input, factory()->null_value());
6094  __ j(equal, true_label, true_distance);
6095  }
6096  __ CmpObjectType(input, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE, input);
6097  __ j(below, false_label, false_distance);
6098  __ CmpInstanceType(input, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
6099  __ j(above, false_label, false_distance);
6100  // Check for undetectable objects => false.
6101  __ test_b(FieldOperand(input, Map::kBitFieldOffset),
6102  1 << Map::kIsUndetectable);
6103  final_branch_condition = zero;
6104 
6105  } else {
6106  __ jmp(false_label, false_distance);
6107  }
6108  return final_branch_condition;
6109 }
6110 
6111 
6112 void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
6113  Register temp = ToRegister(instr->temp());
6114 
6115  EmitIsConstructCall(temp);
6116  EmitBranch(instr, equal);
6117 }
6118 
6119 
6120 void LCodeGen::EmitIsConstructCall(Register temp) {
6121  // Get the frame pointer for the calling frame.
6122  __ mov(temp, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
6123 
6124  // Skip the arguments adaptor frame if it exists.
6125  Label check_frame_marker;
6126  __ cmp(Operand(temp, StandardFrameConstants::kContextOffset),
6127  Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
6128  __ j(not_equal, &check_frame_marker, Label::kNear);
6129  __ mov(temp, Operand(temp, StandardFrameConstants::kCallerFPOffset));
6130 
6131  // Check the marker in the calling frame.
6132  __ bind(&check_frame_marker);
6133  __ cmp(Operand(temp, StandardFrameConstants::kMarkerOffset),
6134  Immediate(Smi::FromInt(StackFrame::CONSTRUCT)));
6135 }
6136 
6137 
6138 void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
6139  if (!info()->IsStub()) {
6140  // Ensure that we have enough space after the previous lazy-bailout
6141  // instruction for patching the code here.
6142  int current_pc = masm()->pc_offset();
6143  if (current_pc < last_lazy_deopt_pc_ + space_needed) {
6144  int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
6145  __ Nop(padding_size);
6146  }
6147  }
6148  last_lazy_deopt_pc_ = masm()->pc_offset();
6149 }
6150 
6151 
6152 void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
6153  last_lazy_deopt_pc_ = masm()->pc_offset();
6154  ASSERT(instr->HasEnvironment());
6155  LEnvironment* env = instr->environment();
6156  RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
6157  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
6158 }
6159 
6160 
6161 void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
6162  Deoptimizer::BailoutType type = instr->hydrogen()->type();
6163  // TODO(danno): Stubs expect all deopts to be lazy for historical reasons (the
6164  // needed return address), even though the implementation of LAZY and EAGER is
6165  // now identical. When LAZY is eventually completely folded into EAGER, remove
6166  // the special case below.
6167  if (info()->IsStub() && type == Deoptimizer::EAGER) {
6168  type = Deoptimizer::LAZY;
6169  }
6170  Comment(";;; deoptimize: %s", instr->hydrogen()->reason());
6171  DeoptimizeIf(no_condition, instr->environment(), type);
6172 }
6173 
6174 
6175 void LCodeGen::DoDummy(LDummy* instr) {
6176  // Nothing to see here, move on!
6177 }
6178 
6179 
6180 void LCodeGen::DoDummyUse(LDummyUse* instr) {
6181  // Nothing to see here, move on!
6182 }
6183 
6184 
6185 void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
6186  PushSafepointRegistersScope scope(this);
6187  __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
6188  __ CallRuntimeSaveDoubles(Runtime::kHiddenStackGuard);
6189  RecordSafepointWithLazyDeopt(
6190  instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
6191  ASSERT(instr->HasEnvironment());
6192  LEnvironment* env = instr->environment();
6193  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
6194 }
6195 
6196 
6197 void LCodeGen::DoStackCheck(LStackCheck* instr) {
6198  class DeferredStackCheck V8_FINAL : public LDeferredCode {
6199  public:
6200  DeferredStackCheck(LCodeGen* codegen,
6201  LStackCheck* instr,
6202  const X87Stack& x87_stack)
6203  : LDeferredCode(codegen, x87_stack), instr_(instr) { }
6204  virtual void Generate() V8_OVERRIDE {
6205  codegen()->DoDeferredStackCheck(instr_);
6206  }
6207  virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
6208  private:
6209  LStackCheck* instr_;
6210  };
6211 
6212  ASSERT(instr->HasEnvironment());
6213  LEnvironment* env = instr->environment();
6214  // There is no LLazyBailout instruction for stack-checks. We have to
6215  // prepare for lazy deoptimization explicitly here.
6216  if (instr->hydrogen()->is_function_entry()) {
6217  // Perform stack overflow check.
6218  Label done;
6219  ExternalReference stack_limit =
6220  ExternalReference::address_of_stack_limit(isolate());
6221  __ cmp(esp, Operand::StaticVariable(stack_limit));
6222  __ j(above_equal, &done, Label::kNear);
6223 
6224  ASSERT(instr->context()->IsRegister());
6225  ASSERT(ToRegister(instr->context()).is(esi));
6226  CallCode(isolate()->builtins()->StackCheck(),
6227  RelocInfo::CODE_TARGET,
6228  instr);
6229  __ bind(&done);
6230  } else {
6231  ASSERT(instr->hydrogen()->is_backwards_branch());
6232  // Perform stack overflow check if this goto needs it before jumping.
6233  DeferredStackCheck* deferred_stack_check =
6234  new(zone()) DeferredStackCheck(this, instr, x87_stack_);
6235  ExternalReference stack_limit =
6236  ExternalReference::address_of_stack_limit(isolate());
6237  __ cmp(esp, Operand::StaticVariable(stack_limit));
6238  __ j(below, deferred_stack_check->entry());
6239  EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
6240  __ bind(instr->done_label());
6241  deferred_stack_check->SetExit(instr->done_label());
6242  RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
6243  // Don't record a deoptimization index for the safepoint here.
6244  // This will be done explicitly when emitting the call and the safepoint in
6245  // the deferred code.
6246  }
6247 }
6248 
6249 
6250 void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
6251  // This is a pseudo-instruction that ensures that the environment here is
6252  // properly registered for deoptimization and records the assembler's PC
6253  // offset.
6254  LEnvironment* environment = instr->environment();
6255 
6256  // If the environment were already registered, we would have no way of
6257  // backpatching it with the spill slot operands.
6258  ASSERT(!environment->HasBeenRegistered());
6259  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
6260 
6261  GenerateOsrPrologue();
6262 }
6263 
6264 
6265 void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
6266  ASSERT(ToRegister(instr->context()).is(esi));
6267  __ cmp(eax, isolate()->factory()->undefined_value());
6268  DeoptimizeIf(equal, instr->environment());
6269 
6270  __ cmp(eax, isolate()->factory()->null_value());
6271  DeoptimizeIf(equal, instr->environment());
6272 
6273  __ test(eax, Immediate(kSmiTagMask));
6274  DeoptimizeIf(zero, instr->environment());
6275 
6277  __ CmpObjectType(eax, LAST_JS_PROXY_TYPE, ecx);
6278  DeoptimizeIf(below_equal, instr->environment());
6279 
6280  Label use_cache, call_runtime;
6281  __ CheckEnumCache(&call_runtime);
6282 
6284  __ jmp(&use_cache, Label::kNear);
6285 
6286  // Get the set of properties to enumerate.
6287  __ bind(&call_runtime);
6288  __ push(eax);
6289  CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr);
6290 
6291  __ cmp(FieldOperand(eax, HeapObject::kMapOffset),
6292  isolate()->factory()->meta_map());
6293  DeoptimizeIf(not_equal, instr->environment());
6294  __ bind(&use_cache);
6295 }
6296 
6297 
6298 void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
6299  Register map = ToRegister(instr->map());
6300  Register result = ToRegister(instr->result());
6301  Label load_cache, done;
6302  __ EnumLength(result, map);
6303  __ cmp(result, Immediate(Smi::FromInt(0)));
6304  __ j(not_equal, &load_cache, Label::kNear);
6305  __ mov(result, isolate()->factory()->empty_fixed_array());
6306  __ jmp(&done, Label::kNear);
6307 
6308  __ bind(&load_cache);
6309  __ LoadInstanceDescriptors(map, result);
6310  __ mov(result,
6311  FieldOperand(result, DescriptorArray::kEnumCacheOffset));
6312  __ mov(result,
6313  FieldOperand(result, FixedArray::SizeFor(instr->idx())));
6314  __ bind(&done);
6315  __ test(result, result);
6316  DeoptimizeIf(equal, instr->environment());
6317 }
6318 
6319 
6320 void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
6321  Register object = ToRegister(instr->value());
6322  __ cmp(ToRegister(instr->map()),
6323  FieldOperand(object, HeapObject::kMapOffset));
6324  DeoptimizeIf(not_equal, instr->environment());
6325 }
6326 
6327 
6328 void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
6329  Register object = ToRegister(instr->object());
6330  Register index = ToRegister(instr->index());
6331 
6332  Label out_of_object, done;
6333  __ cmp(index, Immediate(0));
6334  __ j(less, &out_of_object, Label::kNear);
6335  __ mov(object, FieldOperand(object,
6336  index,
6337  times_half_pointer_size,
6338  JSObject::kHeaderSize));
6339  __ jmp(&done, Label::kNear);
6340 
6341  __ bind(&out_of_object);
6342  __ mov(object, FieldOperand(object, JSObject::kPropertiesOffset));
6343  __ neg(index);
6344  // Index is now equal to the out-of-object property index plus 1.
6345  __ mov(object, FieldOperand(object,
6346  index,
6347  times_half_pointer_size,
6348  FixedArray::kHeaderSize - kPointerSize));
6349  __ bind(&done);
6350 }
6351 
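// [Illustrative sketch -- not part of lithium-codegen-ia32.cc.]
// DoLoadFieldByIndex above dispatches on the sign of the index: non-negative
// indices address in-object fields directly, while negative indices address
// the separate properties backing store, where the negated value is the
// out-of-object slot number plus one. A simplified model of that dispatch,
// with ordinary vectors standing in for the object's two storage areas:
#include <cassert>
#include <vector>

static int LoadFieldByIndexModel(const std::vector<int>& in_object,
                                 const std::vector<int>& out_of_object,
                                 int index) {
  if (index >= 0) return in_object[index];  // in-object property
  int slot = -index - 1;                    // negate, then drop the +1 bias
  return out_of_object[slot];               // out-of-object property
}

int main() {
  std::vector<int> in_object = {10, 11, 12};
  std::vector<int> out_of_object = {20, 21};
  assert(LoadFieldByIndexModel(in_object, out_of_object, 1) == 11);
  assert(LoadFieldByIndexModel(in_object, out_of_object, -1) == 20);
  assert(LoadFieldByIndexModel(in_object, out_of_object, -2) == 21);
  return 0;
}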
6352 
6353 #undef __
6354 
6355 } } // namespace v8::internal
6356 
6357 #endif // V8_TARGET_ARCH_IA32
byte * Address
Definition: globals.h:186
static Handle< Code > initialize_stub(Isolate *isolate, StrictMode strict_mode)
Definition: ic.cc:1267
enable upcoming ES6 features enable harmony block scoping enable harmony enable harmony proxies enable harmony generators enable harmony numeric enable harmony string enable harmony math functions harmony_scoping harmony_symbols harmony_collections harmony_iteration harmony_strings harmony_scoping harmony_maths tracks arrays with only smi values Optimize object Array DOM strings and string pretenure call new trace pretenuring decisions of HAllocate instructions track fields with only smi values track fields with heap values track_fields track_fields Enables optimizations which favor memory size over execution speed use string slices optimization filter maximum number of GVN fix point iterations use function inlining use allocation folding eliminate write barriers targeting allocations in optimized code maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining crankshaft harvests type feedback from stub cache trace check elimination phase hydrogen tracing filter NULL
Definition: flags.cc:269
void FinishCode(Handle< Code > code)
static const int kHashFieldOffset
Definition: objects.h:8629
const int kMinInt
Definition: globals.h:249
static const int kBitFieldOffset
Definition: objects.h:6461
void DoDeferredStringCharFromCode(LStringCharFromCode *instr)
void DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal *instr, Label *map_check)
const intptr_t kSmiTagMask
Definition: v8.h:5480
static const int kCodeEntryOffset
Definition: objects.h:7518
static const int kPrototypeOrInitialMapOffset
Definition: objects.h:7519
static int SlotOffset(int index)
Definition: contexts.h:498
static Representation Smi()
enable upcoming ES6 features enable harmony block scoping enable harmony enable harmony proxies enable harmony generators enable harmony numeric enable harmony string enable harmony math functions harmony_scoping harmony_symbols harmony_collections harmony_iteration harmony_strings harmony_scoping harmony_maths tracks arrays with only smi values Optimize object Array DOM strings and string pretenure call new trace pretenuring decisions of HAllocate instructions track fields with only smi values track fields with heap values track_fields track_fields Enables optimizations which favor memory size over execution speed use string slices optimization filter maximum number of GVN fix point iterations use function inlining use allocation folding eliminate write barriers targeting allocations in optimized code maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining crankshaft harvests type feedback from stub cache trace check elimination phase hydrogen tracing filter trace hydrogen to given file name trace inlining decisions trace store elimination trace all use positions trace global value numbering trace hydrogen escape analysis trace the tracking of allocation sites trace map generalization environment for every instruction deoptimize every n garbage collections put a break point before deoptimizing deoptimize uncommon cases use on stack replacement trace array bounds check elimination perform array index dehoisting use load elimination use store elimination use constant folding eliminate unreachable code number of stress runs when picking a function to watch for shared function not JSFunction itself flushes the cache of optimized code for closures on every GC functions with arguments object maximum number of escape analysis fix point iterations allow uint32 values on optimize frames if they are used only in safe operations track concurrent recompilation artificial compilation delay in ms concurrent on stack replacement do not emit check maps for constant values that have a leaf map
Definition: flags.cc:350
static double hole_nan_as_double()
Definition: objects-inl.h:2161
static const int kEnumCacheOffset
Definition: objects.h:3499
static const int kMaxUtf16CodeUnit
Definition: objects.h:8916
const uint32_t kTwoByteStringTag
Definition: objects.h:610
static const uint32_t kExponentMask
Definition: objects.h:1981
int StackSlotOffset(int index)
Definition: lithium.cc:240
RegisterType type() const
static Smi * FromInt(int value)
Definition: objects-inl.h:1209
const int KB
Definition: globals.h:245
static Handle< Code > GetUninitialized(Isolate *isolate, Token::Value op)
Definition: ic.cc:2489
static const int kDataOffset
Definition: objects.h:4970
const Register esp
bool IsSmi(LConstantOperand *op) const
static Handle< T > cast(Handle< S > that)
Definition: handles.h:75
Operand HighOperand(LOperand *op)
static const int kGlobalReceiverOffset
Definition: objects.h:7613
static Representation Integer32()
void X87PrepareBinaryOp(X87Register left, X87Register right, X87Register result)
void X87LoadForUsage(X87Register reg)
static const int kNativeByteOffset
Definition: objects.h:7267
static const int kExponentBias
Definition: objects.h:1985
int int32_t
Definition: unicode.cc:47
static XMMRegister FromAllocationIndex(int index)
static const unsigned int kContainsCachedArrayIndexMask
Definition: objects.h:8673
static bool IsSupported(CpuFeature f)
Definition: assembler-arm.h:68
static const int kStrictModeBitWithinByte
Definition: objects.h:7258
const int kNoAlignmentPadding
Definition: frames-ia32.h:56
X87Register ToX87Register(LOperand *op) const
static X87Register FromAllocationIndex(int index)
AllocationSiteOverrideMode
Definition: code-stubs.h:759
void DoDeferredMathAbsTaggedHeapNumber(LMathAbs *instr)
static const int kSize
Definition: objects.h:7922
#define ASSERT(condition)
Definition: checks.h:329
void DoDeferredStringCharCodeAt(LStringCharCodeAt *instr)
static const int kContextOffset
Definition: frames.h:185
const int kPointerSizeLog2
Definition: globals.h:281
static bool IsSafeForSnapshot(CpuFeature f)
Definition: assembler-arm.h:78
static const int kInObjectFieldCount
Definition: objects.h:7976
const uint32_t kStringRepresentationMask
Definition: objects.h:615
MemOperand GlobalObjectOperand()
static const int kCallerFPOffset
Definition: frames.h:188
PerThreadAssertScopeDebugOnly< DEFERRED_HANDLE_DEREFERENCE_ASSERT, true > AllowDeferredHandleDereference
Definition: assert-scope.h:234
static Handle< Code > initialize_stub(Isolate *isolate, ExtraICState extra_state)
Definition: ic.cc:782
static const int kInstanceClassNameOffset
Definition: objects.h:7107
const bool FLAG_enable_slow_asserts
Definition: checks.h:307
bool IsSimpleMapChangeTransition(ElementsKind from_kind, ElementsKind to_kind)
Variable * parameter(int index) const
Definition: scopes.h:333
MemOperand ContextOperand(Register context, int index)
static const int kContextOffset
Definition: objects.h:7523
const int kAlignmentPaddingPushed
Definition: frames-ia32.h:57
const Register edi
void DoDeferredStackCheck(LStackCheck *instr)
void X87PrepareToWrite(X87Register reg)
int LookupDestination(int block_id) const
Condition ReverseCondition(Condition cond)
Immediate ToImmediate(LOperand *op, const Representation &r) const
#define IN
Operand ToOperand(LOperand *op)
const uint32_t kSlotsZapValue
Definition: v8globals.h:86
const Register ebp
#define UNREACHABLE()
Definition: checks.h:52
int32_t WhichPowerOf2Abs(int32_t x)
Definition: utils.h:261
enable upcoming ES6 features enable harmony block scoping enable harmony enable harmony proxies enable harmony generators enable harmony numeric enable harmony string enable harmony math functions harmony_scoping harmony_symbols harmony_collections harmony_iteration harmony_strings harmony_scoping harmony_maths tracks arrays with only smi values Optimize object Array DOM strings and string pretenure call new trace pretenuring decisions of HAllocate instructions track fields with only smi values track fields with heap values track_fields track_fields Enables optimizations which favor memory size over execution speed use string slices optimization filter maximum number of GVN fix point iterations use function inlining use allocation folding eliminate write barriers targeting allocations in optimized code maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining crankshaft harvests type feedback from stub cache trace check elimination phase hydrogen tracing filter trace hydrogen to given file name trace inlining decisions trace store elimination trace all use positions trace global value numbering trace hydrogen escape analysis trace the tracking of allocation sites trace map generalization environment for every instruction deoptimize every n garbage collections put a break point before deoptimizing deoptimize uncommon cases use on stack replacement trace array bounds check elimination perform array index dehoisting use load elimination use store elimination use constant folding eliminate unreachable code number of stress runs when picking a function to watch for shared function not JSFunction itself flushes the cache of optimized code for closures on every GC functions with arguments object maximum number of escape analysis fix point iterations allow uint32 values on optimize frames if they are used only in safe operations track concurrent recompilation artificial compilation delay in ms concurrent on stack replacement do not emit check maps for constant values that have a leaf deoptimize the optimized code if the layout of the maps changes number of stack frames inspected by the profiler percentage of ICs that must have type info to allow optimization extra verbose compilation tracing generate extra emit comments in code disassembly enable use of SSE3 instructions if available enable use of CMOV instruction if available enable use of VFP3 instructions if available enable use of NEON instructions if enable use of SDIV and UDIV instructions if enable loading bit constant by means of movw movt instruction enable unaligned accesses for enable use of d16 d31 registers on ARM this requires VFP3 force all emitted branches to be in long mode(MIPS only)") DEFINE_string(expose_natives_as
static const int kInfinityOrNanExponent
Definition: objects.h:1987
static const int kNumAllocatableRegisters
static const int kLengthOffset
Definition: objects.h:8905
STATIC_ASSERT(sizeof(CPURegister)==sizeof(Register))
static const int kExponentShift
Definition: objects.h:1986
const Register eax
static const int kValueOffset
Definition: objects.h:1971
bool IsFastPackedElementsKind(ElementsKind kind)
const uint32_t kHoleNanUpper32
Definition: v8globals.h:454
const int kDoubleSize
Definition: globals.h:266
static const int kDontAdaptArgumentsSentinel
Definition: objects.h:7098
void DoDeferredNumberTagD(LNumberTagD *instr)
static uint32_t update(uint32_t previous, AllocationSpace value)
Definition: utils.h:296
const XMMRegister xmm1
static Address GetDeoptimizationEntry(Isolate *isolate, int id, BailoutType type, GetEntryMode mode=ENSURE_ENTRY_CODE)
Definition: deoptimizer.cc:683
const int kPointerSize
Definition: globals.h:268
void check(i::Vector< const uint8_t > string)
static void MaybeCallEntryHook(MacroAssembler *masm)
Operand FieldOperand(Register object, int offset)
const Register ecx
DwVfpRegister ToDoubleRegister(LOperand *op) const
const int kHeapObjectTag
Definition: v8.h:5473
void DoDeferredAllocate(LAllocate *instr)
static int GetDeoptimizationId(Isolate *isolate, Address addr, BailoutType type)
Definition: deoptimizer.cc:701
const uint32_t kHoleNanLower32
Definition: v8globals.h:455
static const int kMaxRegularHeapObjectSize
Definition: spaces.h:820
static Register FromAllocationIndex(int index)
#define __
static const int kCallerSPOffset
Definition: frames.h:190
static const int kDynamicAlignmentStateOffset
Definition: frames-ia32.h:103
static const int kCacheStampOffset
Definition: objects.h:7787
bool IsFixedTypedArrayElementsKind(ElementsKind kind)
static const int kPropertiesOffset
Definition: objects.h:2755
void X87Fxch(X87Register reg, int other_slot=0)
const int kAlignmentZapValue
Definition: frames-ia32.h:58
#define ASSERT_LE(v1, v2)
Definition: checks.h:334
int32_t ToInteger32(LConstantOperand *op) const
int num_parameters() const
Definition: scopes.h:338
bool IsInteger32(LConstantOperand *op) const
static const int kMarkerOffset
Definition: frames.h:184
bool IsFastSmiElementsKind(ElementsKind kind)
void X87Free(X87Register reg)
static void Generate(MacroAssembler *masm, Register string, Register index, Register result, Label *call_runtime)
Handle< Object > ToHandle(LConstantOperand *op) const
static void EnsureRelocSpaceForLazyDeoptimization(Handle< Code > code)
static const int kHeaderSize
Definition: objects.h:9042
static const int kNativeBitWithinByte
Definition: objects.h:7261
bool IsPowerOf2(T x)
Definition: utils.h:51
#define STATIC_ASCII_VECTOR(x)
Definition: utils.h:570
void DoDeferredInstanceMigration(LCheckMaps *instr, Register object)
const int kBitsPerInt
Definition: globals.h:290
void DoParallelMove(LParallelMove *move)
double ToDouble(LConstantOperand *op) const
int ElementsKindToShiftSize(ElementsKind elements_kind)
Vector< const char > CStrVector(const char *data)
Definition: utils.h:574
static int OffsetOfElementAt(int index)
Definition: objects.h:3070
static int SizeFor(int length)
Definition: objects.h:3067
bool NeedsDeferredFrame() const
const XMMRegister xmm3
static const int kHeaderSize
Definition: objects.h:3016
AllocationFlags
void Load(const v8::FunctionCallbackInfo< v8::Value > &args)
Definition: shell.cc:171
static const int kMapOffset
Definition: objects.h:1890
static const int kValueOffset
Definition: objects.h:7779
bool is(Register reg) const
const Register ebx
#define V8_OVERRIDE
Definition: v8config.h:402
Handle< T > handle(T *t, Isolate *isolate)
Definition: handles.h:103
static const int kHasNonInstancePrototype
Definition: objects.h:6468
void WriteTranslation(LEnvironment *environment, Translation *translation)
static const uint32_t kSignMask
Definition: objects.h:1980
static const int kNotDeoptimizationEntry
Definition: deoptimizer.h:258
static const int kStrictModeByteOffset
Definition: objects.h:7265
int32_t ToRepresentation(LConstantOperand *op, const Representation &r) const
const int kSmiTagSize
Definition: v8.h:5479
static const int kHeaderSize
Definition: objects.h:5604
static AllocationSiteMode GetMode(ElementsKind boilerplate_elements_kind)
Definition: objects-inl.h:1477
void X87Mov(X87Register reg, Operand src, X87OperandType operand=kX87DoubleOperand)
#define no_xmm_reg
Condition NegateCondition(Condition cond)
#define ASSERT_EQ(v1, v2)
Definition: checks.h:330
const Register esi
T Abs(T a)
Definition: utils.h:241
formal_parameter_count
Definition: objects-inl.h:5190
static const int kConstructorOffset
Definition: objects.h:6428
static const uint32_t kMantissaMask
Definition: objects.h:1982
const uint32_t kOneByteStringTag
Definition: objects.h:611
const int kSmiTag
Definition: v8.h:5478
#define ASSERT_NE(v1, v2)
Definition: checks.h:331
static const int kIsUndetectable
Definition: objects.h:6472
void X87CommitWrite(X87Register reg)
static const int kHeaderSize
Definition: objects.h:2757
Register ToRegister(LOperand *op) const
void Add(const T &element, AllocationPolicy allocator=AllocationPolicy())
Definition: list-inl.h:39
void DoDeferredTaggedToI(LTaggedToI *instr)
static const int kPrototypeOffset
Definition: objects.h:6427
#define RUNTIME_ENTRY(name, nargs, ressize)
const Register no_reg
static const int kMaxLength
Definition: objects.h:8922
void DoDeferredNumberTagIU(LInstruction *instr, LOperand *value, LOperand *temp1, LOperand *temp2, IntegerSignedness signedness)
static void EmitMathExp(MacroAssembler *masm, DwVfpRegister input, DwVfpRegister result, DwVfpRegister double_scratch1, DwVfpRegister double_scratch2, Register temp1, Register temp2, Register temp3)
bool IsNextEmittedBlock(int block_id) const
const int kPCOnStackSize
Definition: globals.h:270
const XMMRegister xmm2
const Register edx
LCodeGen(LChunk *chunk, MacroAssembler *assembler, CompilationInfo *info)
static const int kSharedFunctionInfoOffset
Definition: objects.h:7521
static const int kMaxValue
Definition: objects.h:1681
const int kCharSize
Definition: globals.h:261
ElementsKind GetHoleyElementsKind(ElementsKind packed_kind)
static const int32_t kMaxOneByteCharCode
Definition: objects.h:8914
static const int kExponentOffset
Definition: objects.h:1977
bool EvalComparison(Token::Value op, double op1, double op2)
Definition: assembler.cc:1516
const int kUC16Size
Definition: globals.h:312
const uint32_t kStringEncodingMask
Definition: objects.h:609
static const int kInstanceTypeOffset
Definition: objects.h:6459
static const int kMantissaOffset
Definition: objects.h:1976