v8 3.14.5 (node 0.10.28)
V8 is Google's open source JavaScript engine.
lithium-codegen-ia32.cc
1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
4 // met:
5 //
6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided
11 // with the distribution.
12 // * Neither the name of Google Inc. nor the names of its
13 // contributors may be used to endorse or promote products derived
14 // from this software without specific prior written permission.
15 //
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 
28 #include "v8.h"
29 
30 #if defined(V8_TARGET_ARCH_IA32)
31 
32 #include "ia32/lithium-codegen-ia32.h"
33 #include "code-stubs.h"
34 #include "deoptimizer.h"
35 #include "stub-cache.h"
36 #include "codegen.h"
37 
38 namespace v8 {
39 namespace internal {
40 
41 
42 // When invoking builtins, we need to record the safepoint in the middle of
43 // the invoke instruction sequence generated by the macro assembler.
44 class SafepointGenerator : public CallWrapper {
45  public:
46  SafepointGenerator(LCodeGen* codegen,
47  LPointerMap* pointers,
48  Safepoint::DeoptMode mode)
49  : codegen_(codegen),
50  pointers_(pointers),
51  deopt_mode_(mode) {}
52  virtual ~SafepointGenerator() { }
53 
54  virtual void BeforeCall(int call_size) const {}
55 
56  virtual void AfterCall() const {
57  codegen_->RecordSafepoint(pointers_, deopt_mode_);
58  }
59 
60  private:
61  LCodeGen* codegen_;
62  LPointerMap* pointers_;
63  Safepoint::DeoptMode deopt_mode_;
64 };
65 
66 
67 #define __ masm()->
68 
69 bool LCodeGen::GenerateCode() {
70  HPhase phase("Z_Code generation", chunk());
71  ASSERT(is_unused());
72  status_ = GENERATING;
73  CpuFeatures::Scope scope(SSE2);
74 
75  CodeStub::GenerateFPStubs();
76 
77  // Open a frame scope to indicate that there is a frame on the stack. The
78  // MANUAL indicates that the scope shouldn't actually generate code to set up
79  // the frame (that is done in GeneratePrologue).
80  FrameScope frame_scope(masm_, StackFrame::MANUAL);
81 
82  dynamic_frame_alignment_ = (chunk()->num_double_slots() > 2 &&
83  !chunk()->graph()->is_recursive()) ||
84  !info()->osr_ast_id().IsNone();
85 
86  return GeneratePrologue() &&
87  GenerateBody() &&
88  GenerateDeferredCode() &&
89  GenerateSafepointTable();
90 }
91 
92 
93 void LCodeGen::FinishCode(Handle<Code> code) {
94  ASSERT(is_done());
95  code->set_stack_slots(GetStackSlotCount());
96  code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
97  PopulateDeoptimizationData(code);
98  Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(code);
99 }
100 
101 
102 void LCodeGen::Abort(const char* reason) {
103  info()->set_bailout_reason(reason);
104  status_ = ABORTED;
105 }
106 
107 
108 void LCodeGen::Comment(const char* format, ...) {
109  if (!FLAG_code_comments) return;
110  char buffer[4 * KB];
111  StringBuilder builder(buffer, ARRAY_SIZE(buffer));
112  va_list arguments;
113  va_start(arguments, format);
114  builder.AddFormattedList(format, arguments);
115  va_end(arguments);
116 
117  // Copy the string before recording it in the assembler to avoid
118  // issues when the stack allocated buffer goes out of scope.
119  size_t length = builder.position();
120  Vector<char> copy = Vector<char>::New(length + 1);
121  memcpy(copy.start(), builder.Finalize(), copy.length());
122  masm()->RecordComment(copy.start());
123 }
124 
125 
126 bool LCodeGen::GeneratePrologue() {
127  ASSERT(is_generating());
128 
129  ProfileEntryHookStub::MaybeCallEntryHook(masm_);
130 
131 #ifdef DEBUG
132  if (strlen(FLAG_stop_at) > 0 &&
133  info_->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
134  __ int3();
135  }
136 #endif
137 
138  // Strict mode functions and builtins need to replace the receiver
139  // with undefined when called as functions (without an explicit
140  // receiver object). ecx is zero for method calls and non-zero for
141  // function calls.
142  if (!info_->is_classic_mode() || info_->is_native()) {
143  Label ok;
144  __ test(ecx, Operand(ecx));
145  __ j(zero, &ok, Label::kNear);
146  // +1 for return address.
147  int receiver_offset = (scope()->num_parameters() + 1) * kPointerSize;
148  __ mov(Operand(esp, receiver_offset),
149  Immediate(isolate()->factory()->undefined_value()));
150  __ bind(&ok);
151  }
152 
153 
154  if (dynamic_frame_alignment_) {
155  // Move state of dynamic frame alignment into edx.
156  __ mov(edx, Immediate(kNoAlignmentPadding));
157 
158  Label do_not_pad, align_loop;
159  STATIC_ASSERT(kDoubleSize == 2 * kPointerSize);
160  // Align esp + 4 to a multiple of 2 * kPointerSize.
161  __ test(esp, Immediate(kPointerSize));
162  __ j(not_zero, &do_not_pad, Label::kNear);
163  __ push(Immediate(0));
164  __ mov(ebx, esp);
165  __ mov(edx, Immediate(kAlignmentPaddingPushed));
166  // Copy arguments, receiver, and return address.
167  __ mov(ecx, Immediate(scope()->num_parameters() + 2));
168 
169  __ bind(&align_loop);
170  __ mov(eax, Operand(ebx, 1 * kPointerSize));
171  __ mov(Operand(ebx, 0), eax);
172  __ add(Operand(ebx), Immediate(kPointerSize));
173  __ dec(ecx);
174  __ j(not_zero, &align_loop, Label::kNear);
175  __ mov(Operand(ebx, 0), Immediate(kAlignmentZapValue));
176  __ bind(&do_not_pad);
177  }
178 
179  __ push(ebp); // Caller's frame pointer.
180  __ mov(ebp, esp);
181  __ push(esi); // Callee's context.
182  __ push(edi); // Callee's JS function.
183 
184  if (dynamic_frame_alignment_ && FLAG_debug_code) {
185  __ test(esp, Immediate(kPointerSize));
186  __ Assert(zero, "frame is expected to be aligned");
187  }
188 
189  // Reserve space for the stack slots needed by the code.
190  int slots = GetStackSlotCount();
191  ASSERT_GE(slots, 1);
192  if (slots == 1) {
193  if (dynamic_frame_alignment_) {
194  __ push(edx);
195  } else {
196  __ push(Immediate(kNoAlignmentPadding));
197  }
198  } else {
199  if (FLAG_debug_code) {
200  __ mov(Operand(eax), Immediate(slots));
201  Label loop;
202  __ bind(&loop);
203  __ push(Immediate(kSlotsZapValue));
204  __ dec(eax);
205  __ j(not_zero, &loop);
206  } else {
207  __ sub(Operand(esp), Immediate(slots * kPointerSize));
208  #ifdef _MSC_VER
209  // On windows, you may not access the stack more than one page below
210  // the most recently mapped page. To make the allocated area randomly
211  // accessible, we write to each page in turn (the value is irrelevant).
212  const int kPageSize = 4 * KB;
213  for (int offset = slots * kPointerSize - kPageSize;
214  offset > 0;
215  offset -= kPageSize) {
216  __ mov(Operand(esp, offset), eax);
217  }
218  #endif
219  }
220 
221  // Store dynamic frame alignment state in the first local.
222  if (dynamic_frame_alignment_) {
223  __ mov(Operand(ebp,
224  JavaScriptFrameConstants::kDynamicAlignmentStateOffset),
225  edx);
226  } else {
227  __ mov(Operand(ebp,
228  JavaScriptFrameConstants::kDynamicAlignmentStateOffset),
229  Immediate(kNoAlignmentPadding));
230  }
231  }
232 
233  // Possibly allocate a local context.
234  int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
235  if (heap_slots > 0) {
236  Comment(";;; Allocate local context");
237  // Argument to NewContext is the function, which is still in edi.
238  __ push(edi);
239  if (heap_slots <= FastNewContextStub::kMaximumSlots) {
240  FastNewContextStub stub(heap_slots);
241  __ CallStub(&stub);
242  } else {
243  __ CallRuntime(Runtime::kNewFunctionContext, 1);
244  }
245  RecordSafepoint(Safepoint::kNoLazyDeopt);
246  // Context is returned in both eax and esi. It replaces the context
247  // passed to us. It's saved in the stack and kept live in esi.
248  __ mov(Operand(ebp, StandardFrameConstants::kContextOffset), esi);
249 
250  // Copy parameters into context if necessary.
251  int num_parameters = scope()->num_parameters();
252  for (int i = 0; i < num_parameters; i++) {
253  Variable* var = scope()->parameter(i);
254  if (var->IsContextSlot()) {
255  int parameter_offset = StandardFrameConstants::kCallerSPOffset +
256  (num_parameters - 1 - i) * kPointerSize;
257  // Load parameter from stack.
258  __ mov(eax, Operand(ebp, parameter_offset));
259  // Store it in the context.
260  int context_offset = Context::SlotOffset(var->index());
261  __ mov(Operand(esi, context_offset), eax);
262  // Update the write barrier. This clobbers eax and ebx.
263  __ RecordWriteContextSlot(esi,
264  context_offset,
265  eax,
266  ebx,
267  kDontSaveFPRegs);
268  }
269  }
270  Comment(";;; End allocate local context");
271  }
272 
273  // Trace the call.
274  if (FLAG_trace) {
275  // We have not executed any compiled code yet, so esi still holds the
276  // incoming context.
277  __ CallRuntime(Runtime::kTraceEnter, 0);
278  }
279  return !is_aborted();
280 }
281 
282 
283 bool LCodeGen::GenerateBody() {
284  ASSERT(is_generating());
285  bool emit_instructions = true;
286  for (current_instruction_ = 0;
287  !is_aborted() && current_instruction_ < instructions_->length();
288  current_instruction_++) {
289  LInstruction* instr = instructions_->at(current_instruction_);
290  if (instr->IsLabel()) {
291  LLabel* label = LLabel::cast(instr);
292  emit_instructions = !label->HasReplacement();
293  }
294 
295  if (emit_instructions) {
296  Comment(";;; @%d: %s.", current_instruction_, instr->Mnemonic());
297  instr->CompileToNative(this);
298  }
299  }
300  EnsureSpaceForLazyDeopt();
301  return !is_aborted();
302 }
303 
304 
305 bool LCodeGen::GenerateDeferredCode() {
306  ASSERT(is_generating());
307  if (deferred_.length() > 0) {
308  for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
309  LDeferredCode* code = deferred_[i];
310  __ bind(code->entry());
311  Comment(";;; Deferred code @%d: %s.",
312  code->instruction_index(),
313  code->instr()->Mnemonic());
314  code->Generate();
315  __ jmp(code->exit());
316  }
317  }
318 
319  // Deferred code is the last part of the instruction sequence. Mark
320  // the generated code as done unless we bailed out.
321  if (!is_aborted()) status_ = DONE;
322  return !is_aborted();
323 }
324 
325 
326 bool LCodeGen::GenerateSafepointTable() {
327  ASSERT(is_done());
328  safepoints_.Emit(masm(), GetStackSlotCount());
329  return !is_aborted();
330 }
331 
332 
333 Register LCodeGen::ToRegister(int index) const {
334  return Register::FromAllocationIndex(index);
335 }
336 
337 
338 XMMRegister LCodeGen::ToDoubleRegister(int index) const {
339  return XMMRegister::FromAllocationIndex(index);
340 }
341 
342 
343 Register LCodeGen::ToRegister(LOperand* op) const {
344  ASSERT(op->IsRegister());
345  return ToRegister(op->index());
346 }
347 
348 
349 XMMRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
350  ASSERT(op->IsDoubleRegister());
351  return ToDoubleRegister(op->index());
352 }
353 
354 
355 int LCodeGen::ToInteger32(LConstantOperand* op) const {
356  HConstant* constant = chunk_->LookupConstant(op);
357  ASSERT(chunk_->LookupLiteralRepresentation(op).IsInteger32());
358  ASSERT(constant->HasInteger32Value());
359  return constant->Integer32Value();
360 }
361 
362 
363 Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
364  HConstant* constant = chunk_->LookupConstant(op);
365  ASSERT(chunk_->LookupLiteralRepresentation(op).IsTagged());
366  return constant->handle();
367 }
368 
369 
370 double LCodeGen::ToDouble(LConstantOperand* op) const {
371  HConstant* constant = chunk_->LookupConstant(op);
372  ASSERT(constant->HasDoubleValue());
373  return constant->DoubleValue();
374 }
375 
376 
377 bool LCodeGen::IsInteger32(LConstantOperand* op) const {
378  return chunk_->LookupLiteralRepresentation(op).IsInteger32();
379 }
380 
381 
382 Operand LCodeGen::ToOperand(LOperand* op) const {
383  if (op->IsRegister()) return Operand(ToRegister(op));
384  if (op->IsDoubleRegister()) return Operand(ToDoubleRegister(op));
385  ASSERT(op->IsStackSlot() || op->IsDoubleStackSlot());
386  int index = op->index();
387  if (index >= 0) {
388  // Local or spill slot. Skip the frame pointer, function, and
389  // context in the fixed part of the frame.
390  return Operand(ebp, -(index + 3) * kPointerSize);
391  } else {
392  // Incoming parameter. Skip the return address.
393  return Operand(ebp, -(index - 1) * kPointerSize);
394  }
395 }
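// Note (added for clarity; not part of the original source): with
// kPointerSize == 4 on ia32, spill slot 0 (index 0) resolves to ebp - 12,
// below the saved ebp, the context and the JS function pushed by the
// prologue, while an incoming parameter with index -1 resolves to ebp + 8,
// just above the saved ebp and the return address.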
396 
397 
398 Operand LCodeGen::HighOperand(LOperand* op) {
399  ASSERT(op->IsDoubleStackSlot());
400  int index = op->index();
401  int offset = (index >= 0) ? index + 3 : index - 1;
402  return Operand(ebp, -offset * kPointerSize);
403 }
404 
405 
406 void LCodeGen::WriteTranslation(LEnvironment* environment,
407  Translation* translation,
408  int* arguments_index,
409  int* arguments_count) {
410  if (environment == NULL) return;
411 
412  // The translation includes one command per value in the environment.
413  int translation_size = environment->values()->length();
414  // The output frame height does not include the parameters.
415  int height = translation_size - environment->parameter_count();
416 
417  // Function parameters are arguments to the outermost environment. The
418  // arguments index points to the first element of a sequence of tagged
419  // values on the stack that represent the arguments. This needs to be
420  // kept in sync with the LArgumentsElements implementation.
421  *arguments_index = -environment->parameter_count();
422  *arguments_count = environment->parameter_count();
423 
424  WriteTranslation(environment->outer(),
425  translation,
426  arguments_index,
427  arguments_count);
428  int closure_id = *info()->closure() != *environment->closure()
429  ? DefineDeoptimizationLiteral(environment->closure())
430  : Translation::kSelfLiteralId;
431  switch (environment->frame_type()) {
432  case JS_FUNCTION:
433  translation->BeginJSFrame(environment->ast_id(), closure_id, height);
434  break;
435  case JS_CONSTRUCT:
436  translation->BeginConstructStubFrame(closure_id, translation_size);
437  break;
438  case JS_GETTER:
439  ASSERT(translation_size == 1);
440  ASSERT(height == 0);
441  translation->BeginGetterStubFrame(closure_id);
442  break;
443  case JS_SETTER:
444  ASSERT(translation_size == 2);
445  ASSERT(height == 0);
446  translation->BeginSetterStubFrame(closure_id);
447  break;
448  case ARGUMENTS_ADAPTOR:
449  translation->BeginArgumentsAdaptorFrame(closure_id, translation_size);
450  break;
451  }
452 
453  // Inlined frames which push their arguments cause the index to be
454  // bumped and another stack area to be used for materialization.
455  if (environment->entry() != NULL &&
456  environment->entry()->arguments_pushed()) {
457  *arguments_index = *arguments_index < 0
458  ? GetStackSlotCount()
459  : *arguments_index + *arguments_count;
460  *arguments_count = environment->entry()->arguments_count() + 1;
461  }
462 
463  for (int i = 0; i < translation_size; ++i) {
464  LOperand* value = environment->values()->at(i);
465  // spilled_registers_ and spilled_double_registers_ are either
466  // both NULL or both set.
467  if (environment->spilled_registers() != NULL && value != NULL) {
468  if (value->IsRegister() &&
469  environment->spilled_registers()[value->index()] != NULL) {
470  translation->MarkDuplicate();
471  AddToTranslation(translation,
472  environment->spilled_registers()[value->index()],
473  environment->HasTaggedValueAt(i),
474  environment->HasUint32ValueAt(i),
475  *arguments_index,
476  *arguments_count);
477  } else if (
478  value->IsDoubleRegister() &&
479  environment->spilled_double_registers()[value->index()] != NULL) {
480  translation->MarkDuplicate();
481  AddToTranslation(
482  translation,
483  environment->spilled_double_registers()[value->index()],
484  false,
485  false,
486  *arguments_index,
487  *arguments_count);
488  }
489  }
490 
491  AddToTranslation(translation,
492  value,
493  environment->HasTaggedValueAt(i),
494  environment->HasUint32ValueAt(i),
495  *arguments_index,
496  *arguments_count);
497  }
498 }
499 
500 
501 void LCodeGen::AddToTranslation(Translation* translation,
502  LOperand* op,
503  bool is_tagged,
504  bool is_uint32,
505  int arguments_index,
506  int arguments_count) {
507  if (op == NULL) {
508  // TODO(twuerthinger): Introduce marker operands to indicate that this value
509  // is not present and must be reconstructed from the deoptimizer. Currently
510  // this is only used for the arguments object.
511  translation->StoreArgumentsObject(arguments_index, arguments_count);
512  } else if (op->IsStackSlot()) {
513  if (is_tagged) {
514  translation->StoreStackSlot(op->index());
515  } else if (is_uint32) {
516  translation->StoreUint32StackSlot(op->index());
517  } else {
518  translation->StoreInt32StackSlot(op->index());
519  }
520  } else if (op->IsDoubleStackSlot()) {
521  translation->StoreDoubleStackSlot(op->index());
522  } else if (op->IsArgument()) {
523  ASSERT(is_tagged);
524  int src_index = GetStackSlotCount() + op->index();
525  translation->StoreStackSlot(src_index);
526  } else if (op->IsRegister()) {
527  Register reg = ToRegister(op);
528  if (is_tagged) {
529  translation->StoreRegister(reg);
530  } else if (is_uint32) {
531  translation->StoreUint32Register(reg);
532  } else {
533  translation->StoreInt32Register(reg);
534  }
535  } else if (op->IsDoubleRegister()) {
536  XMMRegister reg = ToDoubleRegister(op);
537  translation->StoreDoubleRegister(reg);
538  } else if (op->IsConstantOperand()) {
539  HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op));
540  int src_index = DefineDeoptimizationLiteral(constant->handle());
541  translation->StoreLiteral(src_index);
542  } else {
543  UNREACHABLE();
544  }
545 }
546 
547 
548 void LCodeGen::CallCodeGeneric(Handle<Code> code,
549  RelocInfo::Mode mode,
550  LInstruction* instr,
551  SafepointMode safepoint_mode) {
552  ASSERT(instr != NULL);
553  LPointerMap* pointers = instr->pointer_map();
554  RecordPosition(pointers->position());
555  __ call(code, mode);
556  RecordSafepointWithLazyDeopt(instr, safepoint_mode);
557 
558  // Signal that we don't inline smi code before these stubs in the
559  // optimizing code generator.
560  if (code->kind() == Code::BINARY_OP_IC ||
561  code->kind() == Code::COMPARE_IC) {
562  __ nop();
563  }
564 }
565 
566 
567 void LCodeGen::CallCode(Handle<Code> code,
568  RelocInfo::Mode mode,
569  LInstruction* instr) {
570  CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT);
571 }
572 
573 
574 void LCodeGen::CallRuntime(const Runtime::Function* fun,
575  int argc,
576  LInstruction* instr) {
577  ASSERT(instr != NULL);
578  ASSERT(instr->HasPointerMap());
579  LPointerMap* pointers = instr->pointer_map();
580  RecordPosition(pointers->position());
581 
582  __ CallRuntime(fun, argc);
583 
584  RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
585 }
586 
587 
588 void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
589  int argc,
590  LInstruction* instr,
591  LOperand* context) {
592  if (context->IsRegister()) {
593  if (!ToRegister(context).is(esi)) {
594  __ mov(esi, ToRegister(context));
595  }
596  } else if (context->IsStackSlot()) {
597  __ mov(esi, ToOperand(context));
598  } else if (context->IsConstantOperand()) {
599  HConstant* constant =
600  chunk_->LookupConstant(LConstantOperand::cast(context));
601  __ LoadHeapObject(esi, Handle<Context>::cast(constant->handle()));
602  } else {
603  UNREACHABLE();
604  }
605 
606  __ CallRuntimeSaveDoubles(id);
607  RecordSafepointWithRegisters(
608  instr->pointer_map(), argc, Safepoint::kNoLazyDeopt);
609 }
610 
611 
612 void LCodeGen::RegisterEnvironmentForDeoptimization(
613  LEnvironment* environment, Safepoint::DeoptMode mode) {
614  if (!environment->HasBeenRegistered()) {
615  // Physical stack frame layout:
616  // -x ............. -4 0 ..................................... y
617  // [incoming arguments] [spill slots] [pushed outgoing arguments]
618 
619  // Layout of the environment:
620  // 0 ..................................................... size-1
621  // [parameters] [locals] [expression stack including arguments]
622 
623  // Layout of the translation:
624  // 0 ........................................................ size - 1 + 4
625  // [expression stack including arguments] [locals] [4 words] [parameters]
626  // |>------------ translation_size ------------<|
627 
628  int frame_count = 0;
629  int jsframe_count = 0;
630  int args_index = 0;
631  int args_count = 0;
632  for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
633  ++frame_count;
634  if (e->frame_type() == JS_FUNCTION) {
635  ++jsframe_count;
636  }
637  }
638  Translation translation(&translations_, frame_count, jsframe_count, zone());
639  WriteTranslation(environment, &translation, &args_index, &args_count);
640  int deoptimization_index = deoptimizations_.length();
641  int pc_offset = masm()->pc_offset();
642  environment->Register(deoptimization_index,
643  translation.index(),
644  (mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
645  deoptimizations_.Add(environment, zone());
646  }
647 }
648 
649 
650 void LCodeGen::DeoptimizeIf(Condition cc, LEnvironment* environment) {
651  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
652  ASSERT(environment->HasBeenRegistered());
653  int id = environment->deoptimization_index();
654  Address entry = Deoptimizer::GetDeoptimizationEntry(id, Deoptimizer::EAGER);
655  if (entry == NULL) {
656  Abort("bailout was not prepared");
657  return;
658  }
659 
660  if (FLAG_deopt_every_n_times != 0) {
661  Handle<SharedFunctionInfo> shared(info_->shared_info());
662  Label no_deopt;
663  __ pushfd();
664  __ push(eax);
665  __ push(ebx);
666  __ mov(ebx, shared);
667  __ mov(eax,
668  FieldOperand(ebx, SharedFunctionInfo::kStressDeoptCounterOffset));
669  __ sub(Operand(eax), Immediate(Smi::FromInt(1)));
670  __ j(not_zero, &no_deopt, Label::kNear);
671  if (FLAG_trap_on_deopt) __ int3();
672  __ mov(eax, Immediate(Smi::FromInt(FLAG_deopt_every_n_times)));
673  __ mov(FieldOperand(ebx, SharedFunctionInfo::kStressDeoptCounterOffset),
674  eax);
675  __ pop(ebx);
676  __ pop(eax);
677  __ popfd();
678  __ jmp(entry, RelocInfo::RUNTIME_ENTRY);
679 
680  __ bind(&no_deopt);
681  __ mov(FieldOperand(ebx, SharedFunctionInfo::kStressDeoptCounterOffset),
682  eax);
683  __ pop(ebx);
684  __ pop(eax);
685  __ popfd();
686  }
687 
688  if (cc == no_condition) {
689  if (FLAG_trap_on_deopt) __ int3();
690  __ jmp(entry, RelocInfo::RUNTIME_ENTRY);
691  } else {
692  if (FLAG_trap_on_deopt) {
693  Label done;
694  __ j(NegateCondition(cc), &done, Label::kNear);
695  __ int3();
696  __ jmp(entry, RelocInfo::RUNTIME_ENTRY);
697  __ bind(&done);
698  } else {
699  __ j(cc, entry, RelocInfo::RUNTIME_ENTRY);
700  }
701  }
702 }
703 
704 
705 void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
706  int length = deoptimizations_.length();
707  if (length == 0) return;
708  Handle<DeoptimizationInputData> data =
709  factory()->NewDeoptimizationInputData(length, TENURED);
710 
711  Handle<ByteArray> translations = translations_.CreateByteArray();
712  data->SetTranslationByteArray(*translations);
713  data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));
714 
715  Handle<FixedArray> literals =
716  factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
717  for (int i = 0; i < deoptimization_literals_.length(); i++) {
718  literals->set(i, *deoptimization_literals_[i]);
719  }
720  data->SetLiteralArray(*literals);
721 
722  data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id().ToInt()));
723  data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_));
724 
725  // Populate the deoptimization entries.
726  for (int i = 0; i < length; i++) {
727  LEnvironment* env = deoptimizations_[i];
728  data->SetAstId(i, env->ast_id());
729  data->SetTranslationIndex(i, Smi::FromInt(env->translation_index()));
730  data->SetArgumentsStackHeight(i,
731  Smi::FromInt(env->arguments_stack_height()));
732  data->SetPc(i, Smi::FromInt(env->pc_offset()));
733  }
734  code->set_deoptimization_data(*data);
735 }
736 
737 
738 int LCodeGen::DefineDeoptimizationLiteral(Handle<Object> literal) {
739  int result = deoptimization_literals_.length();
740  for (int i = 0; i < deoptimization_literals_.length(); ++i) {
741  if (deoptimization_literals_[i].is_identical_to(literal)) return i;
742  }
743  deoptimization_literals_.Add(literal, zone());
744  return result;
745 }
746 
747 
748 void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
749  ASSERT(deoptimization_literals_.length() == 0);
750 
751  const ZoneList<Handle<JSFunction> >* inlined_closures =
752  chunk()->inlined_closures();
753 
754  for (int i = 0, length = inlined_closures->length();
755  i < length;
756  i++) {
757  DefineDeoptimizationLiteral(inlined_closures->at(i));
758  }
759 
760  inlined_function_count_ = deoptimization_literals_.length();
761 }
762 
763 
764 void LCodeGen::RecordSafepointWithLazyDeopt(
765  LInstruction* instr, SafepointMode safepoint_mode) {
766  if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
767  RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt);
768  } else {
769  ASSERT(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
770  RecordSafepointWithRegisters(
771  instr->pointer_map(), 0, Safepoint::kLazyDeopt);
772  }
773 }
774 
775 
776 void LCodeGen::RecordSafepoint(
777  LPointerMap* pointers,
778  Safepoint::Kind kind,
779  int arguments,
780  Safepoint::DeoptMode deopt_mode) {
781  ASSERT(kind == expected_safepoint_kind_);
782  const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();
783  Safepoint safepoint =
784  safepoints_.DefineSafepoint(masm(), kind, arguments, deopt_mode);
785  for (int i = 0; i < operands->length(); i++) {
786  LOperand* pointer = operands->at(i);
787  if (pointer->IsStackSlot()) {
788  safepoint.DefinePointerSlot(pointer->index(), zone());
789  } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
790  safepoint.DefinePointerRegister(ToRegister(pointer), zone());
791  }
792  }
793 }
794 
795 
796 void LCodeGen::RecordSafepoint(LPointerMap* pointers,
797  Safepoint::DeoptMode mode) {
798  RecordSafepoint(pointers, Safepoint::kSimple, 0, mode);
799 }
800 
801 
802 void LCodeGen::RecordSafepoint(Safepoint::DeoptMode mode) {
803  LPointerMap empty_pointers(RelocInfo::kNoPosition, zone());
804  RecordSafepoint(&empty_pointers, mode);
805 }
806 
807 
808 void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
809  int arguments,
810  Safepoint::DeoptMode mode) {
811  RecordSafepoint(pointers, Safepoint::kWithRegisters, arguments, mode);
812 }
813 
814 
815 void LCodeGen::RecordPosition(int position) {
816  if (position == RelocInfo::kNoPosition) return;
817  masm()->positions_recorder()->RecordPosition(position);
818 }
819 
820 
821 void LCodeGen::DoLabel(LLabel* label) {
822  if (label->is_loop_header()) {
823  Comment(";;; B%d - LOOP entry", label->block_id());
824  } else {
825  Comment(";;; B%d", label->block_id());
826  }
827  __ bind(label->label());
828  current_block_ = label->block_id();
829  DoGap(label);
830 }
831 
832 
833 void LCodeGen::DoParallelMove(LParallelMove* move) {
834  resolver_.Resolve(move);
835 }
836 
837 
838 void LCodeGen::DoGap(LGap* gap) {
839  for (int i = LGap::FIRST_INNER_POSITION;
840  i <= LGap::LAST_INNER_POSITION;
841  i++) {
842  LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i);
843  LParallelMove* move = gap->GetParallelMove(inner_pos);
844  if (move != NULL) DoParallelMove(move);
845  }
846 }
847 
848 
849 void LCodeGen::DoInstructionGap(LInstructionGap* instr) {
850  DoGap(instr);
851 }
852 
853 
854 void LCodeGen::DoParameter(LParameter* instr) {
855  // Nothing to do.
856 }
857 
858 
859 void LCodeGen::DoCallStub(LCallStub* instr) {
860  ASSERT(ToRegister(instr->context()).is(esi));
861  ASSERT(ToRegister(instr->result()).is(eax));
862  switch (instr->hydrogen()->major_key()) {
863  case CodeStub::RegExpConstructResult: {
864  RegExpConstructResultStub stub;
865  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
866  break;
867  }
868  case CodeStub::RegExpExec: {
869  RegExpExecStub stub;
870  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
871  break;
872  }
873  case CodeStub::SubString: {
874  SubStringStub stub;
875  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
876  break;
877  }
878  case CodeStub::NumberToString: {
879  NumberToStringStub stub;
880  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
881  break;
882  }
883  case CodeStub::StringAdd: {
884  StringAddStub stub(NO_STRING_ADD_FLAGS);
885  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
886  break;
887  }
888  case CodeStub::StringCompare: {
889  StringCompareStub stub;
890  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
891  break;
892  }
893  case CodeStub::TranscendentalCache: {
894  TranscendentalCacheStub stub(instr->transcendental_type(),
895  TranscendentalCacheStub::TAGGED);
896  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
897  break;
898  }
899  default:
900  UNREACHABLE();
901  }
902 }
903 
904 
905 void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
906  // Nothing to do.
907 }
908 
909 
910 void LCodeGen::DoModI(LModI* instr) {
911  if (instr->hydrogen()->HasPowerOf2Divisor()) {
912  Register dividend = ToRegister(instr->left());
913 
914  int32_t divisor =
915  HConstant::cast(instr->hydrogen()->right())->Integer32Value();
916 
917  if (divisor < 0) divisor = -divisor;
918 
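// Illustration (added for clarity; not part of the original source): for a
// power-of-two divisor the remainder is computed with a mask. E.g. for
// dividend == -5 and divisor == 4 the code below negates to 5, masks with
// divisor - 1 == 3 to get 1, and negates again, yielding -1, which matches
// JavaScript's truncating % semantics (-5 % 4 == -1).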
919  Label positive_dividend, done;
920  __ test(dividend, Operand(dividend));
921  __ j(not_sign, &positive_dividend, Label::kNear);
922  __ neg(dividend);
923  __ and_(dividend, divisor - 1);
924  __ neg(dividend);
925  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
926  __ j(not_zero, &done, Label::kNear);
927  DeoptimizeIf(no_condition, instr->environment());
928  } else {
929  __ jmp(&done, Label::kNear);
930  }
931  __ bind(&positive_dividend);
932  __ and_(dividend, divisor - 1);
933  __ bind(&done);
934  } else {
935  Label done, remainder_eq_dividend, slow, do_subtraction, both_positive;
936  Register left_reg = ToRegister(instr->left());
937  Register right_reg = ToRegister(instr->right());
938  Register result_reg = ToRegister(instr->result());
939 
940  ASSERT(left_reg.is(eax));
941  ASSERT(result_reg.is(edx));
942  ASSERT(!right_reg.is(eax));
943  ASSERT(!right_reg.is(edx));
944 
945  // Check for x % 0.
946  if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
947  __ test(right_reg, Operand(right_reg));
948  DeoptimizeIf(zero, instr->environment());
949  }
950 
951  __ test(left_reg, Operand(left_reg));
952  __ j(zero, &remainder_eq_dividend, Label::kNear);
953  __ j(sign, &slow, Label::kNear);
954 
955  __ test(right_reg, Operand(right_reg));
956  __ j(not_sign, &both_positive, Label::kNear);
957  // The sign of the divisor doesn't matter.
958  __ neg(right_reg);
959 
960  __ bind(&both_positive);
961  // If the dividend is smaller than the nonnegative
962  // divisor, the dividend is the result.
963  __ cmp(left_reg, Operand(right_reg));
964  __ j(less, &remainder_eq_dividend, Label::kNear);
965 
966  // Check if the divisor is a PowerOfTwo integer.
967  Register scratch = ToRegister(instr->temp());
968  __ mov(scratch, right_reg);
969  __ sub(Operand(scratch), Immediate(1));
970  __ test(scratch, Operand(right_reg));
971  __ j(not_zero, &do_subtraction, Label::kNear);
972  __ and_(left_reg, Operand(scratch));
973  __ jmp(&remainder_eq_dividend, Label::kNear);
974 
975  __ bind(&do_subtraction);
976  const int kUnfolds = 3;
977  // Try a few subtractions of the dividend.
978  __ mov(scratch, left_reg);
979  for (int i = 0; i < kUnfolds; i++) {
980  // Reduce the dividend by the divisor.
981  __ sub(left_reg, Operand(right_reg));
982  // Check if the dividend is less than the divisor.
983  __ cmp(left_reg, Operand(right_reg));
984  __ j(less, &remainder_eq_dividend, Label::kNear);
985  }
986  __ mov(left_reg, scratch);
987 
988  // Slow case, using idiv instruction.
989  __ bind(&slow);
990  // Sign extend to edx.
991  __ cdq();
992 
993  // Check for (0 % -x) that will produce negative zero.
994  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
995  Label positive_left;
996  Label done;
997  __ test(left_reg, Operand(left_reg));
998  __ j(not_sign, &positive_left, Label::kNear);
999  __ idiv(right_reg);
1000 
1001  // Test the remainder for 0, because then the result would be -0.
1002  __ test(result_reg, Operand(result_reg));
1003  __ j(not_zero, &done, Label::kNear);
1004 
1005  DeoptimizeIf(no_condition, instr->environment());
1006  __ bind(&positive_left);
1007  __ idiv(right_reg);
1008  __ bind(&done);
1009  } else {
1010  __ idiv(right_reg);
1011  }
1012  __ jmp(&done, Label::kNear);
1013 
1014  __ bind(&remainder_eq_dividend);
1015  __ mov(result_reg, left_reg);
1016 
1017  __ bind(&done);
1018  }
1019 }
1020 
1021 
1022 void LCodeGen::DoDivI(LDivI* instr) {
1023  LOperand* right = instr->right();
1024  ASSERT(ToRegister(instr->result()).is(eax));
1025  ASSERT(ToRegister(instr->left()).is(eax));
1026  ASSERT(!ToRegister(instr->right()).is(eax));
1027  ASSERT(!ToRegister(instr->right()).is(edx));
1028 
1029  Register left_reg = eax;
1030 
1031  // Check for x / 0.
1032  Register right_reg = ToRegister(right);
1033  if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
1034  __ test(right_reg, ToOperand(right));
1035  DeoptimizeIf(zero, instr->environment());
1036  }
1037 
1038  // Check for (0 / -x) that will produce negative zero.
1039  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
1040  Label left_not_zero;
1041  __ test(left_reg, Operand(left_reg));
1042  __ j(not_zero, &left_not_zero, Label::kNear);
1043  __ test(right_reg, ToOperand(right));
1044  DeoptimizeIf(sign, instr->environment());
1045  __ bind(&left_not_zero);
1046  }
1047 
1048  // Check for (-kMinInt / -1).
1049  if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
1050  Label left_not_min_int;
1051  __ cmp(left_reg, kMinInt);
1052  __ j(not_zero, &left_not_min_int, Label::kNear);
1053  __ cmp(right_reg, -1);
1054  DeoptimizeIf(zero, instr->environment());
1055  __ bind(&left_not_min_int);
1056  }
1057 
1058  // Sign extend to edx.
1059  __ cdq();
1060  __ idiv(right_reg);
1061 
1062  // Deoptimize if remainder is not 0.
1063  __ test(edx, Operand(edx));
1064  DeoptimizeIf(not_zero, instr->environment());
1065 }
1066 
1067 
1068 void LCodeGen::DoMathFloorOfDiv(LMathFloorOfDiv* instr) {
1069  ASSERT(instr->right()->IsConstantOperand());
1070 
1071  Register dividend = ToRegister(instr->left());
1072  int32_t divisor = ToInteger32(LConstantOperand::cast(instr->right()));
1073  Register result = ToRegister(instr->result());
1074 
1075  switch (divisor) {
1076  case 0:
1077  DeoptimizeIf(no_condition, instr->environment());
1078  return;
1079 
1080  case 1:
1081  __ Move(result, dividend);
1082  return;
1083 
1084  case -1:
1085  __ Move(result, dividend);
1086  __ neg(result);
1087  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
1088  DeoptimizeIf(zero, instr->environment());
1089  }
1090  if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
1091  DeoptimizeIf(overflow, instr->environment());
1092  }
1093  return;
1094  }
1095 
1096  uint32_t divisor_abs = abs(divisor);
1097  if (IsPowerOf2(divisor_abs)) {
1098  int32_t power = WhichPowerOf2(divisor_abs);
1099  if (divisor < 0) {
1100  // Input[dividend] is clobbered.
1101  // The sequence is tedious because neg(dividend) might overflow.
1102  __ mov(result, dividend);
1103  __ sar(dividend, 31);
1104  __ neg(result);
1105  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
1106  DeoptimizeIf(zero, instr->environment());
1107  }
1108  __ shl(dividend, 32 - power);
1109  __ sar(result, power);
1110  __ not_(dividend);
1111  // Clear result.sign if dividend.sign is set.
1112  __ and_(result, dividend);
1113  } else {
1114  __ Move(result, dividend);
1115  __ sar(result, power);
1116  }
1117  } else {
1118  ASSERT(ToRegister(instr->left()).is(eax));
1119  ASSERT(ToRegister(instr->result()).is(edx));
1120  Register scratch = ToRegister(instr->temp());
1121 
1122  // Find b which: 2^b < divisor_abs < 2^(b+1).
1123  unsigned b = 31 - CompilerIntrinsics::CountLeadingZeros(divisor_abs);
1124  unsigned shift = 32 + b; // Precision +1bit (effectively).
1125  double multiplier_f =
1126  static_cast<double>(static_cast<uint64_t>(1) << shift) / divisor_abs;
1127  int64_t multiplier;
1128  if (multiplier_f - floor(multiplier_f) < 0.5) {
1129  multiplier = static_cast<int64_t>(floor(multiplier_f));
1130  } else {
1131  multiplier = static_cast<int64_t>(floor(multiplier_f)) + 1;
1132  }
1133  // The multiplier is a uint32.
1134  ASSERT(multiplier > 0 &&
1135  multiplier < (static_cast<int64_t>(1) << 32));
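// Worked example (added for clarity; not part of the original source): for
// divisor_abs == 3, b == 1 and shift == 33, so multiplier_f == 2^33 / 3
// ~= 2863311530.67, which rounds to 2863311531 (0xAAAAAAAB). Multiplying the
// dividend by this fixed-point reciprocal and shifting right by 33 replaces
// the division; the code below corrects the result for sign and for rounding
// toward negative infinity.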
1136  __ mov(scratch, dividend);
1137  if (divisor < 0 &&
1138  instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
1139  __ test(dividend, dividend);
1140  DeoptimizeIf(zero, instr->environment());
1141  }
1142  __ mov(edx, static_cast<int32_t>(multiplier));
1143  __ imul(edx);
1144  if (static_cast<int32_t>(multiplier) < 0) {
1145  __ add(edx, scratch);
1146  }
1147  Register reg_lo = eax;
1148  Register reg_byte_scratch = scratch;
1149  if (!reg_byte_scratch.is_byte_register()) {
1150  __ xchg(reg_lo, reg_byte_scratch);
1151  reg_lo = scratch;
1152  reg_byte_scratch = eax;
1153  }
1154  if (divisor < 0) {
1155  __ xor_(reg_byte_scratch, reg_byte_scratch);
1156  __ cmp(reg_lo, 0x40000000);
1157  __ setcc(above, reg_byte_scratch);
1158  __ neg(edx);
1159  __ sub(edx, reg_byte_scratch);
1160  } else {
1161  __ xor_(reg_byte_scratch, reg_byte_scratch);
1162  __ cmp(reg_lo, 0xC0000000);
1163  __ setcc(above_equal, reg_byte_scratch);
1164  __ add(edx, reg_byte_scratch);
1165  }
1166  __ sar(edx, shift - 32);
1167  }
1168 }
1169 
1170 
1171 void LCodeGen::DoMulI(LMulI* instr) {
1172  Register left = ToRegister(instr->left());
1173  LOperand* right = instr->right();
1174 
1175  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
1176  __ mov(ToRegister(instr->temp()), left);
1177  }
1178 
1179  if (right->IsConstantOperand()) {
1180  // Try strength reductions on the multiplication.
1181  // All replacement instructions are at most as long as the imul
1182  // and have better latency.
1183  int constant = ToInteger32(LConstantOperand::cast(right));
1184  if (constant == -1) {
1185  __ neg(left);
1186  } else if (constant == 0) {
1187  __ xor_(left, Operand(left));
1188  } else if (constant == 2) {
1189  __ add(left, Operand(left));
1190  } else if (!instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
1191  // If we know that the multiplication can't overflow, it's safe to
1192  // use instructions that don't set the overflow flag for the
1193  // multiplication.
1194  switch (constant) {
1195  case 1:
1196  // Do nothing.
1197  break;
1198  case 3:
1199  __ lea(left, Operand(left, left, times_2, 0));
1200  break;
1201  case 4:
1202  __ shl(left, 2);
1203  break;
1204  case 5:
1205  __ lea(left, Operand(left, left, times_4, 0));
1206  break;
1207  case 8:
1208  __ shl(left, 3);
1209  break;
1210  case 9:
1211  __ lea(left, Operand(left, left, times_8, 0));
1212  break;
1213  case 16:
1214  __ shl(left, 4);
1215  break;
1216  default:
1217  __ imul(left, left, constant);
1218  break;
1219  }
1220  } else {
1221  __ imul(left, left, constant);
1222  }
1223  } else {
1224  __ imul(left, ToOperand(right));
1225  }
1226 
1227  if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
1228  DeoptimizeIf(overflow, instr->environment());
1229  }
1230 
1231  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
1232  // Bail out if the result is supposed to be negative zero.
1233  Label done;
1234  __ test(left, Operand(left));
1235  __ j(not_zero, &done, Label::kNear);
1236  if (right->IsConstantOperand()) {
1237  if (ToInteger32(LConstantOperand::cast(right)) < 0) {
1238  DeoptimizeIf(no_condition, instr->environment());
1239  } else if (ToInteger32(LConstantOperand::cast(right)) == 0) {
1240  __ cmp(ToRegister(instr->temp()), Immediate(0));
1241  DeoptimizeIf(less, instr->environment());
1242  }
1243  } else {
1244  // Test the non-zero operand for negative sign.
1245  __ or_(ToRegister(instr->temp()), ToOperand(right));
1246  DeoptimizeIf(sign, instr->environment());
1247  }
1248  __ bind(&done);
1249  }
1250 }
1251 
1252 
1253 void LCodeGen::DoBitI(LBitI* instr) {
1254  LOperand* left = instr->left();
1255  LOperand* right = instr->right();
1256  ASSERT(left->Equals(instr->result()));
1257  ASSERT(left->IsRegister());
1258 
1259  if (right->IsConstantOperand()) {
1260  int right_operand = ToInteger32(LConstantOperand::cast(right));
1261  switch (instr->op()) {
1262  case Token::BIT_AND:
1263  __ and_(ToRegister(left), right_operand);
1264  break;
1265  case Token::BIT_OR:
1266  __ or_(ToRegister(left), right_operand);
1267  break;
1268  case Token::BIT_XOR:
1269  __ xor_(ToRegister(left), right_operand);
1270  break;
1271  default:
1272  UNREACHABLE();
1273  break;
1274  }
1275  } else {
1276  switch (instr->op()) {
1277  case Token::BIT_AND:
1278  __ and_(ToRegister(left), ToOperand(right));
1279  break;
1280  case Token::BIT_OR:
1281  __ or_(ToRegister(left), ToOperand(right));
1282  break;
1283  case Token::BIT_XOR:
1284  __ xor_(ToRegister(left), ToOperand(right));
1285  break;
1286  default:
1287  UNREACHABLE();
1288  break;
1289  }
1290  }
1291 }
1292 
1293 
1294 void LCodeGen::DoShiftI(LShiftI* instr) {
1295  LOperand* left = instr->left();
1296  LOperand* right = instr->right();
1297  ASSERT(left->Equals(instr->result()));
1298  ASSERT(left->IsRegister());
1299  if (right->IsRegister()) {
1300  ASSERT(ToRegister(right).is(ecx));
1301 
1302  switch (instr->op()) {
1303  case Token::SAR:
1304  __ sar_cl(ToRegister(left));
1305  break;
1306  case Token::SHR:
1307  __ shr_cl(ToRegister(left));
1308  if (instr->can_deopt()) {
1309  __ test(ToRegister(left), Immediate(0x80000000));
1310  DeoptimizeIf(not_zero, instr->environment());
1311  }
1312  break;
1313  case Token::SHL:
1314  __ shl_cl(ToRegister(left));
1315  break;
1316  default:
1317  UNREACHABLE();
1318  break;
1319  }
1320  } else {
1321  int value = ToInteger32(LConstantOperand::cast(right));
1322  uint8_t shift_count = static_cast<uint8_t>(value & 0x1F);
1323  switch (instr->op()) {
1324  case Token::SAR:
1325  if (shift_count != 0) {
1326  __ sar(ToRegister(left), shift_count);
1327  }
1328  break;
1329  case Token::SHR:
1330  if (shift_count == 0 && instr->can_deopt()) {
1331  __ test(ToRegister(left), Immediate(0x80000000));
1332  DeoptimizeIf(not_zero, instr->environment());
1333  } else {
1334  __ shr(ToRegister(left), shift_count);
1335  }
1336  break;
1337  case Token::SHL:
1338  if (shift_count != 0) {
1339  __ shl(ToRegister(left), shift_count);
1340  }
1341  break;
1342  default:
1343  UNREACHABLE();
1344  break;
1345  }
1346  }
1347 }
1348 
1349 
1350 void LCodeGen::DoSubI(LSubI* instr) {
1351  LOperand* left = instr->left();
1352  LOperand* right = instr->right();
1353  ASSERT(left->Equals(instr->result()));
1354 
1355  if (right->IsConstantOperand()) {
1356  __ sub(ToOperand(left), ToInteger32Immediate(right));
1357  } else {
1358  __ sub(ToRegister(left), ToOperand(right));
1359  }
1360  if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
1361  DeoptimizeIf(overflow, instr->environment());
1362  }
1363 }
1364 
1365 
1366 void LCodeGen::DoConstantI(LConstantI* instr) {
1367  ASSERT(instr->result()->IsRegister());
1368  __ Set(ToRegister(instr->result()), Immediate(instr->value()));
1369 }
1370 
1371 
1372 void LCodeGen::DoConstantD(LConstantD* instr) {
1373  ASSERT(instr->result()->IsDoubleRegister());
1374  XMMRegister res = ToDoubleRegister(instr->result());
1375  double v = instr->value();
1376  // Use xor to produce +0.0 in a fast and compact way, but avoid to
1377  // do so if the constant is -0.0.
1378  if (BitCast<uint64_t, double>(v) == 0) {
1379  __ xorps(res, res);
1380  } else {
1381  Register temp = ToRegister(instr->temp());
1382  uint64_t int_val = BitCast<uint64_t, double>(v);
1383  int32_t lower = static_cast<int32_t>(int_val);
1384  int32_t upper = static_cast<int32_t>(int_val >> (kBitsPerInt));
1385  if (CpuFeatures::IsSupported(SSE4_1)) {
1386  CpuFeatures::Scope scope(SSE4_1);
1387  if (lower != 0) {
1388  __ Set(temp, Immediate(lower));
1389  __ movd(res, Operand(temp));
1390  __ Set(temp, Immediate(upper));
1391  __ pinsrd(res, Operand(temp), 1);
1392  } else {
1393  __ xorps(res, res);
1394  __ Set(temp, Immediate(upper));
1395  __ pinsrd(res, Operand(temp), 1);
1396  }
1397  } else {
1398  __ Set(temp, Immediate(upper));
1399  __ movd(res, Operand(temp));
1400  __ psllq(res, 32);
1401  if (lower != 0) {
1402  __ Set(temp, Immediate(lower));
1403  __ movd(xmm0, Operand(temp));
1404  __ por(res, xmm0);
1405  }
1406  }
1407  }
1408 }
1409 
1410 
1411 void LCodeGen::DoConstantT(LConstantT* instr) {
1412  Register reg = ToRegister(instr->result());
1413  Handle<Object> handle = instr->value();
1414  if (handle->IsHeapObject()) {
1415  __ LoadHeapObject(reg, Handle<HeapObject>::cast(handle));
1416  } else {
1417  __ Set(reg, Immediate(handle));
1418  }
1419 }
1420 
1421 
1422 void LCodeGen::DoJSArrayLength(LJSArrayLength* instr) {
1423  Register result = ToRegister(instr->result());
1424  Register array = ToRegister(instr->value());
1425  __ mov(result, FieldOperand(array, JSArray::kLengthOffset));
1426 }
1427 
1428 
1429 void LCodeGen::DoFixedArrayBaseLength(
1430  LFixedArrayBaseLength* instr) {
1431  Register result = ToRegister(instr->result());
1432  Register array = ToRegister(instr->value());
1433  __ mov(result, FieldOperand(array, FixedArrayBase::kLengthOffset));
1434 }
1435 
1436 
1437 void LCodeGen::DoMapEnumLength(LMapEnumLength* instr) {
1438  Register result = ToRegister(instr->result());
1439  Register map = ToRegister(instr->value());
1440  __ EnumLength(result, map);
1441 }
1442 
1443 
1444 void LCodeGen::DoElementsKind(LElementsKind* instr) {
1445  Register result = ToRegister(instr->result());
1446  Register input = ToRegister(instr->value());
1447 
1448  // Load map into |result|.
1449  __ mov(result, FieldOperand(input, HeapObject::kMapOffset));
1450  // Load the map's "bit field 2" into |result|. We only need the first byte,
1451  // but the following masking takes care of that anyway.
1452  __ mov(result, FieldOperand(result, Map::kBitField2Offset));
1453  // Retrieve elements_kind from bit field 2.
1454  __ and_(result, Map::kElementsKindMask);
1455  __ shr(result, Map::kElementsKindShift);
1456 }
1457 
1458 
1459 void LCodeGen::DoValueOf(LValueOf* instr) {
1460  Register input = ToRegister(instr->value());
1461  Register result = ToRegister(instr->result());
1462  Register map = ToRegister(instr->temp());
1463  ASSERT(input.is(result));
1464 
1465  Label done;
1466  // If the object is a smi return the object.
1467  __ JumpIfSmi(input, &done, Label::kNear);
1468 
1469  // If the object is not a value type, return the object.
1470  __ CmpObjectType(input, JS_VALUE_TYPE, map);
1471  __ j(not_equal, &done, Label::kNear);
1472  __ mov(result, FieldOperand(input, JSValue::kValueOffset));
1473 
1474  __ bind(&done);
1475 }
1476 
1477 
1478 void LCodeGen::DoDateField(LDateField* instr) {
1479  Register object = ToRegister(instr->date());
1480  Register result = ToRegister(instr->result());
1481  Register scratch = ToRegister(instr->temp());
1482  Smi* index = instr->index();
1483  Label runtime, done;
1484  ASSERT(object.is(result));
1485  ASSERT(object.is(eax));
1486 
1487  __ test(object, Immediate(kSmiTagMask));
1488  DeoptimizeIf(zero, instr->environment());
1489  __ CmpObjectType(object, JS_DATE_TYPE, scratch);
1490  DeoptimizeIf(not_equal, instr->environment());
1491 
1492  if (index->value() == 0) {
1493  __ mov(result, FieldOperand(object, JSDate::kValueOffset));
1494  } else {
1495  if (index->value() < JSDate::kFirstUncachedField) {
1496  ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
1497  __ mov(scratch, Operand::StaticVariable(stamp));
1498  __ cmp(scratch, FieldOperand(object, JSDate::kCacheStampOffset));
1499  __ j(not_equal, &runtime, Label::kNear);
1500  __ mov(result, FieldOperand(object, JSDate::kValueOffset +
1501  kPointerSize * index->value()));
1502  __ jmp(&done);
1503  }
1504  __ bind(&runtime);
1505  __ PrepareCallCFunction(2, scratch);
1506  __ mov(Operand(esp, 0), object);
1507  __ mov(Operand(esp, 1 * kPointerSize), Immediate(index));
1508  __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
1509  __ bind(&done);
1510  }
1511 }
1512 
1513 
1514 void LCodeGen::DoBitNotI(LBitNotI* instr) {
1515  LOperand* input = instr->value();
1516  ASSERT(input->Equals(instr->result()));
1517  __ not_(ToRegister(input));
1518 }
1519 
1520 
1521 void LCodeGen::DoThrow(LThrow* instr) {
1522  __ push(ToOperand(instr->value()));
1523  ASSERT(ToRegister(instr->context()).is(esi));
1524  CallRuntime(Runtime::kThrow, 1, instr);
1525 
1526  if (FLAG_debug_code) {
1527  Comment("Unreachable code.");
1528  __ int3();
1529  }
1530 }
1531 
1532 
1533 void LCodeGen::DoAddI(LAddI* instr) {
1534  LOperand* left = instr->left();
1535  LOperand* right = instr->right();
1536  ASSERT(left->Equals(instr->result()));
1537 
1538  if (right->IsConstantOperand()) {
1539  __ add(ToOperand(left), ToInteger32Immediate(right));
1540  } else {
1541  __ add(ToRegister(left), ToOperand(right));
1542  }
1543 
1544  if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
1545  DeoptimizeIf(overflow, instr->environment());
1546  }
1547 }
1548 
1549 
1550 void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
1551  LOperand* left = instr->left();
1552  LOperand* right = instr->right();
1553  ASSERT(left->Equals(instr->result()));
1554  HMathMinMax::Operation operation = instr->hydrogen()->operation();
1555  if (instr->hydrogen()->representation().IsInteger32()) {
1556  Label return_left;
1557  Condition condition = (operation == HMathMinMax::kMathMin)
1558  ? less_equal
1559  : greater_equal;
1560  if (right->IsConstantOperand()) {
1561  Operand left_op = ToOperand(left);
1562  Immediate right_imm = ToInteger32Immediate(right);
1563  __ cmp(left_op, right_imm);
1564  __ j(condition, &return_left, Label::kNear);
1565  __ mov(left_op, right_imm);
1566  } else {
1567  Register left_reg = ToRegister(left);
1568  Operand right_op = ToOperand(right);
1569  __ cmp(left_reg, right_op);
1570  __ j(condition, &return_left, Label::kNear);
1571  __ mov(left_reg, right_op);
1572  }
1573  __ bind(&return_left);
1574  } else {
1575  ASSERT(instr->hydrogen()->representation().IsDouble());
1576  Label check_nan_left, check_zero, return_left, return_right;
1577  Condition condition = (operation == HMathMinMax::kMathMin) ? below : above;
1578  XMMRegister left_reg = ToDoubleRegister(left);
1579  XMMRegister right_reg = ToDoubleRegister(right);
1580  __ ucomisd(left_reg, right_reg);
1581  __ j(parity_even, &check_nan_left, Label::kNear); // At least one NaN.
1582  __ j(equal, &check_zero, Label::kNear); // left == right.
1583  __ j(condition, &return_left, Label::kNear);
1584  __ jmp(&return_right, Label::kNear);
1585 
1586  __ bind(&check_zero);
1587  XMMRegister xmm_scratch = xmm0;
1588  __ xorps(xmm_scratch, xmm_scratch);
1589  __ ucomisd(left_reg, xmm_scratch);
1590  __ j(not_equal, &return_left, Label::kNear); // left == right != 0.
1591  // At this point, both left and right are either 0 or -0.
1592  if (operation == HMathMinMax::kMathMin) {
1593  __ orpd(left_reg, right_reg);
1594  } else {
1595  // Since we operate on +0 and/or -0, addsd and andsd have the same effect.
1596  __ addsd(left_reg, right_reg);
1597  }
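// Note (added for clarity; not part of the original source): when both inputs
// are zeros, ORing the sign bits yields -0 whenever either operand is -0,
// which is the correct minimum, while adding the two zeros yields +0 unless
// both are -0, which is the correct maximum.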
1598  __ jmp(&return_left, Label::kNear);
1599 
1600  __ bind(&check_nan_left);
1601  __ ucomisd(left_reg, left_reg); // NaN check.
1602  __ j(parity_even, &return_left, Label::kNear); // left == NaN.
1603  __ bind(&return_right);
1604  __ movsd(left_reg, right_reg);
1605 
1606  __ bind(&return_left);
1607  }
1608 }
1609 
1610 
1611 void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
1612  XMMRegister left = ToDoubleRegister(instr->left());
1613  XMMRegister right = ToDoubleRegister(instr->right());
1614  XMMRegister result = ToDoubleRegister(instr->result());
1615  // Modulo uses a fixed result register.
1616  ASSERT(instr->op() == Token::MOD || left.is(result));
1617  switch (instr->op()) {
1618  case Token::ADD:
1619  __ addsd(left, right);
1620  break;
1621  case Token::SUB:
1622  __ subsd(left, right);
1623  break;
1624  case Token::MUL:
1625  __ mulsd(left, right);
1626  break;
1627  case Token::DIV:
1628  __ divsd(left, right);
1629  break;
1630  case Token::MOD: {
1631  // Pass two doubles as arguments on the stack.
1632  __ PrepareCallCFunction(4, eax);
1633  __ movdbl(Operand(esp, 0 * kDoubleSize), left);
1634  __ movdbl(Operand(esp, 1 * kDoubleSize), right);
1635  __ CallCFunction(
1636  ExternalReference::double_fp_operation(Token::MOD, isolate()),
1637  4);
1638 
1639  // Return value is in st(0) on ia32.
1640  // Store it into the (fixed) result register.
1641  __ sub(Operand(esp), Immediate(kDoubleSize));
1642  __ fstp_d(Operand(esp, 0));
1643  __ movdbl(result, Operand(esp, 0));
1644  __ add(Operand(esp), Immediate(kDoubleSize));
1645  break;
1646  }
1647  default:
1648  UNREACHABLE();
1649  break;
1650  }
1651 }
1652 
1653 
1654 void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
1655  ASSERT(ToRegister(instr->context()).is(esi));
1656  ASSERT(ToRegister(instr->left()).is(edx));
1657  ASSERT(ToRegister(instr->right()).is(eax));
1658  ASSERT(ToRegister(instr->result()).is(eax));
1659 
1660  BinaryOpStub stub(instr->op(), NO_OVERWRITE);
1661  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
1662  __ nop(); // Signals no inlined code.
1663 }
1664 
1665 
1666 int LCodeGen::GetNextEmittedBlock(int block) {
1667  for (int i = block + 1; i < graph()->blocks()->length(); ++i) {
1668  LLabel* label = chunk_->GetLabel(i);
1669  if (!label->HasReplacement()) return i;
1670  }
1671  return -1;
1672 }
1673 
1674 
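// Note (added for clarity; not part of the original source): EmitBranch emits
// at most one jump when either successor is the next block to be generated,
// letting control fall through to it; only when neither successor is the next
// block does it emit both a conditional and an unconditional jump.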
1675 void LCodeGen::EmitBranch(int left_block, int right_block, Condition cc) {
1676  int next_block = GetNextEmittedBlock(current_block_);
1677  right_block = chunk_->LookupDestination(right_block);
1678  left_block = chunk_->LookupDestination(left_block);
1679 
1680  if (right_block == left_block) {
1681  EmitGoto(left_block);
1682  } else if (left_block == next_block) {
1683  __ j(NegateCondition(cc), chunk_->GetAssemblyLabel(right_block));
1684  } else if (right_block == next_block) {
1685  __ j(cc, chunk_->GetAssemblyLabel(left_block));
1686  } else {
1687  __ j(cc, chunk_->GetAssemblyLabel(left_block));
1688  __ jmp(chunk_->GetAssemblyLabel(right_block));
1689  }
1690 }
1691 
1692 
1693 void LCodeGen::DoBranch(LBranch* instr) {
1694  int true_block = chunk_->LookupDestination(instr->true_block_id());
1695  int false_block = chunk_->LookupDestination(instr->false_block_id());
1696 
1697  Representation r = instr->hydrogen()->value()->representation();
1698  if (r.IsInteger32()) {
1699  Register reg = ToRegister(instr->value());
1700  __ test(reg, Operand(reg));
1701  EmitBranch(true_block, false_block, not_zero);
1702  } else if (r.IsDouble()) {
1703  XMMRegister reg = ToDoubleRegister(instr->value());
1704  __ xorps(xmm0, xmm0);
1705  __ ucomisd(reg, xmm0);
1706  EmitBranch(true_block, false_block, not_equal);
1707  } else {
1708  ASSERT(r.IsTagged());
1709  Register reg = ToRegister(instr->value());
1710  HType type = instr->hydrogen()->value()->type();
1711  if (type.IsBoolean()) {
1712  __ cmp(reg, factory()->true_value());
1713  EmitBranch(true_block, false_block, equal);
1714  } else if (type.IsSmi()) {
1715  __ test(reg, Operand(reg));
1716  EmitBranch(true_block, false_block, not_equal);
1717  } else {
1718  Label* true_label = chunk_->GetAssemblyLabel(true_block);
1719  Label* false_label = chunk_->GetAssemblyLabel(false_block);
1720 
1721  ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types();
1722  // Avoid deopts in the case where we've never executed this path before.
1723  if (expected.IsEmpty()) expected = ToBooleanStub::all_types();
1724 
1725  if (expected.Contains(ToBooleanStub::UNDEFINED)) {
1726  // undefined -> false.
1727  __ cmp(reg, factory()->undefined_value());
1728  __ j(equal, false_label);
1729  }
1730  if (expected.Contains(ToBooleanStub::BOOLEAN)) {
1731  // true -> true.
1732  __ cmp(reg, factory()->true_value());
1733  __ j(equal, true_label);
1734  // false -> false.
1735  __ cmp(reg, factory()->false_value());
1736  __ j(equal, false_label);
1737  }
1738  if (expected.Contains(ToBooleanStub::NULL_TYPE)) {
1739  // 'null' -> false.
1740  __ cmp(reg, factory()->null_value());
1741  __ j(equal, false_label);
1742  }
1743 
1744  if (expected.Contains(ToBooleanStub::SMI)) {
1745  // Smis: 0 -> false, all other -> true.
1746  __ test(reg, Operand(reg));
1747  __ j(equal, false_label);
1748  __ JumpIfSmi(reg, true_label);
1749  } else if (expected.NeedsMap()) {
1750  // If we need a map later and have a Smi -> deopt.
1751  __ test(reg, Immediate(kSmiTagMask));
1752  DeoptimizeIf(zero, instr->environment());
1753  }
1754 
1755  Register map = no_reg; // Keep the compiler happy.
1756  if (expected.NeedsMap()) {
1757  map = ToRegister(instr->temp());
1758  ASSERT(!map.is(reg));
1759  __ mov(map, FieldOperand(reg, HeapObject::kMapOffset));
1760 
1761  if (expected.CanBeUndetectable()) {
1762  // Undetectable -> false.
1763  __ test_b(FieldOperand(map, Map::kBitFieldOffset),
1764  1 << Map::kIsUndetectable);
1765  __ j(not_zero, false_label);
1766  }
1767  }
1768 
1769  if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) {
1770  // spec object -> true.
1771  __ CmpInstanceType(map, FIRST_SPEC_OBJECT_TYPE);
1772  __ j(above_equal, true_label);
1773  }
1774 
1775  if (expected.Contains(ToBooleanStub::STRING)) {
1776  // String value -> false iff empty.
1777  Label not_string;
1778  __ CmpInstanceType(map, FIRST_NONSTRING_TYPE);
1779  __ j(above_equal, &not_string, Label::kNear);
1780  __ cmp(FieldOperand(reg, String::kLengthOffset), Immediate(0));
1781  __ j(not_zero, true_label);
1782  __ jmp(false_label);
1783  __ bind(&not_string);
1784  }
1785 
1786  if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
1787  // heap number -> false iff +0, -0, or NaN.
1788  Label not_heap_number;
1789  __ cmp(FieldOperand(reg, HeapObject::kMapOffset),
1790  factory()->heap_number_map());
1791  __ j(not_equal, &not_heap_number, Label::kNear);
1792  __ fldz();
1793  __ fld_d(FieldOperand(reg, HeapNumber::kValueOffset));
1794  __ FCmp();
1795  __ j(zero, false_label);
1796  __ jmp(true_label);
1797  __ bind(&not_heap_number);
1798  }
1799 
1800  // We've seen something for the first time -> deopt.
1801  DeoptimizeIf(no_condition, instr->environment());
1802  }
1803  }
1804 }
1805 
1806 
1807 void LCodeGen::EmitGoto(int block) {
1808  block = chunk_->LookupDestination(block);
1809  int next_block = GetNextEmittedBlock(current_block_);
1810  if (block != next_block) {
1811  __ jmp(chunk_->GetAssemblyLabel(block));
1812  }
1813 }
1814 
1815 
1816 void LCodeGen::DoGoto(LGoto* instr) {
1817  EmitGoto(instr->block_id());
1818 }
1819 
1820 
1821 Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
1822  Condition cond = no_condition;
1823  switch (op) {
1824  case Token::EQ:
1825  case Token::EQ_STRICT:
1826  cond = equal;
1827  break;
1828  case Token::LT:
1829  cond = is_unsigned ? below : less;
1830  break;
1831  case Token::GT:
1832  cond = is_unsigned ? above : greater;
1833  break;
1834  case Token::LTE:
1835  cond = is_unsigned ? below_equal : less_equal;
1836  break;
1837  case Token::GTE:
1838  cond = is_unsigned ? above_equal : greater_equal;
1839  break;
1840  case Token::IN:
1841  case Token::INSTANCEOF:
1842  default:
1843  UNREACHABLE();
1844  }
1845  return cond;
1846 }
1847 
1848 
1849 void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) {
1850  LOperand* left = instr->left();
1851  LOperand* right = instr->right();
1852  int false_block = chunk_->LookupDestination(instr->false_block_id());
1853  int true_block = chunk_->LookupDestination(instr->true_block_id());
1854  Condition cc = TokenToCondition(instr->op(), instr->is_double());
1855 
1856  if (left->IsConstantOperand() && right->IsConstantOperand()) {
1857  // We can statically evaluate the comparison.
1858  double left_val = ToDouble(LConstantOperand::cast(left));
1859  double right_val = ToDouble(LConstantOperand::cast(right));
1860  int next_block =
1861  EvalComparison(instr->op(), left_val, right_val) ? true_block
1862  : false_block;
1863  EmitGoto(next_block);
1864  } else {
1865  if (instr->is_double()) {
1866  // Don't base result on EFLAGS when a NaN is involved. Instead
1867  // jump to the false block.
1868  __ ucomisd(ToDoubleRegister(left), ToDoubleRegister(right));
1869  __ j(parity_even, chunk_->GetAssemblyLabel(false_block));
1870  } else {
1871  if (right->IsConstantOperand()) {
1872  __ cmp(ToRegister(left), ToInteger32Immediate(right));
1873  } else if (left->IsConstantOperand()) {
1874  __ cmp(ToOperand(right), ToInteger32Immediate(left));
1875  // We transposed the operands. Reverse the condition.
1876  cc = ReverseCondition(cc);
1877  } else {
1878  __ cmp(ToRegister(left), ToOperand(right));
1879  }
1880  }
1881  EmitBranch(true_block, false_block, cc);
1882  }
1883 }
1884 
1885 
1886 void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
1887  Register left = ToRegister(instr->left());
1888  Operand right = ToOperand(instr->right());
1889  int false_block = chunk_->LookupDestination(instr->false_block_id());
1890  int true_block = chunk_->LookupDestination(instr->true_block_id());
1891 
1892  __ cmp(left, Operand(right));
1893  EmitBranch(true_block, false_block, equal);
1894 }
1895 
1896 
1897 void LCodeGen::DoCmpConstantEqAndBranch(LCmpConstantEqAndBranch* instr) {
1898  Register left = ToRegister(instr->left());
1899  int true_block = chunk_->LookupDestination(instr->true_block_id());
1900  int false_block = chunk_->LookupDestination(instr->false_block_id());
1901 
1902  __ cmp(left, instr->hydrogen()->right());
1903  EmitBranch(true_block, false_block, equal);
1904 }
1905 
1906 
1907 void LCodeGen::DoIsNilAndBranch(LIsNilAndBranch* instr) {
1908  Register reg = ToRegister(instr->value());
1909  int false_block = chunk_->LookupDestination(instr->false_block_id());
1910 
1911  // If the expression is known to be untagged or a smi, then it's definitely
1912  // not null, and it can't be an undetectable object.
1913  if (instr->hydrogen()->representation().IsSpecialization() ||
1914  instr->hydrogen()->type().IsSmi()) {
1915  EmitGoto(false_block);
1916  return;
1917  }
1918 
1919  int true_block = chunk_->LookupDestination(instr->true_block_id());
1920  Handle<Object> nil_value = instr->nil() == kNullValue ?
1921  factory()->null_value() :
1922  factory()->undefined_value();
1923  __ cmp(reg, nil_value);
1924  if (instr->kind() == kStrictEquality) {
1925  EmitBranch(true_block, false_block, equal);
1926  } else {
1927  Handle<Object> other_nil_value = instr->nil() == kNullValue ?
1928  factory()->undefined_value() :
1929  factory()->null_value();
1930  Label* true_label = chunk_->GetAssemblyLabel(true_block);
1931  Label* false_label = chunk_->GetAssemblyLabel(false_block);
1932  __ j(equal, true_label);
1933  __ cmp(reg, other_nil_value);
1934  __ j(equal, true_label);
1935  __ JumpIfSmi(reg, false_label);
1936  // Check for undetectable objects by looking in the bit field in
1937  // the map. The object has already been smi checked.
1938  Register scratch = ToRegister(instr->temp());
1939  __ mov(scratch, FieldOperand(reg, HeapObject::kMapOffset));
1940  __ movzx_b(scratch, FieldOperand(scratch, Map::kBitFieldOffset));
1941  __ test(scratch, Immediate(1 << Map::kIsUndetectable));
1942  EmitBranch(true_block, false_block, not_zero);
1943  }
1944 }
1945 
1946 
1947 Condition LCodeGen::EmitIsObject(Register input,
1948  Register temp1,
1949  Label* is_not_object,
1950  Label* is_object) {
1951  __ JumpIfSmi(input, is_not_object);
1952 
1953  __ cmp(input, isolate()->factory()->null_value());
1954  __ j(equal, is_object);
1955 
1956  __ mov(temp1, FieldOperand(input, HeapObject::kMapOffset));
1957  // Undetectable objects behave like undefined.
1958  __ test_b(FieldOperand(temp1, Map::kBitFieldOffset),
1959  1 << Map::kIsUndetectable);
1960  __ j(not_zero, is_not_object);
1961 
1962  __ movzx_b(temp1, FieldOperand(temp1, Map::kInstanceTypeOffset));
1963  __ cmp(temp1, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE);
1964  __ j(below, is_not_object);
1965  __ cmp(temp1, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
1966  return below_equal;
1967 }
1968 
1969 
1970 void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
1971  Register reg = ToRegister(instr->value());
1972  Register temp = ToRegister(instr->temp());
1973 
1974  int true_block = chunk_->LookupDestination(instr->true_block_id());
1975  int false_block = chunk_->LookupDestination(instr->false_block_id());
1976  Label* true_label = chunk_->GetAssemblyLabel(true_block);
1977  Label* false_label = chunk_->GetAssemblyLabel(false_block);
1978 
1979  Condition true_cond = EmitIsObject(reg, temp, false_label, true_label);
1980 
1981  EmitBranch(true_block, false_block, true_cond);
1982 }
1983 
1984 
1985 Condition LCodeGen::EmitIsString(Register input,
1986  Register temp1,
1987  Label* is_not_string) {
1988  __ JumpIfSmi(input, is_not_string);
1989 
1990  Condition cond = masm_->IsObjectStringType(input, temp1, temp1);
1991 
1992  return cond;
1993 }
1994 
1995 
1996 void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
1997  Register reg = ToRegister(instr->value());
1998  Register temp = ToRegister(instr->temp());
1999 
2000  int true_block = chunk_->LookupDestination(instr->true_block_id());
2001  int false_block = chunk_->LookupDestination(instr->false_block_id());
2002  Label* false_label = chunk_->GetAssemblyLabel(false_block);
2003 
2004  Condition true_cond = EmitIsString(reg, temp, false_label);
2005 
2006  EmitBranch(true_block, false_block, true_cond);
2007 }
2008 
2009 
2010 void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
2011  Operand input = ToOperand(instr->value());
2012 
2013  int true_block = chunk_->LookupDestination(instr->true_block_id());
2014  int false_block = chunk_->LookupDestination(instr->false_block_id());
2015 
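  // Smis have a zero tag bit (kSmiTag == 0), so the masked test sets the
  // zero flag exactly when the value is a smi.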
2016  __ test(input, Immediate(kSmiTagMask));
2017  EmitBranch(true_block, false_block, zero);
2018 }
2019 
2020 
2021 void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
2022  Register input = ToRegister(instr->value());
2023  Register temp = ToRegister(instr->temp());
2024 
2025  int true_block = chunk_->LookupDestination(instr->true_block_id());
2026  int false_block = chunk_->LookupDestination(instr->false_block_id());
2027 
2028  STATIC_ASSERT(kSmiTag == 0);
2029  __ JumpIfSmi(input, chunk_->GetAssemblyLabel(false_block));
2030  __ mov(temp, FieldOperand(input, HeapObject::kMapOffset));
2031  __ test_b(FieldOperand(temp, Map::kBitFieldOffset),
2032  1 << Map::kIsUndetectable);
2033  EmitBranch(true_block, false_block, not_zero);
2034 }
2035 
2036 
2037 static Condition ComputeCompareCondition(Token::Value op) {
2038  switch (op) {
2039  case Token::EQ_STRICT:
2040  case Token::EQ:
2041  return equal;
2042  case Token::LT:
2043  return less;
2044  case Token::GT:
2045  return greater;
2046  case Token::LTE:
2047  return less_equal;
2048  case Token::GTE:
2049  return greater_equal;
2050  default:
2051  UNREACHABLE();
2052  return no_condition;
2053  }
2054 }
2055 
2056 
2057 void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
2058  Token::Value op = instr->op();
2059  int true_block = chunk_->LookupDestination(instr->true_block_id());
2060  int false_block = chunk_->LookupDestination(instr->false_block_id());
2061 
2062  Handle<Code> ic = CompareIC::GetUninitialized(op);
2063  CallCode(ic, RelocInfo::CODE_TARGET, instr);
2064 
2065  Condition condition = ComputeCompareCondition(op);
2066  __ test(eax, Operand(eax));
2067 
2068  EmitBranch(true_block, false_block, condition);
2069 }
2070 
2071 
2072 static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
2073  InstanceType from = instr->from();
2074  InstanceType to = instr->to();
2075  if (from == FIRST_TYPE) return to;
2076  ASSERT(from == to || to == LAST_TYPE);
2077  return from;
2078 }
2079 
2080 
2081 static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
2082  InstanceType from = instr->from();
2083  InstanceType to = instr->to();
2084  if (from == to) return equal;
2085  if (to == LAST_TYPE) return above_equal;
2086  if (from == FIRST_TYPE) return below_equal;
2087  UNREACHABLE();
2088  return equal;
2089 }
2090 
2091 
2092 void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
2093  Register input = ToRegister(instr->value());
2094  Register temp = ToRegister(instr->temp());
2095 
2096  int true_block = chunk_->LookupDestination(instr->true_block_id());
2097  int false_block = chunk_->LookupDestination(instr->false_block_id());
2098 
2099  Label* false_label = chunk_->GetAssemblyLabel(false_block);
2100 
2101  __ JumpIfSmi(input, false_label);
2102 
2103  __ CmpObjectType(input, TestType(instr->hydrogen()), temp);
2104  EmitBranch(true_block, false_block, BranchCondition(instr->hydrogen()));
2105 }
2106 
2107 
2108 void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
2109  Register input = ToRegister(instr->value());
2110  Register result = ToRegister(instr->result());
2111 
2112  __ AssertString(input);
2113 
2114  __ mov(result, FieldOperand(input, String::kHashFieldOffset));
2115  __ IndexFromHash(result, result);
2116 }
2117 
2118 
2119 void LCodeGen::DoHasCachedArrayIndexAndBranch(
2120  LHasCachedArrayIndexAndBranch* instr) {
2121  Register input = ToRegister(instr->value());
2122 
2123  int true_block = chunk_->LookupDestination(instr->true_block_id());
2124  int false_block = chunk_->LookupDestination(instr->false_block_id());
2125 
2126  __ test(FieldOperand(input, String::kHashFieldOffset),
2127  Immediate(String::kContainsCachedArrayIndexMask));
2128  EmitBranch(true_block, false_block, equal);
2129 }
2130 
2131 
2132 // Branches to a label or falls through with the answer in the z flag. Trashes
2133 // the temp registers, but not the input.
2134 void LCodeGen::EmitClassOfTest(Label* is_true,
2135  Label* is_false,
2136  Handle<String> class_name,
2137  Register input,
2138  Register temp,
2139  Register temp2) {
2140  ASSERT(!input.is(temp));
2141  ASSERT(!input.is(temp2));
2142  ASSERT(!temp.is(temp2));
2143  __ JumpIfSmi(input, is_false);
2144 
2145  if (class_name->IsEqualTo(CStrVector("Function"))) {
2146  // Assuming the following assertions, we can use the same compares to test
2147  // for both being a function type and being in the object type range.
2148  STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
2149  STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
2150  FIRST_SPEC_OBJECT_TYPE + 1);
2151  STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
2152  LAST_SPEC_OBJECT_TYPE - 1);
2153  STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
2154  __ CmpObjectType(input, FIRST_SPEC_OBJECT_TYPE, temp);
2155  __ j(below, is_false);
2156  __ j(equal, is_true);
2157  __ CmpInstanceType(temp, LAST_SPEC_OBJECT_TYPE);
2158  __ j(equal, is_true);
2159  } else {
2160  // Faster code path to avoid two compares: subtract lower bound from the
2161  // actual type and do a signed compare with the width of the type range.
2162  __ mov(temp, FieldOperand(input, HeapObject::kMapOffset));
2163  __ movzx_b(temp2, FieldOperand(temp, Map::kInstanceTypeOffset));
2164  __ sub(Operand(temp2), Immediate(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
2165  __ cmp(Operand(temp2), Immediate(LAST_NONCALLABLE_SPEC_OBJECT_TYPE -
2166  FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
2167  __ j(above, is_false);
2168  }
2169 
2170  // Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range.
2171  // Check if the constructor in the map is a function.
2172  __ mov(temp, FieldOperand(temp, Map::kConstructorOffset));
2173  // Objects with a non-function constructor have class 'Object'.
2174  __ CmpObjectType(temp, JS_FUNCTION_TYPE, temp2);
2175  if (class_name->IsEqualTo(CStrVector("Object"))) {
2176  __ j(not_equal, is_true);
2177  } else {
2178  __ j(not_equal, is_false);
2179  }
2180 
2181  // temp now contains the constructor function. Grab the
2182  // instance class name from there.
2183  __ mov(temp, FieldOperand(temp, JSFunction::kSharedFunctionInfoOffset));
2184  __ mov(temp, FieldOperand(temp,
2185  SharedFunctionInfo::kInstanceClassNameOffset));
2186  // The class name we are testing against is a symbol because it's a literal.
2187  // The name in the constructor is a symbol because of the way the context is
2188  // booted. This routine isn't expected to work for random API-created
2189  // classes and it doesn't have to because you can't access it with natives
2190  // syntax. Since both sides are symbols it is sufficient to use an identity
2191  // comparison.
2192  __ cmp(temp, class_name);
2193  // End with the answer in the z flag.
2194 }
2195 
2196 
2197 void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
2198  Register input = ToRegister(instr->value());
2199  Register temp = ToRegister(instr->temp());
2200  Register temp2 = ToRegister(instr->temp2());
2201 
2202  Handle<String> class_name = instr->hydrogen()->class_name();
2203 
2204  int true_block = chunk_->LookupDestination(instr->true_block_id());
2205  int false_block = chunk_->LookupDestination(instr->false_block_id());
2206 
2207  Label* true_label = chunk_->GetAssemblyLabel(true_block);
2208  Label* false_label = chunk_->GetAssemblyLabel(false_block);
2209 
2210  EmitClassOfTest(true_label, false_label, class_name, input, temp, temp2);
2211 
2212  EmitBranch(true_block, false_block, equal);
2213 }
2214 
2215 
2216 void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
2217  Register reg = ToRegister(instr->value());
2218  int true_block = instr->true_block_id();
2219  int false_block = instr->false_block_id();
2220 
2221  __ cmp(FieldOperand(reg, HeapObject::kMapOffset), instr->map());
2222  EmitBranch(true_block, false_block, equal);
2223 }
2224 
2225 
2226 void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
2227  // Object and function are in fixed registers defined by the stub.
2228  ASSERT(ToRegister(instr->context()).is(esi));
2229  InstanceofStub stub(InstanceofStub::kArgsInRegisters);
2230  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
2231 
2232  Label true_value, done;
2233  __ test(eax, Operand(eax));
2234  __ j(zero, &true_value, Label::kNear);
2235  __ mov(ToRegister(instr->result()), factory()->false_value());
2236  __ jmp(&done, Label::kNear);
2237  __ bind(&true_value);
2238  __ mov(ToRegister(instr->result()), factory()->true_value());
2239  __ bind(&done);
2240 }
2241 
2242 
2243 void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
2244  class DeferredInstanceOfKnownGlobal: public LDeferredCode {
2245  public:
2246  DeferredInstanceOfKnownGlobal(LCodeGen* codegen,
2247  LInstanceOfKnownGlobal* instr)
2248  : LDeferredCode(codegen), instr_(instr) { }
2249  virtual void Generate() {
2250  codegen()->DoDeferredInstanceOfKnownGlobal(instr_, &map_check_);
2251  }
2252  virtual LInstruction* instr() { return instr_; }
2253  Label* map_check() { return &map_check_; }
2254  private:
2255  LInstanceOfKnownGlobal* instr_;
2256  Label map_check_;
2257  };
2258 
2259  DeferredInstanceOfKnownGlobal* deferred;
2260  deferred = new(zone()) DeferredInstanceOfKnownGlobal(this, instr);
2261 
2262  Label done, false_result;
2263  Register object = ToRegister(instr->value());
2264  Register temp = ToRegister(instr->temp());
2265 
2266  // A Smi is not an instance of anything.
2267  __ JumpIfSmi(object, &false_result);
2268 
2269  // This is the inlined call site instanceof cache. The two occurrences of the
2270  // hole value will be patched to the last map/result pair generated by the
2271  // instanceof stub.
2272  Label cache_miss;
2273  Register map = ToRegister(instr->temp());
2274  __ mov(map, FieldOperand(object, HeapObject::kMapOffset));
2275  __ bind(deferred->map_check()); // Label for calculating code patching.
2276  Handle<JSGlobalPropertyCell> cache_cell =
2277  factory()->NewJSGlobalPropertyCell(factory()->the_hole_value());
2278  __ cmp(map, Operand::Cell(cache_cell)); // Patched to cached map.
2279  __ j(not_equal, &cache_miss, Label::kNear);
2280  __ mov(eax, factory()->the_hole_value()); // Patched to either true or false.
2281  __ jmp(&done);
2282 
2283  // The inlined call site cache did not match. Check for null and string
2284  // before calling the deferred code.
2285  __ bind(&cache_miss);
2286  // Null is not an instance of anything.
2287  __ cmp(object, factory()->null_value());
2288  __ j(equal, &false_result);
2289 
2290  // String values are not instances of anything.
2291  Condition is_string = masm_->IsObjectStringType(object, temp, temp);
2292  __ j(is_string, &false_result);
2293 
2294  // Go to the deferred code.
2295  __ jmp(deferred->entry());
2296 
2297  __ bind(&false_result);
2298  __ mov(ToRegister(instr->result()), factory()->false_value());
2299 
2300  // Here result has either true or false. Deferred code also produces true or
2301  // false object.
2302  __ bind(deferred->exit());
2303  __ bind(&done);
2304 }
2305 
2306 
2307 void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
2308  Label* map_check) {
2309  PushSafepointRegistersScope scope(this);
2310 
2311  InstanceofStub::Flags flags = InstanceofStub::kNoFlags;
2312  flags = static_cast<InstanceofStub::Flags>(
2313  flags | InstanceofStub::kArgsInRegisters);
2314  flags = static_cast<InstanceofStub::Flags>(
2315  flags | InstanceofStub::kCallSiteInlineCheck);
2316  flags = static_cast<InstanceofStub::Flags>(
2317  flags | InstanceofStub::kReturnTrueFalseObject);
2318  InstanceofStub stub(flags);
2319 
2320  // Get the temp register reserved by the instruction. This needs to be a
2321  // register which is pushed last by PushSafepointRegisters as top of the
2322  // stack is used to pass the offset to the location of the map check to
2323  // the stub.
2324  Register temp = ToRegister(instr->temp());
2325  ASSERT(MacroAssembler::SafepointRegisterStackIndex(temp) == 0);
2326  __ LoadHeapObject(InstanceofStub::right(), instr->function());
2327  static const int kAdditionalDelta = 13;
2328  int delta = masm_->SizeOfCodeGeneratedSince(map_check) + kAdditionalDelta;
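  // The stub subtracts this delta from its return address to locate the
  // inlined map check site that it patches with the map/result pair.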
2329  __ mov(temp, Immediate(delta));
2330  __ StoreToSafepointRegisterSlot(temp, temp);
2331  CallCodeGeneric(stub.GetCode(),
2332  RelocInfo::CODE_TARGET,
2333  instr,
2334  RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
2335  // Get the deoptimization index of the LLazyBailout-environment that
2336  // corresponds to this instruction.
2337  LEnvironment* env = instr->GetDeferredLazyDeoptimizationEnvironment();
2338  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
2339 
2340  // Put the result value into the eax slot and restore all registers.
2341  __ StoreToSafepointRegisterSlot(eax, eax);
2342 }
2343 
2344 
2345 void LCodeGen::DoCmpT(LCmpT* instr) {
2346  Token::Value op = instr->op();
2347 
2348  Handle<Code> ic = CompareIC::GetUninitialized(op);
2349  CallCode(ic, RelocInfo::CODE_TARGET, instr);
2350 
2351  Condition condition = ComputeCompareCondition(op);
2352  Label true_value, done;
2353  __ test(eax, Operand(eax));
2354  __ j(condition, &true_value, Label::kNear);
2355  __ mov(ToRegister(instr->result()), factory()->false_value());
2356  __ jmp(&done, Label::kNear);
2357  __ bind(&true_value);
2358  __ mov(ToRegister(instr->result()), factory()->true_value());
2359  __ bind(&done);
2360 }
2361 
2362 
2363 void LCodeGen::DoReturn(LReturn* instr) {
2364  if (FLAG_trace) {
2365  // Preserve the return value on the stack and rely on the runtime call
2366  // to return the value in the same register. We're leaving the code
2367  // managed by the register allocator and tearing down the frame, so it's
2368  // safe to write to the context register.
2369  __ push(eax);
2370  __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
2371  __ CallRuntime(Runtime::kTraceExit, 1);
2372  }
2373  if (dynamic_frame_alignment_) {
2374  // Fetch the state of the dynamic frame alignment.
2375  __ mov(edx, Operand(ebp,
2376  JavaScriptFrameConstants::kDynamicAlignmentStateOffset));
2377  }
2378  __ mov(esp, ebp);
2379  __ pop(ebp);
2380  if (dynamic_frame_alignment_) {
2381  Label no_padding;
2382  __ cmp(edx, Immediate(kNoAlignmentPadding));
2383  __ j(equal, &no_padding);
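  // The dynamically aligned frame carries one extra padding slot above the
  // receiver, so drop it together with the receiver and parameters.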
2384  if (FLAG_debug_code) {
2385  __ cmp(Operand(esp, (GetParameterCount() + 2) * kPointerSize),
2386  Immediate(kAlignmentZapValue));
2387  __ Assert(equal, "expected alignment marker");
2388  }
2389  __ Ret((GetParameterCount() + 2) * kPointerSize, ecx);
2390  __ bind(&no_padding);
2391  }
2392  __ Ret((GetParameterCount() + 1) * kPointerSize, ecx);
2393 }
2394 
2395 
2396 void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
2397  Register result = ToRegister(instr->result());
2398  __ mov(result, Operand::Cell(instr->hydrogen()->cell()));
2399  if (instr->hydrogen()->RequiresHoleCheck()) {
2400  __ cmp(result, factory()->the_hole_value());
2401  DeoptimizeIf(equal, instr->environment());
2402  }
2403 }
2404 
2405 
2406 void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
2407  ASSERT(ToRegister(instr->context()).is(esi));
2408  ASSERT(ToRegister(instr->global_object()).is(edx));
2409  ASSERT(ToRegister(instr->result()).is(eax));
2410 
2411  __ mov(ecx, instr->name());
2412  RelocInfo::Mode mode = instr->for_typeof() ? RelocInfo::CODE_TARGET :
2413  RelocInfo::CODE_TARGET_CONTEXT;
2414  Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
2415  CallCode(ic, mode, instr);
2416 }
2417 
2418 
2419 void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
2420  Register value = ToRegister(instr->value());
2421  Handle<JSGlobalPropertyCell> cell_handle = instr->hydrogen()->cell();
2422 
2423  // If the cell we are storing to contains the hole it could have
2424  // been deleted from the property dictionary. In that case, we need
2425  // to update the property details in the property dictionary to mark
2426  // it as no longer deleted. We deoptimize in that case.
2427  if (instr->hydrogen()->RequiresHoleCheck()) {
2428  __ cmp(Operand::Cell(cell_handle), factory()->the_hole_value());
2429  DeoptimizeIf(equal, instr->environment());
2430  }
2431 
2432  // Store the value.
2433  __ mov(Operand::Cell(cell_handle), value);
2434  // Cells are always rescanned, so no write barrier here.
2435 }
2436 
2437 
2438 void LCodeGen::DoStoreGlobalGeneric(LStoreGlobalGeneric* instr) {
2439  ASSERT(ToRegister(instr->context()).is(esi));
2440  ASSERT(ToRegister(instr->global_object()).is(edx));
2441  ASSERT(ToRegister(instr->value()).is(eax));
2442 
2443  __ mov(ecx, instr->name());
2444  Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
2445  ? isolate()->builtins()->StoreIC_Initialize_Strict()
2446  : isolate()->builtins()->StoreIC_Initialize();
2447  CallCode(ic, RelocInfo::CODE_TARGET_CONTEXT, instr);
2448 }
2449 
2450 
2451 void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
2452  Register context = ToRegister(instr->context());
2453  Register result = ToRegister(instr->result());
2454  __ mov(result, ContextOperand(context, instr->slot_index()));
2455 
2456  if (instr->hydrogen()->RequiresHoleCheck()) {
2457  __ cmp(result, factory()->the_hole_value());
2458  if (instr->hydrogen()->DeoptimizesOnHole()) {
2459  DeoptimizeIf(equal, instr->environment());
2460  } else {
2461  Label is_not_hole;
2462  __ j(not_equal, &is_not_hole, Label::kNear);
2463  __ mov(result, factory()->undefined_value());
2464  __ bind(&is_not_hole);
2465  }
2466  }
2467 }
2468 
2469 
2470 void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
2471  Register context = ToRegister(instr->context());
2472  Register value = ToRegister(instr->value());
2473 
2474  Label skip_assignment;
2475 
2476  Operand target = ContextOperand(context, instr->slot_index());
2477  if (instr->hydrogen()->RequiresHoleCheck()) {
2478  __ cmp(target, factory()->the_hole_value());
2479  if (instr->hydrogen()->DeoptimizesOnHole()) {
2480  DeoptimizeIf(equal, instr->environment());
2481  } else {
2482  __ j(not_equal, &skip_assignment, Label::kNear);
2483  }
2484  }
2485 
2486  __ mov(target, value);
2487  if (instr->hydrogen()->NeedsWriteBarrier()) {
2488  HType type = instr->hydrogen()->value()->type();
2489  SmiCheck check_needed =
2490  type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
2491  Register temp = ToRegister(instr->temp());
2492  int offset = Context::SlotOffset(instr->slot_index());
2493  __ RecordWriteContextSlot(context,
2494  offset,
2495  value,
2496  temp,
2497  kSaveFPRegs,
2498  EMIT_REMEMBERED_SET,
2499  check_needed);
2500  }
2501 
2502  __ bind(&skip_assignment);
2503 }
2504 
2505 
2506 void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
2507  Register object = ToRegister(instr->object());
2508  Register result = ToRegister(instr->result());
2509  if (instr->hydrogen()->is_in_object()) {
2510  __ mov(result, FieldOperand(object, instr->hydrogen()->offset()));
2511  } else {
2512  __ mov(result, FieldOperand(object, JSObject::kPropertiesOffset));
2513  __ mov(result, FieldOperand(result, instr->hydrogen()->offset()));
2514  }
2515 }
2516 
2517 
2518 void LCodeGen::EmitLoadFieldOrConstantFunction(Register result,
2519  Register object,
2520  Handle<Map> type,
2521  Handle<String> name,
2522  LEnvironment* env) {
2523  LookupResult lookup(isolate());
2524  type->LookupDescriptor(NULL, *name, &lookup);
2525  ASSERT(lookup.IsFound() || lookup.IsCacheable());
2526  if (lookup.IsField()) {
2527  int index = lookup.GetLocalFieldIndexFromMap(*type);
2528  int offset = index * kPointerSize;
2529  if (index < 0) {
2530  // Negative property indices are in-object properties, indexed
2531  // from the end of the fixed part of the object.
2532  __ mov(result, FieldOperand(object, offset + type->instance_size()));
2533  } else {
2534  // Non-negative property indices are in the properties array.
2535  __ mov(result, FieldOperand(object, JSObject::kPropertiesOffset));
2536  __ mov(result, FieldOperand(result, offset + FixedArray::kHeaderSize));
2537  }
2538  } else if (lookup.IsConstantFunction()) {
2539  Handle<JSFunction> function(lookup.GetConstantFunctionFromMap(*type));
2540  __ LoadHeapObject(result, function);
2541  } else {
2542  // Negative lookup.
2543  // Check prototypes.
2544  Handle<HeapObject> current(HeapObject::cast((*type)->prototype()));
2545  Heap* heap = type->GetHeap();
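  // Walk the prototype chain, deoptimizing if any prototype's map differs
  // from the map recorded at compile time.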
2546  while (*current != heap->null_value()) {
2547  __ LoadHeapObject(result, current);
2548  __ cmp(FieldOperand(result, HeapObject::kMapOffset),
2549  Handle<Map>(current->map()));
2550  DeoptimizeIf(not_equal, env);
2551  current =
2552  Handle<HeapObject>(HeapObject::cast(current->map()->prototype()));
2553  }
2554  __ mov(result, factory()->undefined_value());
2555  }
2556 }
2557 
2558 
2559 void LCodeGen::EmitPushTaggedOperand(LOperand* operand) {
2560  ASSERT(!operand->IsDoubleRegister());
2561  if (operand->IsConstantOperand()) {
2562  Handle<Object> object = ToHandle(LConstantOperand::cast(operand));
2563  if (object->IsSmi()) {
2564  __ Push(Handle<Smi>::cast(object));
2565  } else {
2566  __ PushHeapObject(Handle<HeapObject>::cast(object));
2567  }
2568  } else if (operand->IsRegister()) {
2569  __ push(ToRegister(operand));
2570  } else {
2571  __ push(ToOperand(operand));
2572  }
2573 }
2574 
2575 
2576 // Check for cases where EmitLoadFieldOrConstantFunction needs to walk the
2577 // prototype chain, which causes unbounded code generation.
2578 static bool CompactEmit(SmallMapList* list,
2579  Handle<String> name,
2580  int i,
2581  Isolate* isolate) {
2582  Handle<Map> map = list->at(i);
2583  // If the map has ElementsKind transitions, we will generate map checks
2584  // for each kind in __ CompareMap(..., ALLOW_ELEMENT_TRANSITION_MAPS).
2585  if (map->HasElementsTransition()) return false;
2586  LookupResult lookup(isolate);
2587  map->LookupDescriptor(NULL, *name, &lookup);
2588  return lookup.IsField() || lookup.IsConstantFunction();
2589 }
2590 
2591 
2592 void LCodeGen::DoLoadNamedFieldPolymorphic(LLoadNamedFieldPolymorphic* instr) {
2593  Register object = ToRegister(instr->object());
2594  Register result = ToRegister(instr->result());
2595 
2596  int map_count = instr->hydrogen()->types()->length();
2597  bool need_generic = instr->hydrogen()->need_generic();
2598 
2599  if (map_count == 0 && !need_generic) {
2600  DeoptimizeIf(no_condition, instr->environment());
2601  return;
2602  }
2603  Handle<String> name = instr->hydrogen()->name();
2604  Label done;
2605  bool all_are_compact = true;
2606  for (int i = 0; i < map_count; ++i) {
2607  if (!CompactEmit(instr->hydrogen()->types(), name, i, isolate())) {
2608  all_are_compact = false;
2609  break;
2610  }
2611  }
2612  for (int i = 0; i < map_count; ++i) {
2613  bool last = (i == map_count - 1);
2614  Handle<Map> map = instr->hydrogen()->types()->at(i);
2615  Label check_passed;
2616  __ CompareMap(object, map, &check_passed, ALLOW_ELEMENT_TRANSITION_MAPS);
2617  if (last && !need_generic) {
2618  DeoptimizeIf(not_equal, instr->environment());
2619  __ bind(&check_passed);
2620  EmitLoadFieldOrConstantFunction(
2621  result, object, map, name, instr->environment());
2622  } else {
2623  Label next;
2624  bool compact = all_are_compact ? true :
2625  CompactEmit(instr->hydrogen()->types(), name, i, isolate());
2626  __ j(not_equal, &next, compact ? Label::kNear : Label::kFar);
2627  __ bind(&check_passed);
2628  EmitLoadFieldOrConstantFunction(
2629  result, object, map, name, instr->environment());
2630  __ jmp(&done, all_are_compact ? Label::kNear : Label::kFar);
2631  __ bind(&next);
2632  }
2633  }
2634  if (need_generic) {
2635  __ mov(ecx, name);
2636  Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
2637  CallCode(ic, RelocInfo::CODE_TARGET, instr);
2638  }
2639  __ bind(&done);
2640 }
2641 
2642 
2643 void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
2644  ASSERT(ToRegister(instr->context()).is(esi));
2645  ASSERT(ToRegister(instr->object()).is(edx));
2646  ASSERT(ToRegister(instr->result()).is(eax));
2647 
2648  __ mov(ecx, instr->name());
2649  Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
2650  CallCode(ic, RelocInfo::CODE_TARGET, instr);
2651 }
2652 
2653 
2654 void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
2655  Register function = ToRegister(instr->function());
2656  Register temp = ToRegister(instr->temp());
2657  Register result = ToRegister(instr->result());
2658 
2659  // Check that the function really is a function.
2660  __ CmpObjectType(function, JS_FUNCTION_TYPE, result);
2661  DeoptimizeIf(not_equal, instr->environment());
2662 
2663  // Check whether the function has an instance prototype.
2664  Label non_instance;
2665  __ test_b(FieldOperand(result, Map::kBitFieldOffset),
2666  1 << Map::kHasNonInstancePrototype);
2667  __ j(not_zero, &non_instance, Label::kNear);
2668 
2669  // Get the prototype or initial map from the function.
2670  __ mov(result,
2671  FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
2672 
2673  // Check that the function has a prototype or an initial map.
2674  __ cmp(Operand(result), Immediate(factory()->the_hole_value()));
2675  DeoptimizeIf(equal, instr->environment());
2676 
2677  // If the function does not have an initial map, we're done.
2678  Label done;
2679  __ CmpObjectType(result, MAP_TYPE, temp);
2680  __ j(not_equal, &done, Label::kNear);
2681 
2682  // Get the prototype from the initial map.
2683  __ mov(result, FieldOperand(result, Map::kPrototypeOffset));
2684  __ jmp(&done, Label::kNear);
2685 
2686  // Non-instance prototype: Fetch prototype from constructor field
2687  // in the function's map.
2688  __ bind(&non_instance);
2689  __ mov(result, FieldOperand(result, Map::kConstructorOffset));
2690 
2691  // All done.
2692  __ bind(&done);
2693 }
2694 
2695 
2696 void LCodeGen::DoLoadElements(LLoadElements* instr) {
2697  Register result = ToRegister(instr->result());
2698  Register input = ToRegister(instr->object());
2699  __ mov(result, FieldOperand(input, JSObject::kElementsOffset));
2700  if (FLAG_debug_code) {
2701  Label done, ok, fail;
2702  __ cmp(FieldOperand(result, HeapObject::kMapOffset),
2703  Immediate(factory()->fixed_array_map()));
2704  __ j(equal, &done, Label::kNear);
2705  __ cmp(FieldOperand(result, HeapObject::kMapOffset),
2706  Immediate(factory()->fixed_cow_array_map()));
2707  __ j(equal, &done, Label::kNear);
2708  Register temp((result.is(eax)) ? ebx : eax);
2709  __ push(temp);
2710  __ mov(temp, FieldOperand(result, HeapObject::kMapOffset));
2711  __ movzx_b(temp, FieldOperand(temp, Map::kBitField2Offset));
2712  __ and_(temp, Map::kElementsKindMask);
2713  __ shr(temp, Map::kElementsKindShift);
2714  __ cmp(temp, GetInitialFastElementsKind());
2715  __ j(less, &fail, Label::kNear);
2716  __ cmp(temp, TERMINAL_FAST_ELEMENTS_KIND);
2717  __ j(less_equal, &ok, Label::kNear);
2718  __ cmp(temp, FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND);
2719  __ j(less, &fail, Label::kNear);
2720  __ cmp(temp, LAST_EXTERNAL_ARRAY_ELEMENTS_KIND);
2721  __ j(less_equal, &ok, Label::kNear);
2722  __ bind(&fail);
2723  __ Abort("Check for fast or external elements failed.");
2724  __ bind(&ok);
2725  __ pop(temp);
2726  __ bind(&done);
2727  }
2728 }
2729 
2730 
2731 void LCodeGen::DoLoadExternalArrayPointer(
2732  LLoadExternalArrayPointer* instr) {
2733  Register result = ToRegister(instr->result());
2734  Register input = ToRegister(instr->object());
2735  __ mov(result, FieldOperand(input,
2736  ExternalArray::kExternalPointerOffset));
2737 }
2738 
2739 
2740 void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
2741  Register arguments = ToRegister(instr->arguments());
2742  Register length = ToRegister(instr->length());
2743  Operand index = ToOperand(instr->index());
2744  Register result = ToRegister(instr->result());
2745  // There are two words between the frame pointer and the last argument.
2746  // Subtracting the index from the length accounts for one of them; add one more.
2747  __ sub(length, index);
2748  __ mov(result, Operand(arguments, length, times_4, kPointerSize));
2749 }
2750 
2751 
2752 void LCodeGen::DoLoadKeyedFastElement(LLoadKeyedFastElement* instr) {
2753  Register result = ToRegister(instr->result());
2754 
2755  // Load the result.
2756  __ mov(result,
2757  BuildFastArrayOperand(instr->elements(),
2758  instr->key(),
2759  instr->hydrogen()->key()->representation(),
2760  FAST_ELEMENTS,
2761  FixedArray::kHeaderSize - kHeapObjectTag,
2762  instr->additional_index()));
2763 
2764  // Check for the hole value.
2765  if (instr->hydrogen()->RequiresHoleCheck()) {
2766  if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
2767  __ test(result, Immediate(kSmiTagMask));
2768  DeoptimizeIf(not_equal, instr->environment());
2769  } else {
2770  __ cmp(result, factory()->the_hole_value());
2771  DeoptimizeIf(equal, instr->environment());
2772  }
2773  }
2774 }
2775 
2776 
2777 void LCodeGen::DoLoadKeyedFastDoubleElement(
2778  LLoadKeyedFastDoubleElement* instr) {
2779  XMMRegister result = ToDoubleRegister(instr->result());
2780 
2781  if (instr->hydrogen()->RequiresHoleCheck()) {
2782  int offset = FixedDoubleArray::kHeaderSize - kHeapObjectTag +
2783  sizeof(kHoleNanLower32);
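  // The hole is encoded as a NaN with a distinctive upper word, so comparing
  // only the upper 32 bits against kHoleNanUpper32 is sufficient.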
2784  Operand hole_check_operand = BuildFastArrayOperand(
2785  instr->elements(), instr->key(),
2786  instr->hydrogen()->key()->representation(),
2787  FAST_DOUBLE_ELEMENTS,
2788  offset,
2789  instr->additional_index());
2790  __ cmp(hole_check_operand, Immediate(kHoleNanUpper32));
2791  DeoptimizeIf(equal, instr->environment());
2792  }
2793 
2794  Operand double_load_operand = BuildFastArrayOperand(
2795  instr->elements(),
2796  instr->key(),
2797  instr->hydrogen()->key()->representation(),
2798  FAST_DOUBLE_ELEMENTS,
2799  FixedDoubleArray::kHeaderSize - kHeapObjectTag,
2800  instr->additional_index());
2801  __ movdbl(result, double_load_operand);
2802 }
2803 
2804 
2805 Operand LCodeGen::BuildFastArrayOperand(
2806  LOperand* elements_pointer,
2807  LOperand* key,
2808  Representation key_representation,
2809  ElementsKind elements_kind,
2810  uint32_t offset,
2811  uint32_t additional_index) {
2812  Register elements_pointer_reg = ToRegister(elements_pointer);
2813  int shift_size = ElementsKindToShiftSize(elements_kind);
2814  // Even though the HLoad/StoreKeyedFastElement instructions force the input
2815  // representation for the key to be an integer, the input gets replaced during
2816  // bounds check elimination with the index argument to the bounds check, which
2817  // can be tagged, so that case must be handled here, too.
2818  if (key_representation.IsTagged() && (shift_size >= 1)) {
2819  shift_size -= kSmiTagSize;
2820  }
2821  if (key->IsConstantOperand()) {
2822  int constant_value = ToInteger32(LConstantOperand::cast(key));
2823  if (constant_value & 0xF0000000) {
2824  Abort("array index constant value too big");
2825  }
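  // For a constant key, fold the key and the additional index directly into
  // the operand's displacement.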
2826  return Operand(elements_pointer_reg,
2827  ((constant_value + additional_index) << shift_size)
2828  + offset);
2829  } else {
2830  ScaleFactor scale_factor = static_cast<ScaleFactor>(shift_size);
2831  return Operand(elements_pointer_reg,
2832  ToRegister(key),
2833  scale_factor,
2834  offset + (additional_index << shift_size));
2835  }
2836 }
2837 
2838 
2839 void LCodeGen::DoLoadKeyedSpecializedArrayElement(
2840  LLoadKeyedSpecializedArrayElement* instr) {
2841  ElementsKind elements_kind = instr->elements_kind();
2842  LOperand* key = instr->key();
2843  if (!key->IsConstantOperand() &&
2844  ExternalArrayOpRequiresTemp(instr->hydrogen()->key()->representation(),
2845  elements_kind)) {
2846  __ SmiUntag(ToRegister(key));
2847  }
2848  Operand operand(BuildFastArrayOperand(
2849  instr->external_pointer(),
2850  key,
2851  instr->hydrogen()->key()->representation(),
2852  elements_kind,
2853  0,
2854  instr->additional_index()));
2855  if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
2856  XMMRegister result(ToDoubleRegister(instr->result()));
2857  __ movss(result, operand);
2858  __ cvtss2sd(result, result);
2859  } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
2860  __ movdbl(ToDoubleRegister(instr->result()), operand);
2861  } else {
2862  Register result(ToRegister(instr->result()));
2863  switch (elements_kind) {
2864  case EXTERNAL_BYTE_ELEMENTS:
2865  __ movsx_b(result, operand);
2866  break;
2867  case EXTERNAL_PIXEL_ELEMENTS:
2868  case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
2869  __ movzx_b(result, operand);
2870  break;
2871  case EXTERNAL_SHORT_ELEMENTS:
2872  __ movsx_w(result, operand);
2873  break;
2874  case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
2875  __ movzx_w(result, operand);
2876  break;
2877  case EXTERNAL_INT_ELEMENTS:
2878  __ mov(result, operand);
2879  break;
2880  case EXTERNAL_UNSIGNED_INT_ELEMENTS:
2881  __ mov(result, operand);
2882  if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
2883  __ test(result, Operand(result));
2884  DeoptimizeIf(negative, instr->environment());
2885  }
2886  break;
2887  case EXTERNAL_FLOAT_ELEMENTS:
2888  case EXTERNAL_DOUBLE_ELEMENTS:
2889  case FAST_SMI_ELEMENTS:
2890  case FAST_ELEMENTS:
2891  case FAST_DOUBLE_ELEMENTS:
2892  case FAST_HOLEY_SMI_ELEMENTS:
2893  case FAST_HOLEY_ELEMENTS:
2894  case FAST_HOLEY_DOUBLE_ELEMENTS:
2895  case DICTIONARY_ELEMENTS:
2896  case NON_STRICT_ARGUMENTS_ELEMENTS:
2897  UNREACHABLE();
2898  break;
2899  }
2900  }
2901 }
2902 
2903 
2904 void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
2905  ASSERT(ToRegister(instr->context()).is(esi));
2906  ASSERT(ToRegister(instr->object()).is(edx));
2907  ASSERT(ToRegister(instr->key()).is(ecx));
2908 
2909  Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
2910  CallCode(ic, RelocInfo::CODE_TARGET, instr);
2911 }
2912 
2913 
2914 void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
2915  Register result = ToRegister(instr->result());
2916 
2917  if (instr->hydrogen()->from_inlined()) {
2918  __ lea(result, Operand(esp, -2 * kPointerSize));
2919  } else {
2920  // Check for an arguments adaptor frame.
2921  Label done, adapted;
2922  __ mov(result, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
2923  __ mov(result, Operand(result, StandardFrameConstants::kContextOffset));
2924  __ cmp(Operand(result),
2925  Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
2926  __ j(equal, &adapted, Label::kNear);
2927 
2928  // No arguments adaptor frame.
2929  __ mov(result, Operand(ebp));
2930  __ jmp(&done, Label::kNear);
2931 
2932  // Arguments adaptor frame present.
2933  __ bind(&adapted);
2934  __ mov(result, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
2935 
2936  // Result is the frame pointer for the frame if not adapted and for the real
2937  // frame below the adaptor frame if adapted.
2938  __ bind(&done);
2939  }
2940 }
2941 
2942 
2943 void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
2944  Operand elem = ToOperand(instr->elements());
2945  Register result = ToRegister(instr->result());
2946 
2947  Label done;
2948 
2949  // If there is no arguments adaptor frame, the number of arguments is fixed.
2950  __ cmp(ebp, elem);
2951  __ mov(result, Immediate(scope()->num_parameters()));
2952  __ j(equal, &done, Label::kNear);
2953 
2954  // Arguments adaptor frame present. Get argument length from there.
2955  __ mov(result, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
2956  __ mov(result, Operand(result,
2957  ArgumentsAdaptorFrameConstants::kLengthOffset));
2958  __ SmiUntag(result);
2959 
2960  // Argument length is in result register.
2961  __ bind(&done);
2962 }
2963 
2964 
2965 void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
2966  Register receiver = ToRegister(instr->receiver());
2967  Register function = ToRegister(instr->function());
2968  Register scratch = ToRegister(instr->temp());
2969 
2970  // If the receiver is null or undefined, we have to pass the global
2971  // object as a receiver to normal functions. Values have to be
2972  // passed unchanged to builtins and strict-mode functions.
2973  Label global_object, receiver_ok;
2974 
2975  // Do not transform the receiver to object for strict mode
2976  // functions.
2977  __ mov(scratch,
2978  FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
2979  __ test_b(FieldOperand(scratch, SharedFunctionInfo::kStrictModeByteOffset),
2980  1 << SharedFunctionInfo::kStrictModeBitWithinByte);
2981  __ j(not_equal, &receiver_ok); // A near jump is not sufficient here!
2982 
2983  // Do not transform the receiver to object for builtins.
2984  __ test_b(FieldOperand(scratch, SharedFunctionInfo::kNativeByteOffset),
2985  1 << SharedFunctionInfo::kNativeBitWithinByte);
2986  __ j(not_equal, &receiver_ok);
2987 
2988  // Normal function. Replace undefined or null with global receiver.
2989  __ cmp(receiver, factory()->null_value());
2990  __ j(equal, &global_object, Label::kNear);
2991  __ cmp(receiver, factory()->undefined_value());
2992  __ j(equal, &global_object, Label::kNear);
2993 
2994  // The receiver should be a JS object.
2995  __ test(receiver, Immediate(kSmiTagMask));
2996  DeoptimizeIf(equal, instr->environment());
2997  __ CmpObjectType(receiver, FIRST_SPEC_OBJECT_TYPE, scratch);
2998  DeoptimizeIf(below, instr->environment());
2999  __ jmp(&receiver_ok, Label::kNear);
3000 
3001  __ bind(&global_object);
3002  // TODO(kmillikin): We have a hydrogen value for the global object. See
3003  // if it's better to use it than to explicitly fetch it from the context
3004  // here.
3005  __ mov(receiver, Operand(ebp, StandardFrameConstants::kContextOffset));
3006  __ mov(receiver, ContextOperand(receiver, Context::GLOBAL_OBJECT_INDEX));
3007  __ mov(receiver,
3008  FieldOperand(receiver, JSGlobalObject::kGlobalReceiverOffset));
3009  __ bind(&receiver_ok);
3010 }
3011 
3012 
3013 void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
3014  Register receiver = ToRegister(instr->receiver());
3015  Register function = ToRegister(instr->function());
3016  Register length = ToRegister(instr->length());
3017  Register elements = ToRegister(instr->elements());
3018  ASSERT(receiver.is(eax)); // Used for parameter count.
3019  ASSERT(function.is(edi)); // Required by InvokeFunction.
3020  ASSERT(ToRegister(instr->result()).is(eax));
3021 
3022  // Copy the arguments to this function possibly from the
3023  // adaptor frame below it.
3024  const uint32_t kArgumentsLimit = 1 * KB;
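  // Deoptimize rather than copy an unbounded number of argument slots onto
  // the stack.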
3025  __ cmp(length, kArgumentsLimit);
3026  DeoptimizeIf(above, instr->environment());
3027 
3028  __ push(receiver);
3029  __ mov(receiver, length);
3030 
3031  // Loop through the arguments pushing them onto the execution
3032  // stack.
3033  Label invoke, loop;
3034  // length is a small non-negative integer, due to the test above.
3035  __ test(length, Operand(length));
3036  __ j(zero, &invoke, Label::kNear);
3037  __ bind(&loop);
3038  __ push(Operand(elements, length, times_pointer_size, 1 * kPointerSize));
3039  __ dec(length);
3040  __ j(not_zero, &loop);
3041 
3042  // Invoke the function.
3043  __ bind(&invoke);
3044  ASSERT(instr->HasPointerMap());
3045  LPointerMap* pointers = instr->pointer_map();
3046  RecordPosition(pointers->position());
3047  SafepointGenerator safepoint_generator(
3048  this, pointers, Safepoint::kLazyDeopt);
3049  ParameterCount actual(eax);
3050  __ InvokeFunction(function, actual, CALL_FUNCTION,
3051  safepoint_generator, CALL_AS_METHOD);
3052 }
3053 
3054 
3055 void LCodeGen::DoPushArgument(LPushArgument* instr) {
3056  LOperand* argument = instr->value();
3057  EmitPushTaggedOperand(argument);
3058 }
3059 
3060 
3061 void LCodeGen::DoDrop(LDrop* instr) {
3062  __ Drop(instr->count());
3063 }
3064 
3065 
3066 void LCodeGen::DoThisFunction(LThisFunction* instr) {
3067  Register result = ToRegister(instr->result());
3068  __ mov(result, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
3069 }
3070 
3071 
3072 void LCodeGen::DoContext(LContext* instr) {
3073  Register result = ToRegister(instr->result());
3074  __ mov(result, Operand(ebp, StandardFrameConstants::kContextOffset));
3075 }
3076 
3077 
3078 void LCodeGen::DoOuterContext(LOuterContext* instr) {
3079  Register context = ToRegister(instr->context());
3080  Register result = ToRegister(instr->result());
3081  __ mov(result,
3082  Operand(context, Context::SlotOffset(Context::PREVIOUS_INDEX)));
3083 }
3084 
3085 
3086 void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
3087  ASSERT(ToRegister(instr->context()).is(esi));
3088  __ push(esi); // The context is the first argument.
3089  __ push(Immediate(instr->hydrogen()->pairs()));
3090  __ push(Immediate(Smi::FromInt(instr->hydrogen()->flags())));
3091  CallRuntime(Runtime::kDeclareGlobals, 3, instr);
3092 }
3093 
3094 
3095 void LCodeGen::DoGlobalObject(LGlobalObject* instr) {
3096  Register context = ToRegister(instr->context());
3097  Register result = ToRegister(instr->result());
3098  __ mov(result,
3099  Operand(context, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
3100 }
3101 
3102 
3103 void LCodeGen::DoGlobalReceiver(LGlobalReceiver* instr) {
3104  Register global = ToRegister(instr->global());
3105  Register result = ToRegister(instr->result());
3106  __ mov(result, FieldOperand(global, GlobalObject::kGlobalReceiverOffset));
3107 }
3108 
3109 
3110 void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
3111  int arity,
3112  LInstruction* instr,
3113  CallKind call_kind,
3114  EDIState edi_state) {
3115  bool can_invoke_directly = !function->NeedsArgumentsAdaption() ||
3116  function->shared()->formal_parameter_count() == arity;
3117 
3118  LPointerMap* pointers = instr->pointer_map();
3119  RecordPosition(pointers->position());
3120 
3121  if (can_invoke_directly) {
3122  if (edi_state == EDI_UNINITIALIZED) {
3123  __ LoadHeapObject(edi, function);
3124  }
3125 
3126  // Change context.
3127  __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
3128 
3129  // Set eax to arguments count if adaptation is not needed. Assumes that eax
3130  // is available to write to at this point.
3131  if (!function->NeedsArgumentsAdaption()) {
3132  __ mov(eax, arity);
3133  }
3134 
3135  // Invoke function directly.
3136  __ SetCallKind(ecx, call_kind);
3137  if (*function == *info()->closure()) {
3138  __ CallSelf();
3139  } else {
3140  __ call(FieldOperand(edi, JSFunction::kCodeEntryOffset));
3141  }
3142  RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
3143  } else {
3144  // We need to adapt arguments.
3145  SafepointGenerator generator(
3146  this, pointers, Safepoint::kLazyDeopt);
3147  ParameterCount count(arity);
3148  __ InvokeFunction(function, count, CALL_FUNCTION, generator, call_kind);
3149  }
3150 }
3151 
3152 
3153 void LCodeGen::DoCallConstantFunction(LCallConstantFunction* instr) {
3154  ASSERT(ToRegister(instr->result()).is(eax));
3155  CallKnownFunction(instr->function(),
3156  instr->arity(),
3157  instr,
3158  CALL_AS_METHOD,
3159  EDI_UNINITIALIZED);
3160 }
3161 
3162 
3163 void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr) {
3164  Register input_reg = ToRegister(instr->value());
3165  __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
3166  factory()->heap_number_map());
3167  DeoptimizeIf(not_equal, instr->environment());
3168 
3169  Label done;
3170  Register tmp = input_reg.is(eax) ? ecx : eax;
3171  Register tmp2 = tmp.is(ecx) ? edx : input_reg.is(ecx) ? edx : ecx;
3172 
3173  // Preserve the value of all registers.
3174  PushSafepointRegistersScope scope(this);
3175 
3176  Label negative;
3177  __ mov(tmp, FieldOperand(input_reg, HeapNumber::kExponentOffset));
3178  // Check the sign of the argument. If the argument is positive, just
3179  // return it. We do not need to patch the stack since |input| and
3180  // |result| are the same register and |input| will be restored
3181  // unchanged by popping safepoint registers.
3182  __ test(tmp, Immediate(HeapNumber::kSignMask));
3183  __ j(not_zero, &negative);
3184  __ jmp(&done);
3185 
3186  __ bind(&negative);
3187 
3188  Label allocated, slow;
3189  __ AllocateHeapNumber(tmp, tmp2, no_reg, &slow);
3190  __ jmp(&allocated);
3191 
3192  // Slow case: Call the runtime system to do the number allocation.
3193  __ bind(&slow);
3194 
3195  CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0,
3196  instr, instr->context());
3197 
3198  // Set the pointer to the new heap number in tmp.
3199  if (!tmp.is(eax)) __ mov(tmp, eax);
3200 
3201  // Restore input_reg after call to runtime.
3202  __ LoadFromSafepointRegisterSlot(input_reg, input_reg);
3203 
3204  __ bind(&allocated);
3205  __ mov(tmp2, FieldOperand(input_reg, HeapNumber::kExponentOffset));
3206  __ and_(tmp2, ~HeapNumber::kSignMask);
3207  __ mov(FieldOperand(tmp, HeapNumber::kExponentOffset), tmp2);
3208  __ mov(tmp2, FieldOperand(input_reg, HeapNumber::kMantissaOffset));
3209  __ mov(FieldOperand(tmp, HeapNumber::kMantissaOffset), tmp2);
3210  __ StoreToSafepointRegisterSlot(input_reg, tmp);
3211 
3212  __ bind(&done);
3213 }
3214 
3215 
3216 void LCodeGen::EmitIntegerMathAbs(LUnaryMathOperation* instr) {
3217  Register input_reg = ToRegister(instr->value());
3218  __ test(input_reg, Operand(input_reg));
3219  Label is_positive;
3220  __ j(not_sign, &is_positive);
3221  __ neg(input_reg);
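  // Negating kMinInt leaves the sign bit set; deoptimize because its absolute
  // value is not representable as a 32-bit integer.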
3222  __ test(input_reg, Operand(input_reg));
3223  DeoptimizeIf(negative, instr->environment());
3224  __ bind(&is_positive);
3225 }
3226 
3227 
3228 void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) {
3229  // Class for deferred case.
3230  class DeferredMathAbsTaggedHeapNumber: public LDeferredCode {
3231  public:
3232  DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen,
3233  LUnaryMathOperation* instr)
3234  : LDeferredCode(codegen), instr_(instr) { }
3235  virtual void Generate() {
3236  codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
3237  }
3238  virtual LInstruction* instr() { return instr_; }
3239  private:
3240  LUnaryMathOperation* instr_;
3241  };
3242 
3243  ASSERT(instr->value()->Equals(instr->result()));
3244  Representation r = instr->hydrogen()->value()->representation();
3245 
3246  if (r.IsDouble()) {
3247  XMMRegister scratch = xmm0;
3248  XMMRegister input_reg = ToDoubleRegister(instr->value());
3249  __ xorps(scratch, scratch);
3250  __ subsd(scratch, input_reg);
3251  __ pand(input_reg, scratch);
3252  } else if (r.IsInteger32()) {
3253  EmitIntegerMathAbs(instr);
3254  } else { // Tagged case.
3255  DeferredMathAbsTaggedHeapNumber* deferred =
3256  new(zone()) DeferredMathAbsTaggedHeapNumber(this, instr);
3257  Register input_reg = ToRegister(instr->value());
3258  // Smi check.
3259  __ JumpIfNotSmi(input_reg, deferred->entry());
3260  EmitIntegerMathAbs(instr);
3261  __ bind(deferred->exit());
3262  }
3263 }
3264 
3265 
3266 void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) {
3267  XMMRegister xmm_scratch = xmm0;
3268  Register output_reg = ToRegister(instr->result());
3269  XMMRegister input_reg = ToDoubleRegister(instr->value());
3270 
3271  if (CpuFeatures::IsSupported(SSE4_1)) {
3272  CpuFeatures::Scope scope(SSE4_1);
3273  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3274  // Deoptimize on negative zero.
3275  Label non_zero;
3276  __ xorps(xmm_scratch, xmm_scratch); // Zero the register.
3277  __ ucomisd(input_reg, xmm_scratch);
3278  __ j(not_equal, &non_zero, Label::kNear);
3279  __ movmskpd(output_reg, input_reg);
3280  __ test(output_reg, Immediate(1));
3281  DeoptimizeIf(not_zero, instr->environment());
3282  __ bind(&non_zero);
3283  }
3284  __ roundsd(xmm_scratch, input_reg, Assembler::kRoundDown);
3285  __ cvttsd2si(output_reg, Operand(xmm_scratch));
3286  // Overflow is signalled with minint.
3287  __ cmp(output_reg, 0x80000000u);
3288  DeoptimizeIf(equal, instr->environment());
3289  } else {
3290  Label negative_sign, done;
3291  // Deoptimize on unordered.
3292  __ xorps(xmm_scratch, xmm_scratch); // Zero the register.
3293  __ ucomisd(input_reg, xmm_scratch);
3294  DeoptimizeIf(parity_even, instr->environment());
3295  __ j(below, &negative_sign, Label::kNear);
3296 
3297  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3298  // Check for negative zero.
3299  Label positive_sign;
3300  __ j(above, &positive_sign, Label::kNear);
3301  __ movmskpd(output_reg, input_reg);
3302  __ test(output_reg, Immediate(1));
3303  DeoptimizeIf(not_zero, instr->environment());
3304  __ Set(output_reg, Immediate(0));
3305  __ jmp(&done, Label::kNear);
3306  __ bind(&positive_sign);
3307  }
3308 
3309  // Use truncating instruction (OK because input is positive).
3310  __ cvttsd2si(output_reg, Operand(input_reg));
3311  // Overflow is signalled with minint.
3312  __ cmp(output_reg, 0x80000000u);
3313  DeoptimizeIf(equal, instr->environment());
3314  __ jmp(&done, Label::kNear);
3315 
3316  // Non-zero negative reaches here.
3317  __ bind(&negative_sign);
3318  // Truncate, then compare and compensate.
3319  __ cvttsd2si(output_reg, Operand(input_reg));
3320  __ cvtsi2sd(xmm_scratch, output_reg);
3321  __ ucomisd(input_reg, xmm_scratch);
3322  __ j(equal, &done, Label::kNear);
3323  __ sub(output_reg, Immediate(1));
3324  DeoptimizeIf(overflow, instr->environment());
3325 
3326  __ bind(&done);
3327  }
3328 }
3329 
3330 void LCodeGen::DoMathRound(LUnaryMathOperation* instr) {
3331  XMMRegister xmm_scratch = xmm0;
3332  Register output_reg = ToRegister(instr->result());
3333  XMMRegister input_reg = ToDoubleRegister(instr->value());
3334 
3335  Label below_half, done;
3336  // xmm_scratch = 0.5
3337  ExternalReference one_half = ExternalReference::address_of_one_half();
3338  __ movdbl(xmm_scratch, Operand::StaticVariable(one_half));
3339  __ ucomisd(xmm_scratch, input_reg);
3340  __ j(above, &below_half);
3341  // xmm_scratch = input + 0.5
3342  __ addsd(xmm_scratch, input_reg);
3343 
3344  // Compute Math.floor(value + 0.5).
3345  // Use truncating instruction (OK because input is positive).
3346  __ cvttsd2si(output_reg, Operand(xmm_scratch));
3347 
3348  // Overflow is signalled with minint.
3349  __ cmp(output_reg, 0x80000000u);
3350  DeoptimizeIf(equal, instr->environment());
3351  __ jmp(&done);
3352 
3353  __ bind(&below_half);
3354 
3355  // We return 0 for the input range [+0, 0.5[, or [-0.5, 0.5[ if
3356  // we can ignore the difference between a result of -0 and +0.
3357  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3358  // If the sign is positive, we return +0.
3359  __ movmskpd(output_reg, input_reg);
3360  __ test(output_reg, Immediate(1));
3361  DeoptimizeIf(not_zero, instr->environment());
3362  } else {
3363  // If the input is >= -0.5, we return +0.
3364  __ mov(output_reg, Immediate(0xBF000000));
3365  __ movd(xmm_scratch, Operand(output_reg));
3366  __ cvtss2sd(xmm_scratch, xmm_scratch);
3367  __ ucomisd(input_reg, xmm_scratch);
3368  DeoptimizeIf(below, instr->environment());
3369  }
3370  __ Set(output_reg, Immediate(0));
3371  __ bind(&done);
3372 }
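// Rounding above is implemented as floor(input + 0.5) for inputs >= 0.5,
// using the truncating cvttsd2si since input + 0.5 is positive there. Inputs
// below 0.5 return +0: when -0 must be distinguished, any negative input
// deoptimizes (movmskpd sign check); otherwise inputs down to -0.5 also round
// to 0, checked by comparing against -0.5. The immediate 0xBF000000 is -0.5
// encoded as a single-precision float, widened to double with cvtss2sd.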
3373 
3374 
3375 void LCodeGen::DoMathSqrt(LUnaryMathOperation* instr) {
3376  XMMRegister input_reg = ToDoubleRegister(instr->value());
3377  ASSERT(ToDoubleRegister(instr->result()).is(input_reg));
3378  __ sqrtsd(input_reg, input_reg);
3379 }
3380 
3381 
3382 void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
3383  XMMRegister xmm_scratch = xmm0;
3384  XMMRegister input_reg = ToDoubleRegister(instr->value());
3385  Register scratch = ToRegister(instr->temp());
3386  ASSERT(ToDoubleRegister(instr->result()).is(input_reg));
3387 
3388  // Note that according to ECMA-262 15.8.2.13:
3389  // Math.pow(-Infinity, 0.5) == Infinity
3390  // Math.sqrt(-Infinity) == NaN
3391  Label done, sqrt;
3392  // Check base for -Infinity. According to IEEE-754, single-precision
3393  // -Infinity has the highest 9 bits set and the lowest 23 bits cleared.
3394  __ mov(scratch, 0xFF800000);
3395  __ movd(xmm_scratch, scratch);
3396  __ cvtss2sd(xmm_scratch, xmm_scratch);
3397  __ ucomisd(input_reg, xmm_scratch);
3398  // Comparing -Infinity with NaN results in "unordered", which sets the
3399  // zero flag as if both were equal. However, it also sets the carry flag.
3400  __ j(not_equal, &sqrt, Label::kNear);
3401  __ j(carry, &sqrt, Label::kNear);
3402  // If input is -Infinity, return Infinity.
3403  __ xorps(input_reg, input_reg);
3404  __ subsd(input_reg, xmm_scratch);
3405  __ jmp(&done, Label::kNear);
3406 
3407  // Square root.
3408  __ bind(&sqrt);
3409  __ xorps(xmm_scratch, xmm_scratch);
3410  __ addsd(input_reg, xmm_scratch); // Convert -0 to +0.
3411  __ sqrtsd(input_reg, input_reg);
3412  __ bind(&done);
3413 }
3414 
3415 
3416 void LCodeGen::DoPower(LPower* instr) {
3417  Representation exponent_type = instr->hydrogen()->right()->representation();
3418  // Having marked this as a call, we can use any registers.
3419  // Just make sure that the input/output registers are the expected ones.
3420  ASSERT(!instr->right()->IsDoubleRegister() ||
3421  ToDoubleRegister(instr->right()).is(xmm1));
3422  ASSERT(!instr->right()->IsRegister() ||
3423  ToRegister(instr->right()).is(eax));
3424  ASSERT(ToDoubleRegister(instr->left()).is(xmm2));
3425  ASSERT(ToDoubleRegister(instr->result()).is(xmm3));
3426 
3427  if (exponent_type.IsTagged()) {
3428  Label no_deopt;
3429  __ JumpIfSmi(eax, &no_deopt);
3430  __ CmpObjectType(eax, HEAP_NUMBER_TYPE, ecx);
3431  DeoptimizeIf(not_equal, instr->environment());
3432  __ bind(&no_deopt);
3433  MathPowStub stub(MathPowStub::TAGGED);
3434  __ CallStub(&stub);
3435  } else if (exponent_type.IsInteger32()) {
3436  MathPowStub stub(MathPowStub::INTEGER);
3437  __ CallStub(&stub);
3438  } else {
3439  ASSERT(exponent_type.IsDouble());
3440  MathPowStub stub(MathPowStub::DOUBLE);
3441  __ CallStub(&stub);
3442  }
3443 }
3444 
3445 
3446 void LCodeGen::DoRandom(LRandom* instr) {
3447  class DeferredDoRandom: public LDeferredCode {
3448  public:
3449  DeferredDoRandom(LCodeGen* codegen, LRandom* instr)
3450  : LDeferredCode(codegen), instr_(instr) { }
3451  virtual void Generate() { codegen()->DoDeferredRandom(instr_); }
3452  virtual LInstruction* instr() { return instr_; }
3453  private:
3454  LRandom* instr_;
3455  };
3456 
3457  DeferredDoRandom* deferred = new(zone()) DeferredDoRandom(this, instr);
3458 
3459  // Having marked this instruction as a call we can use any
3460  // registers.
3461  ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
3462  ASSERT(ToRegister(instr->global_object()).is(eax));
3463  // Assert that the register size is indeed the size of each seed.
3464  static const int kSeedSize = sizeof(uint32_t);
3465  STATIC_ASSERT(kPointerSize == kSeedSize);
3466 
3467  __ mov(eax, FieldOperand(eax, GlobalObject::kNativeContextOffset));
3468  static const int kRandomSeedOffset =
3469  FixedArray::kHeaderSize + Context::RANDOM_SEED_INDEX * kPointerSize;
3470  __ mov(ebx, FieldOperand(eax, kRandomSeedOffset));
3471  // ebx: FixedArray of the native context's random seeds
3472 
3473  // Load state[0].
3474  __ mov(ecx, FieldOperand(ebx, ByteArray::kHeaderSize));
3475  // If state[0] == 0, call runtime to initialize seeds.
3476  __ test(ecx, ecx);
3477  __ j(zero, deferred->entry());
3478  // Load state[1].
3479  __ mov(eax, FieldOperand(ebx, ByteArray::kHeaderSize + kSeedSize));
3480  // ecx: state[0]
3481  // eax: state[1]
3482 
3483  // state[0] = 18273 * (state[0] & 0xFFFF) + (state[0] >> 16)
3484  __ movzx_w(edx, ecx);
3485  __ imul(edx, edx, 18273);
3486  __ shr(ecx, 16);
3487  __ add(ecx, edx);
3488  // Save state[0].
3489  __ mov(FieldOperand(ebx, ByteArray::kHeaderSize), ecx);
3490 
3491  // state[1] = 36969 * (state[1] & 0xFFFF) + (state[1] >> 16)
3492  __ movzx_w(edx, eax);
3493  __ imul(edx, edx, 36969);
3494  __ shr(eax, 16);
3495  __ add(eax, edx);
3496  // Save state[1].
3497  __ mov(FieldOperand(ebx, ByteArray::kHeaderSize + kSeedSize), eax);
3498 
3499  // Random bit pattern = (state[0] << 14) + (state[1] & 0x3FFFF)
3500  __ shl(ecx, 14);
3501  __ and_(eax, Immediate(0x3FFFF));
3502  __ add(eax, ecx);
3503 
3504  __ bind(deferred->exit());
3505  // Convert 32 random bits in eax to 0.(32 random bits) in a double
3506  // by computing:
3507  // ( 1.(20 0s)(32 random bits) x 2^20 ) - (1.0 x 2^20)).
3508  __ mov(ebx, Immediate(0x49800000)); // 1.0 x 2^20 as single.
3509  __ movd(xmm2, ebx);
3510  __ movd(xmm1, eax);
3511  __ cvtss2sd(xmm2, xmm2);
3512  __ xorps(xmm1, xmm2);
3513  __ subsd(xmm1, xmm2);
3514 }
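// The generator above is a multiply-with-carry style scheme over two 32-bit
// seed words kept in the native context. An equivalent C++ sketch
// (illustrative only, not part of this file):
//
//   state0 = 18273u * (state0 & 0xFFFF) + (state0 >> 16);
//   state1 = 36969u * (state1 & 0xFFFF) + (state1 >> 16);
//   uint32_t bits = (state0 << 14) + (state1 & 0x3FFFF);
//
// The 32 random bits are then placed in the low mantissa bits of the double
// 1.0 x 2^20 (0x49800000 as a single), giving 2^20 + bits * 2^-32;
// subtracting 2^20 leaves a value in [0, 1).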
3515 
3516 
3517 void LCodeGen::DoDeferredRandom(LRandom* instr) {
3518  __ PrepareCallCFunction(1, ebx);
3519  __ mov(Operand(esp, 0), eax);
3520  __ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1);
3521  // Return value is in eax.
3522 }
3523 
3524 
3525 void LCodeGen::DoMathLog(LUnaryMathOperation* instr) {
3526  ASSERT(instr->value()->Equals(instr->result()));
3527  XMMRegister input_reg = ToDoubleRegister(instr->value());
3528  Label positive, done, zero;
3529  __ xorps(xmm0, xmm0);
3530  __ ucomisd(input_reg, xmm0);
3531  __ j(above, &positive, Label::kNear);
3532  __ j(equal, &zero, Label::kNear);
3533  ExternalReference nan =
3534  ExternalReference::address_of_canonical_non_hole_nan();
3535  __ movdbl(input_reg, Operand::StaticVariable(nan));
3536  __ jmp(&done, Label::kNear);
3537  __ bind(&zero);
3538  __ push(Immediate(0xFFF00000));
3539  __ push(Immediate(0));
3540  __ movdbl(input_reg, Operand(esp, 0));
3541  __ add(Operand(esp), Immediate(kDoubleSize));
3542  __ jmp(&done, Label::kNear);
3543  __ bind(&positive);
3544  __ fldln2();
3545  __ sub(Operand(esp), Immediate(kDoubleSize));
3546  __ movdbl(Operand(esp, 0), input_reg);
3547  __ fld_d(Operand(esp, 0));
3548  __ fyl2x();
3549  __ fstp_d(Operand(esp, 0));
3550  __ movdbl(input_reg, Operand(esp, 0));
3551  __ add(Operand(esp), Immediate(kDoubleSize));
3552  __ bind(&done);
3553 }
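// The positive path above computes ln(x) on the x87 stack: fldln2 pushes
// ln(2) and fyl2x replaces the top two entries with ln(2) * log2(x) = ln(x).
// The special cases follow Math.log: log(0) is -Infinity (materialized from
// the bit pattern 0xFFF00000 00000000 pushed on the stack), and log of a
// negative or NaN input is the canonical NaN.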
3554 
3555 
3556 void LCodeGen::DoMathTan(LUnaryMathOperation* instr) {
3557  ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
3558  TranscendentalCacheStub stub(TranscendentalCache::TAN,
3559  TranscendentalCacheStub::UNTAGGED);
3560  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
3561 }
3562 
3563 
3564 void LCodeGen::DoMathCos(LUnaryMathOperation* instr) {
3565  ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
3566  TranscendentalCacheStub stub(TranscendentalCache::COS,
3567  TranscendentalCacheStub::UNTAGGED);
3568  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
3569 }
3570 
3571 
3572 void LCodeGen::DoMathSin(LUnaryMathOperation* instr) {
3573  ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
3574  TranscendentalCacheStub stub(TranscendentalCache::SIN,
3575  TranscendentalCacheStub::UNTAGGED);
3576  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
3577 }
3578 
3579 
3580 void LCodeGen::DoUnaryMathOperation(LUnaryMathOperation* instr) {
3581  switch (instr->op()) {
3582  case kMathAbs:
3583  DoMathAbs(instr);
3584  break;
3585  case kMathFloor:
3586  DoMathFloor(instr);
3587  break;
3588  case kMathRound:
3589  DoMathRound(instr);
3590  break;
3591  case kMathSqrt:
3592  DoMathSqrt(instr);
3593  break;
3594  case kMathCos:
3595  DoMathCos(instr);
3596  break;
3597  case kMathSin:
3598  DoMathSin(instr);
3599  break;
3600  case kMathTan:
3601  DoMathTan(instr);
3602  break;
3603  case kMathLog:
3604  DoMathLog(instr);
3605  break;
3606 
3607  default:
3608  UNREACHABLE();
3609  }
3610 }
3611 
3612 
3613 void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
3614  ASSERT(ToRegister(instr->context()).is(esi));
3615  ASSERT(ToRegister(instr->function()).is(edi));
3616  ASSERT(instr->HasPointerMap());
3617 
3618  if (instr->known_function().is_null()) {
3619  LPointerMap* pointers = instr->pointer_map();
3620  RecordPosition(pointers->position());
3621  SafepointGenerator generator(
3622  this, pointers, Safepoint::kLazyDeopt);
3623  ParameterCount count(instr->arity());
3624  __ InvokeFunction(edi, count, CALL_FUNCTION, generator, CALL_AS_METHOD);
3625  } else {
3626  CallKnownFunction(instr->known_function(),
3627  instr->arity(),
3628  instr,
3629  CALL_AS_METHOD,
3630  EDI_CONTAINS_TARGET);
3631  }
3632 }
3633 
3634 
3635 void LCodeGen::DoCallKeyed(LCallKeyed* instr) {
3636  ASSERT(ToRegister(instr->context()).is(esi));
3637  ASSERT(ToRegister(instr->key()).is(ecx));
3638  ASSERT(ToRegister(instr->result()).is(eax));
3639 
3640  int arity = instr->arity();
3641  Handle<Code> ic =
3642  isolate()->stub_cache()->ComputeKeyedCallInitialize(arity);
3643  CallCode(ic, RelocInfo::CODE_TARGET, instr);
3644 }
3645 
3646 
3647 void LCodeGen::DoCallNamed(LCallNamed* instr) {
3648  ASSERT(ToRegister(instr->context()).is(esi));
3649  ASSERT(ToRegister(instr->result()).is(eax));
3650 
3651  int arity = instr->arity();
3652  RelocInfo::Mode mode = RelocInfo::CODE_TARGET;
3653  Handle<Code> ic =
3654  isolate()->stub_cache()->ComputeCallInitialize(arity, mode);
3655  __ mov(ecx, instr->name());
3656  CallCode(ic, mode, instr);
3657 }
3658 
3659 
3660 void LCodeGen::DoCallFunction(LCallFunction* instr) {
3661  ASSERT(ToRegister(instr->context()).is(esi));
3662  ASSERT(ToRegister(instr->function()).is(edi));
3663  ASSERT(ToRegister(instr->result()).is(eax));
3664 
3665  int arity = instr->arity();
3666  CallFunctionStub stub(arity, NO_CALL_FUNCTION_FLAGS);
3667  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
3668 }
3669 
3670 
3671 void LCodeGen::DoCallGlobal(LCallGlobal* instr) {
3672  ASSERT(ToRegister(instr->context()).is(esi));
3673  ASSERT(ToRegister(instr->result()).is(eax));
3674 
3675  int arity = instr->arity();
3676  RelocInfo::Mode mode = RelocInfo::CODE_TARGET_CONTEXT;
3677  Handle<Code> ic =
3678  isolate()->stub_cache()->ComputeCallInitialize(arity, mode);
3679  __ mov(ecx, instr->name());
3680  CallCode(ic, mode, instr);
3681 }
3682 
3683 
3684 void LCodeGen::DoCallKnownGlobal(LCallKnownGlobal* instr) {
3685  ASSERT(ToRegister(instr->result()).is(eax));
3686  CallKnownFunction(instr->target(),
3687  instr->arity(),
3688  instr,
3689  CALL_AS_FUNCTION,
3690  EDI_UNINITIALIZED);
3691 }
3692 
3693 
3694 void LCodeGen::DoCallNew(LCallNew* instr) {
3695  ASSERT(ToRegister(instr->context()).is(esi));
3696  ASSERT(ToRegister(instr->constructor()).is(edi));
3697  ASSERT(ToRegister(instr->result()).is(eax));
3698 
3699  CallConstructStub stub(NO_CALL_FUNCTION_FLAGS);
3700  __ Set(eax, Immediate(instr->arity()));
3701  CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
3702 }
3703 
3704 
3705 void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
3706  CallRuntime(instr->function(), instr->arity(), instr);
3707 }
3708 
3709 
3710 void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
3711  Register object = ToRegister(instr->object());
3712  Register value = ToRegister(instr->value());
3713  int offset = instr->offset();
3714 
3715  if (!instr->transition().is_null()) {
3716  if (!instr->hydrogen()->NeedsWriteBarrierForMap()) {
3717  __ mov(FieldOperand(object, HeapObject::kMapOffset), instr->transition());
3718  } else {
3719  Register temp = ToRegister(instr->temp());
3720  Register temp_map = ToRegister(instr->temp_map());
3721  __ mov(temp_map, instr->transition());
3722  __ mov(FieldOperand(object, HeapObject::kMapOffset), temp_map);
3723  // Update the write barrier for the map field.
3724  __ RecordWriteField(object,
3725  HeapObject::kMapOffset,
3726  temp_map,
3727  temp,
3728  kSaveFPRegs,
3729  OMIT_REMEMBERED_SET,
3730  OMIT_SMI_CHECK);
3731  }
3732  }
3733 
3734  // Do the store.
3735  HType type = instr->hydrogen()->value()->type();
3736  SmiCheck check_needed =
3737  type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
3738  if (instr->is_in_object()) {
3739  __ mov(FieldOperand(object, offset), value);
3740  if (instr->hydrogen()->NeedsWriteBarrier()) {
3741  Register temp = ToRegister(instr->temp());
3742  // Update the write barrier for the object for in-object properties.
3743  __ RecordWriteField(object,
3744  offset,
3745  value,
3746  temp,
3747  kSaveFPRegs,
3748  EMIT_REMEMBERED_SET,
3749  check_needed);
3750  }
3751  } else {
3752  Register temp = ToRegister(instr->temp());
3753  __ mov(temp, FieldOperand(object, JSObject::kPropertiesOffset));
3754  __ mov(FieldOperand(temp, offset), value);
3755  if (instr->hydrogen()->NeedsWriteBarrier()) {
3756  // Update the write barrier for the properties array.
3757  // object is used as a scratch register.
3758  __ RecordWriteField(temp,
3759  offset,
3760  value,
3761  object,
3762  kSaveFPRegs,
3763  EMIT_REMEMBERED_SET,
3764  check_needed);
3765  }
3766  }
3767 }
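// Each tagged store above is followed by RecordWriteField when the hydrogen
// instruction requires it: the written slot is added to the store buffer
// (remembered set) so the generational/incremental GC can find old-to-new
// pointers, and the smi check is skipped when the value is statically known
// to be a heap object. The map-transition write uses OMIT_REMEMBERED_SET
// because a map is never a new-space object, so only the incremental-marking
// part of the barrier is needed there.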
3768 
3769 
3770 void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
3771  ASSERT(ToRegister(instr->context()).is(esi));
3772  ASSERT(ToRegister(instr->object()).is(edx));
3773  ASSERT(ToRegister(instr->value()).is(eax));
3774 
3775  __ mov(ecx, instr->name());
3776  Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
3777  ? isolate()->builtins()->StoreIC_Initialize_Strict()
3778  : isolate()->builtins()->StoreIC_Initialize();
3779  CallCode(ic, RelocInfo::CODE_TARGET, instr);
3780 }
3781 
3782 
3783 void LCodeGen::DeoptIfTaggedButNotSmi(LEnvironment* environment,
3784  HValue* value,
3785  LOperand* operand) {
3786  if (value->representation().IsTagged() && !value->type().IsSmi()) {
3787  if (operand->IsRegister()) {
3788  __ test(ToRegister(operand), Immediate(kSmiTagMask));
3789  } else {
3790  __ test(ToOperand(operand), Immediate(kSmiTagMask));
3791  }
3792  DeoptimizeIf(not_zero, environment);
3793  }
3794 }
3795 
3796 
3797 void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
3798  DeoptIfTaggedButNotSmi(instr->environment(),
3799  instr->hydrogen()->length(),
3800  instr->length());
3801  DeoptIfTaggedButNotSmi(instr->environment(),
3802  instr->hydrogen()->index(),
3803  instr->index());
3804  if (instr->index()->IsConstantOperand()) {
3805  int constant_index =
3806  ToInteger32(LConstantOperand::cast(instr->index()));
3807  if (instr->hydrogen()->length()->representation().IsTagged()) {
3808  __ cmp(ToOperand(instr->length()),
3809  Immediate(Smi::FromInt(constant_index)));
3810  } else {
3811  __ cmp(ToOperand(instr->length()), Immediate(constant_index));
3812  }
3813  DeoptimizeIf(below_equal, instr->environment());
3814  } else {
3815  __ cmp(ToRegister(instr->index()), ToOperand(instr->length()));
3816  DeoptimizeIf(above_equal, instr->environment());
3817  }
3818 }
3819 
3820 
3821 void LCodeGen::DoStoreKeyedSpecializedArrayElement(
3822  LStoreKeyedSpecializedArrayElement* instr) {
3823  ElementsKind elements_kind = instr->elements_kind();
3824  LOperand* key = instr->key();
3825  if (!key->IsConstantOperand() &&
3826  ExternalArrayOpRequiresTemp(instr->hydrogen()->key()->representation(),
3827  elements_kind)) {
3828  __ SmiUntag(ToRegister(key));
3829  }
3830  Operand operand(BuildFastArrayOperand(
3831  instr->external_pointer(),
3832  key,
3833  instr->hydrogen()->key()->representation(),
3834  elements_kind,
3835  0,
3836  instr->additional_index()));
3837  if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
3838  __ cvtsd2ss(xmm0, ToDoubleRegister(instr->value()));
3839  __ movss(operand, xmm0);
3840  } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
3841  __ movdbl(operand, ToDoubleRegister(instr->value()));
3842  } else {
3843  Register value = ToRegister(instr->value());
3844  switch (elements_kind) {
3845  case EXTERNAL_PIXEL_ELEMENTS:
3846  case EXTERNAL_BYTE_ELEMENTS:
3847  case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
3848  __ mov_b(operand, value);
3849  break;
3850  case EXTERNAL_SHORT_ELEMENTS:
3851  case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
3852  __ mov_w(operand, value);
3853  break;
3854  case EXTERNAL_INT_ELEMENTS:
3855  case EXTERNAL_UNSIGNED_INT_ELEMENTS:
3856  __ mov(operand, value);
3857  break;
3858  case EXTERNAL_FLOAT_ELEMENTS:
3859  case EXTERNAL_DOUBLE_ELEMENTS:
3860  case FAST_SMI_ELEMENTS:
3861  case FAST_ELEMENTS:
3862  case FAST_DOUBLE_ELEMENTS:
3863  case FAST_HOLEY_SMI_ELEMENTS:
3864  case FAST_HOLEY_ELEMENTS:
3865  case FAST_HOLEY_DOUBLE_ELEMENTS:
3866  case DICTIONARY_ELEMENTS:
3867  case NON_STRICT_ARGUMENTS_ELEMENTS:
3868  UNREACHABLE();
3869  break;
3870  }
3871  }
3872 }
3873 
3874 
3875 void LCodeGen::DoStoreKeyedFastElement(LStoreKeyedFastElement* instr) {
3876  Register value = ToRegister(instr->value());
3877  Register elements = ToRegister(instr->object());
3878  Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) : no_reg;
3879 
3880  Operand operand = BuildFastArrayOperand(
3881  instr->object(),
3882  instr->key(),
3883  instr->hydrogen()->key()->representation(),
3884  FAST_ELEMENTS,
3885  FixedArray::kHeaderSize - kHeapObjectTag,
3886  instr->additional_index());
3887  __ mov(operand, value);
3888 
3889  if (instr->hydrogen()->NeedsWriteBarrier()) {
3890  ASSERT(!instr->key()->IsConstantOperand());
3891  HType type = instr->hydrogen()->value()->type();
3892  SmiCheck check_needed =
3893  type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
3894  // Compute address of modified element and store it into key register.
3895  __ lea(key, operand);
3896  __ RecordWrite(elements,
3897  key,
3898  value,
3899  kSaveFPRegs,
3900  EMIT_REMEMBERED_SET,
3901  check_needed);
3902  }
3903 }
3904 
3905 
3906 void LCodeGen::DoStoreKeyedFastDoubleElement(
3907  LStoreKeyedFastDoubleElement* instr) {
3908  XMMRegister value = ToDoubleRegister(instr->value());
3909 
3910  if (instr->NeedsCanonicalization()) {
3911  Label have_value;
3912 
3913  __ ucomisd(value, value);
3914  __ j(parity_odd, &have_value); // NaN.
3915 
3916  ExternalReference canonical_nan_reference =
3917  ExternalReference::address_of_canonical_non_hole_nan();
3918  __ movdbl(value, Operand::StaticVariable(canonical_nan_reference));
3919  __ bind(&have_value);
3920  }
3921 
3922  Operand double_store_operand = BuildFastArrayOperand(
3923  instr->elements(),
3924  instr->key(),
3925  instr->hydrogen()->key()->representation(),
3926  FAST_DOUBLE_ELEMENTS,
3927  FixedDoubleArray::kHeaderSize - kHeapObjectTag,
3928  instr->additional_index());
3929  __ movdbl(double_store_operand, value);
3930 }
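// Canonicalization above: "ucomisd value, value" is unordered only for NaN,
// so non-NaN values take the parity_odd branch and skip the load. NaNs are
// replaced with one fixed bit pattern before being stored, so arbitrary
// (e.g. signalling or hole-like) NaN payloads never end up in a
// FAST_DOUBLE_ELEMENTS backing store, where a reserved NaN pattern marks holes.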
3931 
3932 
3933 void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
3934  ASSERT(ToRegister(instr->context()).is(esi));
3935  ASSERT(ToRegister(instr->object()).is(edx));
3936  ASSERT(ToRegister(instr->key()).is(ecx));
3937  ASSERT(ToRegister(instr->value()).is(eax));
3938 
3939  Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
3940  ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
3941  : isolate()->builtins()->KeyedStoreIC_Initialize();
3942  CallCode(ic, RelocInfo::CODE_TARGET, instr);
3943 }
3944 
3945 
3946 void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
3947  Register object_reg = ToRegister(instr->object());
3948  Register new_map_reg = ToRegister(instr->new_map_temp());
3949 
3950  Handle<Map> from_map = instr->original_map();
3951  Handle<Map> to_map = instr->transitioned_map();
3952  ElementsKind from_kind = from_map->elements_kind();
3953  ElementsKind to_kind = to_map->elements_kind();
3954 
3955  Label not_applicable;
3956  bool is_simple_map_transition =
3957  IsSimpleMapChangeTransition(from_kind, to_kind);
3958  Label::Distance branch_distance =
3959  is_simple_map_transition ? Label::kNear : Label::kFar;
3960  __ cmp(FieldOperand(object_reg, HeapObject::kMapOffset), from_map);
3961  __ j(not_equal, &not_applicable, branch_distance);
3962  if (is_simple_map_transition) {
3963  Register object_reg = ToRegister(instr->object());
3964  Handle<Map> map = instr->hydrogen()->transitioned_map();
3965  __ mov(FieldOperand(object_reg, HeapObject::kMapOffset),
3966  Immediate(map));
3967  // Write barrier.
3968  ASSERT_NE(instr->temp(), NULL);
3969  __ RecordWriteForMap(object_reg, to_map, new_map_reg,
3970  ToRegister(instr->temp()),
3971  kDontSaveFPRegs);
3972  } else if (IsFastSmiElementsKind(from_kind) &&
3973  IsFastDoubleElementsKind(to_kind)) {
3974  __ mov(new_map_reg, to_map);
3975  Register fixed_object_reg = ToRegister(instr->temp());
3976  ASSERT(fixed_object_reg.is(edx));
3977  ASSERT(new_map_reg.is(ebx));
3978  __ mov(fixed_object_reg, object_reg);
3979  CallCode(isolate()->builtins()->TransitionElementsSmiToDouble(),
3980  RelocInfo::CODE_TARGET, instr);
3981  } else if (IsFastDoubleElementsKind(from_kind) &&
3982  IsFastObjectElementsKind(to_kind)) {
3983  __ mov(new_map_reg, to_map);
3984  Register fixed_object_reg = ToRegister(instr->temp());
3985  ASSERT(fixed_object_reg.is(edx));
3986  ASSERT(new_map_reg.is(ebx));
3987  __ mov(fixed_object_reg, object_reg);
3988  CallCode(isolate()->builtins()->TransitionElementsDoubleToObject(),
3989  RelocInfo::CODE_TARGET, instr);
3990  } else {
3991  UNREACHABLE();
3992  }
3993  __ bind(&not_applicable);
3994 }
3995 
3996 
3997 void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
3998  class DeferredStringCharCodeAt: public LDeferredCode {
3999  public:
4000  DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
4001  : LDeferredCode(codegen), instr_(instr) { }
4002  virtual void Generate() { codegen()->DoDeferredStringCharCodeAt(instr_); }
4003  virtual LInstruction* instr() { return instr_; }
4004  private:
4005  LStringCharCodeAt* instr_;
4006  };
4007 
4008  DeferredStringCharCodeAt* deferred =
4009  new(zone()) DeferredStringCharCodeAt(this, instr);
4010 
4011  StringCharLoadGenerator::Generate(masm(),
4012  factory(),
4013  ToRegister(instr->string()),
4014  ToRegister(instr->index()),
4015  ToRegister(instr->result()),
4016  deferred->entry());
4017  __ bind(deferred->exit());
4018 }
4019 
4020 
4021 void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
4022  Register string = ToRegister(instr->string());
4023  Register result = ToRegister(instr->result());
4024 
4025  // TODO(3095996): Get rid of this. For now, we need to make the
4026  // result register contain a valid pointer because it is already
4027  // contained in the register pointer map.
4028  __ Set(result, Immediate(0));
4029 
4030  PushSafepointRegistersScope scope(this);
4031  __ push(string);
4032  // Push the index as a smi. This is safe because of the checks in
4033  // DoStringCharCodeAt above.
4034  STATIC_ASSERT(String::kMaxLength <= Smi::kMaxValue);
4035  if (instr->index()->IsConstantOperand()) {
4036  int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
4037  __ push(Immediate(Smi::FromInt(const_index)));
4038  } else {
4039  Register index = ToRegister(instr->index());
4040  __ SmiTag(index);
4041  __ push(index);
4042  }
4043  CallRuntimeFromDeferred(Runtime::kStringCharCodeAt, 2,
4044  instr, instr->context());
4045  __ AssertSmi(eax);
4046  __ SmiUntag(eax);
4047  __ StoreToSafepointRegisterSlot(result, eax);
4048 }
4049 
4050 
4051 void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
4052  class DeferredStringCharFromCode: public LDeferredCode {
4053  public:
4054  DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
4055  : LDeferredCode(codegen), instr_(instr) { }
4056  virtual void Generate() { codegen()->DoDeferredStringCharFromCode(instr_); }
4057  virtual LInstruction* instr() { return instr_; }
4058  private:
4059  LStringCharFromCode* instr_;
4060  };
4061 
4062  DeferredStringCharFromCode* deferred =
4063  new(zone()) DeferredStringCharFromCode(this, instr);
4064 
4065  ASSERT(instr->hydrogen()->value()->representation().IsInteger32());
4066  Register char_code = ToRegister(instr->char_code());
4067  Register result = ToRegister(instr->result());
4068  ASSERT(!char_code.is(result));
4069 
4070  __ cmp(char_code, String::kMaxAsciiCharCode);
4071  __ j(above, deferred->entry());
4072  __ Set(result, Immediate(factory()->single_character_string_cache()));
4073  __ mov(result, FieldOperand(result,
4074  char_code, times_pointer_size,
4075  FixedArray::kHeaderSize));
4076  __ cmp(result, factory()->undefined_value());
4077  __ j(equal, deferred->entry());
4078  __ bind(deferred->exit());
4079 }
4080 
4081 
4082 void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
4083  Register char_code = ToRegister(instr->char_code());
4084  Register result = ToRegister(instr->result());
4085 
4086  // TODO(3095996): Get rid of this. For now, we need to make the
4087  // result register contain a valid pointer because it is already
4088  // contained in the register pointer map.
4089  __ Set(result, Immediate(0));
4090 
4091  PushSafepointRegistersScope scope(this);
4092  __ SmiTag(char_code);
4093  __ push(char_code);
4094  CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr, instr->context());
4095  __ StoreToSafepointRegisterSlot(result, eax);
4096 }
4097 
4098 
4099 void LCodeGen::DoStringLength(LStringLength* instr) {
4100  Register string = ToRegister(instr->string());
4101  Register result = ToRegister(instr->result());
4102  __ mov(result, FieldOperand(string, String::kLengthOffset));
4103 }
4104 
4105 
4106 void LCodeGen::DoStringAdd(LStringAdd* instr) {
4107  EmitPushTaggedOperand(instr->left());
4108  EmitPushTaggedOperand(instr->right());
4109  StringAddStub stub(NO_STRING_CHECK_IN_STUB);
4110  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
4111 }
4112 
4113 
4114 void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
4115  LOperand* input = instr->value();
4116  ASSERT(input->IsRegister() || input->IsStackSlot());
4117  LOperand* output = instr->result();
4118  ASSERT(output->IsDoubleRegister());
4119  __ cvtsi2sd(ToDoubleRegister(output), ToOperand(input));
4120 }
4121 
4122 
4123 void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
4124  LOperand* input = instr->value();
4125  LOperand* output = instr->result();
4126  LOperand* temp = instr->temp();
4127 
4128  __ LoadUint32(ToDoubleRegister(output),
4129  ToRegister(input),
4130  ToDoubleRegister(temp));
4131 }
4132 
4133 
4134 void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
4135  class DeferredNumberTagI: public LDeferredCode {
4136  public:
4137  DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr)
4138  : LDeferredCode(codegen), instr_(instr) { }
4139  virtual void Generate() {
4140  codegen()->DoDeferredNumberTagI(instr_, instr_->value(), SIGNED_INT32);
4141  }
4142  virtual LInstruction* instr() { return instr_; }
4143  private:
4144  LNumberTagI* instr_;
4145  };
4146 
4147  LOperand* input = instr->value();
4148  ASSERT(input->IsRegister() && input->Equals(instr->result()));
4149  Register reg = ToRegister(input);
4150 
4151  DeferredNumberTagI* deferred = new(zone()) DeferredNumberTagI(this, instr);
4152  __ SmiTag(reg);
4153  __ j(overflow, deferred->entry());
4154  __ bind(deferred->exit());
4155 }
4156 
4157 
4158 void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
4159  class DeferredNumberTagU: public LDeferredCode {
4160  public:
4161  DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr)
4162  : LDeferredCode(codegen), instr_(instr) { }
4163  virtual void Generate() {
4164  codegen()->DoDeferredNumberTagI(instr_, instr_->value(), UNSIGNED_INT32);
4165  }
4166  virtual LInstruction* instr() { return instr_; }
4167  private:
4168  LNumberTagU* instr_;
4169  };
4170 
4171  LOperand* input = instr->value();
4172  ASSERT(input->IsRegister() && input->Equals(instr->result()));
4173  Register reg = ToRegister(input);
4174 
4175  DeferredNumberTagU* deferred = new(zone()) DeferredNumberTagU(this, instr);
4176  __ cmp(reg, Immediate(Smi::kMaxValue));
4177  __ j(above, deferred->entry());
4178  __ SmiTag(reg);
4179  __ bind(deferred->exit());
4180 }
4181 
4182 
4183 void LCodeGen::DoDeferredNumberTagI(LInstruction* instr,
4184  LOperand* value,
4185  IntegerSignedness signedness) {
4186  Label slow;
4187  Register reg = ToRegister(value);
4188  Register tmp = reg.is(eax) ? ecx : eax;
4189 
4190  // Preserve the value of all registers.
4191  PushSafepointRegistersScope scope(this);
4192 
4193  Label done;
4194 
4195  if (signedness == SIGNED_INT32) {
4196  // There was overflow, so bits 30 and 31 of the original integer
4197  // disagree. Try to allocate a heap number in new space and store
4198  // the value in there. If that fails, call the runtime system.
4199  __ SmiUntag(reg);
4200  __ xor_(reg, 0x80000000);
4201  __ cvtsi2sd(xmm0, Operand(reg));
4202  } else {
4203  __ LoadUint32(xmm0, reg, xmm1);
4204  }
4205 
4206  if (FLAG_inline_new) {
4207  __ AllocateHeapNumber(reg, tmp, no_reg, &slow);
4208  __ jmp(&done, Label::kNear);
4209  }
4210 
4211  // Slow case: Call the runtime system to do the number allocation.
4212  __ bind(&slow);
4213 
4214  // TODO(3095996): Put a valid pointer value in the stack slot where the result
4215  // register is stored, as this register is in the pointer map, but contains an
4216  // integer value.
4217  __ StoreToSafepointRegisterSlot(reg, Immediate(0));
4218  // NumberTagI and NumberTagD use the context from the frame, rather than
4219  // the environment's HContext or HInlinedContext value.
4220  // They only call Runtime::kAllocateHeapNumber.
4221  // The corresponding HChange instructions are added in a phase that does
4222  // not have easy access to the local context.
4223  __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
4224  __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
4225  RecordSafepointWithRegisters(
4226  instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
4227  if (!reg.is(eax)) __ mov(reg, eax);
4228 
4229  // Done. Put the value in xmm0 into the value of the allocated heap
4230  // number.
4231  __ bind(&done);
4232  __ movdbl(FieldOperand(reg, HeapNumber::kValueOffset), xmm0);
4233  __ StoreToSafepointRegisterSlot(reg, reg);
4234 }
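// Recovery of the original value in the SIGNED_INT32 case above: SmiTag is an
// "add reg, reg", so on overflow the register holds value * 2 mod 2^32.
// Shifting right arithmetically and flipping bit 31 restores the value.
// Worked example (illustrative only, not part of this file):
//
//   value = 0x40000000: tag -> 0x80000000, sar 1 -> 0xC0000000,
//   xor 0x80000000     -> 0x40000000  (the original value)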
4235 
4236 
4237 void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
4238  class DeferredNumberTagD: public LDeferredCode {
4239  public:
4240  DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
4241  : LDeferredCode(codegen), instr_(instr) { }
4242  virtual void Generate() { codegen()->DoDeferredNumberTagD(instr_); }
4243  virtual LInstruction* instr() { return instr_; }
4244  private:
4245  LNumberTagD* instr_;
4246  };
4247 
4248  XMMRegister input_reg = ToDoubleRegister(instr->value());
4249  Register reg = ToRegister(instr->result());
4250  Register tmp = ToRegister(instr->temp());
4251 
4252  DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr);
4253  if (FLAG_inline_new) {
4254  __ AllocateHeapNumber(reg, tmp, no_reg, deferred->entry());
4255  } else {
4256  __ jmp(deferred->entry());
4257  }
4258  __ bind(deferred->exit());
4259  __ movdbl(FieldOperand(reg, HeapNumber::kValueOffset), input_reg);
4260 }
4261 
4262 
4263 void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
4264  // TODO(3095996): Get rid of this. For now, we need to make the
4265  // result register contain a valid pointer because it is already
4266  // contained in the register pointer map.
4267  Register reg = ToRegister(instr->result());
4268  __ Set(reg, Immediate(0));
4269 
4270  PushSafepointRegistersScope scope(this);
4271  // NumberTagI and NumberTagD use the context from the frame, rather than
4272  // the environment's HContext or HInlinedContext value.
4273  // They only call Runtime::kAllocateHeapNumber.
4274  // The corresponding HChange instructions are added in a phase that does
4275  // not have easy access to the local context.
4276  __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
4277  __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
4278  RecordSafepointWithRegisters(
4279  instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
4280  __ StoreToSafepointRegisterSlot(reg, eax);
4281 }
4282 
4283 
4284 void LCodeGen::DoSmiTag(LSmiTag* instr) {
4285  LOperand* input = instr->value();
4286  ASSERT(input->IsRegister() && input->Equals(instr->result()));
4287  ASSERT(!instr->hydrogen_value()->CheckFlag(HValue::kCanOverflow));
4288  __ SmiTag(ToRegister(input));
4289 }
4290 
4291 
4292 void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
4293  LOperand* input = instr->value();
4294  ASSERT(input->IsRegister() && input->Equals(instr->result()));
4295  if (instr->needs_check()) {
4296  __ test(ToRegister(input), Immediate(kSmiTagMask));
4297  DeoptimizeIf(not_zero, instr->environment());
4298  } else {
4299  __ AssertSmi(ToRegister(input));
4300  }
4301  __ SmiUntag(ToRegister(input));
4302 }
4303 
4304 
4305 void LCodeGen::EmitNumberUntagD(Register input_reg,
4306  Register temp_reg,
4307  XMMRegister result_reg,
4308  bool deoptimize_on_undefined,
4309  bool deoptimize_on_minus_zero,
4310  LEnvironment* env) {
4311  Label load_smi, done;
4312 
4313  // Smi check.
4314  __ JumpIfSmi(input_reg, &load_smi, Label::kNear);
4315 
4316  // Heap number map check.
4317  __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
4318  factory()->heap_number_map());
4319  if (deoptimize_on_undefined) {
4320  DeoptimizeIf(not_equal, env);
4321  } else {
4322  Label heap_number;
4323  __ j(equal, &heap_number, Label::kNear);
4324 
4325  __ cmp(input_reg, factory()->undefined_value());
4326  DeoptimizeIf(not_equal, env);
4327 
4328  // Convert undefined to NaN.
4329  ExternalReference nan =
4330  ExternalReference::address_of_canonical_non_hole_nan();
4331  __ movdbl(result_reg, Operand::StaticVariable(nan));
4332  __ jmp(&done, Label::kNear);
4333 
4334  __ bind(&heap_number);
4335  }
4336  // Heap number to XMM conversion.
4337  __ movdbl(result_reg, FieldOperand(input_reg, HeapNumber::kValueOffset));
4338  if (deoptimize_on_minus_zero) {
4339  XMMRegister xmm_scratch = xmm0;
4340  __ xorps(xmm_scratch, xmm_scratch);
4341  __ ucomisd(result_reg, xmm_scratch);
4342  __ j(not_zero, &done, Label::kNear);
4343  __ movmskpd(temp_reg, result_reg);
4344  __ test_b(temp_reg, 1);
4345  DeoptimizeIf(not_zero, env);
4346  }
4347  __ jmp(&done, Label::kNear);
4348 
4349  // Smi to XMM conversion
4350  __ bind(&load_smi);
4351  __ SmiUntag(input_reg); // Untag smi before converting to float.
4352  __ cvtsi2sd(result_reg, Operand(input_reg));
4353  __ SmiTag(input_reg); // Retag smi.
4354  __ bind(&done);
4355 }
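// The -0 check above relies on movmskpd, which copies the sign bits of the
// two double lanes into the low bits of a general register; once the compare
// against +0 has established "equal", a set bit 0 can only mean the value was
// -0, and the deopt is taken only when the caller cares about that distinction.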
4356 
4357 
4358 void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
4359  Label done, heap_number;
4360  Register input_reg = ToRegister(instr->value());
4361 
4362  // Heap number map check.
4363  __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
4364  factory()->heap_number_map());
4365 
4366  if (instr->truncating()) {
4367  __ j(equal, &heap_number, Label::kNear);
4368  // Check for undefined. Undefined is converted to zero for truncating
4369  // conversions.
4370  __ cmp(input_reg, factory()->undefined_value());
4371  DeoptimizeIf(not_equal, instr->environment());
4372  __ mov(input_reg, 0);
4373  __ jmp(&done, Label::kNear);
4374 
4375  __ bind(&heap_number);
4376  if (CpuFeatures::IsSupported(SSE3)) {
4377  CpuFeatures::Scope scope(SSE3);
4378  Label convert;
4379  // Use more powerful conversion when sse3 is available.
4380  // Load x87 register with heap number.
4381  __ fld_d(FieldOperand(input_reg, HeapNumber::kValueOffset));
4382  // Get exponent alone and check for too-big exponent.
4383  __ mov(input_reg, FieldOperand(input_reg, HeapNumber::kExponentOffset));
4384  __ and_(input_reg, HeapNumber::kExponentMask);
4385  const uint32_t kTooBigExponent =
4386  (HeapNumber::kExponentBias + 63) << HeapNumber::kExponentShift;
4387  __ cmp(Operand(input_reg), Immediate(kTooBigExponent));
4388  __ j(less, &convert, Label::kNear);
4389  // Pop FPU stack before deoptimizing.
4390  __ fstp(0);
4391  DeoptimizeIf(no_condition, instr->environment());
4392 
4393  // Reserve space for 64 bit answer.
4394  __ bind(&convert);
4395  __ sub(Operand(esp), Immediate(kDoubleSize));
4396  // Do conversion, which cannot fail because we checked the exponent.
4397  __ fisttp_d(Operand(esp, 0));
4398  __ mov(input_reg, Operand(esp, 0)); // Low word of answer is the result.
4399  __ add(Operand(esp), Immediate(kDoubleSize));
4400  } else {
4401  XMMRegister xmm_temp = ToDoubleRegister(instr->temp());
4402  __ movdbl(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
4403  __ cvttsd2si(input_reg, Operand(xmm0));
4404  __ cmp(input_reg, 0x80000000u);
4405  __ j(not_equal, &done);
4406  // Check if the input was 0x80000000 (kMinInt).
4407  // If no, then we got an overflow and we deoptimize.
4408  ExternalReference min_int = ExternalReference::address_of_min_int();
4409  __ movdbl(xmm_temp, Operand::StaticVariable(min_int));
4410  __ ucomisd(xmm_temp, xmm0);
4411  DeoptimizeIf(not_equal, instr->environment());
4412  DeoptimizeIf(parity_even, instr->environment()); // NaN.
4413  }
4414  } else {
4415  // Deoptimize if we don't have a heap number.
4416  DeoptimizeIf(not_equal, instr->environment());
4417 
4418  XMMRegister xmm_temp = ToDoubleRegister(instr->temp());
4419  __ movdbl(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
4420  __ cvttsd2si(input_reg, Operand(xmm0));
4421  __ cvtsi2sd(xmm_temp, Operand(input_reg));
4422  __ ucomisd(xmm0, xmm_temp);
4423  DeoptimizeIf(not_equal, instr->environment());
4424  DeoptimizeIf(parity_even, instr->environment()); // NaN.
4425  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
4426  __ test(input_reg, Operand(input_reg));
4427  __ j(not_zero, &done);
4428  __ movmskpd(input_reg, xmm0);
4429  __ and_(input_reg, 1);
4430  DeoptimizeIf(not_zero, instr->environment());
4431  }
4432  }
4433  __ bind(&done);
4434 }
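// Truncating path above: with SSE3 the heap number is loaded onto the x87
// stack and fisttp stores a truncated 64-bit integer; the low 32 bits of that
// result are exactly the ECMAScript ToInt32 (mod 2^32) value, provided the
// exponent check rejected inputs of magnitude 2^63 and above. Without SSE3,
// cvttsd2si is used and only the 0x80000000 sentinel needs the extra compare
// against the real -2^31 constant to distinguish kMinInt from overflow.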
4435 
4436 
4437 void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
4438  class DeferredTaggedToI: public LDeferredCode {
4439  public:
4440  DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
4441  : LDeferredCode(codegen), instr_(instr) { }
4442  virtual void Generate() { codegen()->DoDeferredTaggedToI(instr_); }
4443  virtual LInstruction* instr() { return instr_; }
4444  private:
4445  LTaggedToI* instr_;
4446  };
4447 
4448  LOperand* input = instr->value();
4449  ASSERT(input->IsRegister());
4450  ASSERT(input->Equals(instr->result()));
4451 
4452  Register input_reg = ToRegister(input);
4453 
4454  DeferredTaggedToI* deferred = new(zone()) DeferredTaggedToI(this, instr);
4455 
4456  // Smi check.
4457  __ JumpIfNotSmi(input_reg, deferred->entry());
4458 
4459  // Smi to int32 conversion
4460  __ SmiUntag(input_reg); // Untag smi.
4461 
4462  __ bind(deferred->exit());
4463 }
4464 
4465 
4466 void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
4467  LOperand* input = instr->value();
4468  ASSERT(input->IsRegister());
4469  LOperand* temp = instr->temp();
4470  ASSERT(temp == NULL || temp->IsRegister());
4471  LOperand* result = instr->result();
4472  ASSERT(result->IsDoubleRegister());
4473 
4474  Register input_reg = ToRegister(input);
4475  XMMRegister result_reg = ToDoubleRegister(result);
4476 
4477  bool deoptimize_on_minus_zero =
4478  instr->hydrogen()->deoptimize_on_minus_zero();
4479  Register temp_reg = deoptimize_on_minus_zero ? ToRegister(temp) : no_reg;
4480 
4481  EmitNumberUntagD(input_reg,
4482  temp_reg,
4483  result_reg,
4484  instr->hydrogen()->deoptimize_on_undefined(),
4485  deoptimize_on_minus_zero,
4486  instr->environment());
4487 }
4488 
4489 
4490 void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
4491  LOperand* input = instr->value();
4492  ASSERT(input->IsDoubleRegister());
4493  LOperand* result = instr->result();
4494  ASSERT(result->IsRegister());
4495 
4496  XMMRegister input_reg = ToDoubleRegister(input);
4497  Register result_reg = ToRegister(result);
4498 
4499  if (instr->truncating()) {
4500  // Performs a truncating conversion of a floating point number as used by
4501  // the JS bitwise operations.
4502  __ cvttsd2si(result_reg, Operand(input_reg));
4503  __ cmp(result_reg, 0x80000000u);
4504  if (CpuFeatures::IsSupported(SSE3)) {
4505  // This will deoptimize if the exponent of the input is out of range.
4506  CpuFeatures::Scope scope(SSE3);
4507  Label convert, done;
4508  __ j(not_equal, &done, Label::kNear);
4509  __ sub(Operand(esp), Immediate(kDoubleSize));
4510  __ movdbl(Operand(esp, 0), input_reg);
4511  // Get exponent alone and check for too-big exponent.
4512  __ mov(result_reg, Operand(esp, sizeof(int32_t)));
4513  __ and_(result_reg, HeapNumber::kExponentMask);
4514  const uint32_t kTooBigExponent =
4515  (HeapNumber::kExponentBias + 63) << HeapNumber::kExponentShift;
4516  __ cmp(Operand(result_reg), Immediate(kTooBigExponent));
4517  __ j(less, &convert, Label::kNear);
4518  __ add(Operand(esp), Immediate(kDoubleSize));
4519  DeoptimizeIf(no_condition, instr->environment());
4520  __ bind(&convert);
4521  // Do conversion, which cannot fail because we checked the exponent.
4522  __ fld_d(Operand(esp, 0));
4523  __ fisttp_d(Operand(esp, 0));
4524  __ mov(result_reg, Operand(esp, 0)); // Low word of answer is the result.
4525  __ add(Operand(esp), Immediate(kDoubleSize));
4526  __ bind(&done);
4527  } else {
4528  Label done;
4529  Register temp_reg = ToRegister(instr->temp());
4530  XMMRegister xmm_scratch = xmm0;
4531 
4532  // If cvttsd2si succeeded, we're done. Otherwise, we attempt
4533  // manual conversion.
4534  __ j(not_equal, &done, Label::kNear);
4535 
4536  // Get high 32 bits of the input in result_reg and temp_reg.
4537  __ pshufd(xmm_scratch, input_reg, 1);
4538  __ movd(Operand(temp_reg), xmm_scratch);
4539  __ mov(result_reg, temp_reg);
4540 
4541  // Prepare negation mask in temp_reg.
4542  __ sar(temp_reg, kBitsPerInt - 1);
4543 
4544  // Extract the exponent from result_reg and subtract adjusted
4545  // bias from it. The adjustment is selected in a way such that
4546  // when the difference is zero, the answer is in the low 32 bits
4547  // of the input, otherwise a shift has to be performed.
4548  __ shr(result_reg, HeapNumber::kExponentShift);
4549  __ and_(result_reg,
4550  HeapNumber::kExponentMask >> HeapNumber::kExponentShift);
4551  __ sub(Operand(result_reg),
4552  Immediate(HeapNumber::kExponentBias +
4553  HeapNumber::kMantissaBits +
4554  HeapNumber::kExponentBits));
4555  // Don't handle big (> kMantissaBits + kExponentBits == 63) or
4556  // special exponents.
4557  DeoptimizeIf(greater, instr->environment());
4558 
4559  // Zero out the sign and the exponent in the input (by shifting
4560  // it to the left) and restore the implicit mantissa bit,
4561  // i.e. convert the input to unsigned int64 shifted left by
4562  // kExponentBits.
4563  ExternalReference minus_zero = ExternalReference::address_of_minus_zero();
4564  // Minus zero has the most significant bit set and the other
4565  // bits cleared.
4566  __ movdbl(xmm_scratch, Operand::StaticVariable(minus_zero));
4567  __ psllq(input_reg, HeapNumber::kExponentBits);
4568  __ por(input_reg, xmm_scratch);
4569 
4570  // Get the amount to shift the input right in xmm_scratch.
4571  __ neg(result_reg);
4572  __ movd(xmm_scratch, Operand(result_reg));
4573 
4574  // Shift the input right and extract low 32 bits.
4575  __ psrlq(input_reg, xmm_scratch);
4576  __ movd(Operand(result_reg), input_reg);
4577 
4578  // Use the prepared mask in temp_reg to negate the result if necessary.
4579  __ xor_(result_reg, Operand(temp_reg));
4580  __ sub(result_reg, Operand(temp_reg));
4581  __ bind(&done);
4582  }
4583  } else {
4584  Label done;
4585  __ cvttsd2si(result_reg, Operand(input_reg));
4586  __ cvtsi2sd(xmm0, Operand(result_reg));
4587  __ ucomisd(xmm0, input_reg);
4588  DeoptimizeIf(not_equal, instr->environment());
4589  DeoptimizeIf(parity_even, instr->environment()); // NaN.
4590  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
4591  // The integer converted back is equal to the original. We
4592  // only have to test if we got -0 as an input.
4593  __ test(result_reg, Operand(result_reg));
4594  __ j(not_zero, &done, Label::kNear);
4595  __ movmskpd(result_reg, input_reg);
4596  // Bit 0 contains the sign of the double in input_reg.
4597  // If input was positive, we are ok and return 0, otherwise
4598  // deoptimize.
4599  __ and_(result_reg, 1);
4600  DeoptimizeIf(not_zero, instr->environment());
4601  }
4602  __ bind(&done);
4603  }
4604 }
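// Manual conversion in the non-SSE3 truncating path above: the exponent is
// extracted from the high word, the mantissa (with its implicit bit restored
// via the -0 bit pattern) is shifted so the integer part lands in the low 32
// bits, and the sign is applied with the usual two's-complement identity.
// Sketch of the sign step (illustrative only, not part of this file):
//
//   int32_t apply_sign(int32_t magnitude, int32_t mask) {
//     return (magnitude ^ mask) - mask;  // mask is 0 or -1 (replicated sign bit)
//   }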
4605 
4606 
4607 void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
4608  LOperand* input = instr->value();
4609  __ test(ToOperand(input), Immediate(kSmiTagMask));
4610  DeoptimizeIf(not_zero, instr->environment());
4611 }
4612 
4613 
4614 void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
4615  LOperand* input = instr->value();
4616  __ test(ToOperand(input), Immediate(kSmiTagMask));
4617  DeoptimizeIf(zero, instr->environment());
4618 }
4619 
4620 
4621 void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
4622  Register input = ToRegister(instr->value());
4623  Register temp = ToRegister(instr->temp());
4624 
4625  __ mov(temp, FieldOperand(input, HeapObject::kMapOffset));
4626 
4627  if (instr->hydrogen()->is_interval_check()) {
4628  InstanceType first;
4629  InstanceType last;
4630  instr->hydrogen()->GetCheckInterval(&first, &last);
4631 
4632  __ cmpb(FieldOperand(temp, Map::kInstanceTypeOffset),
4633  static_cast<int8_t>(first));
4634 
4635  // If there is only one type in the interval check for equality.
4636  if (first == last) {
4637  DeoptimizeIf(not_equal, instr->environment());
4638  } else {
4639  DeoptimizeIf(below, instr->environment());
4640  // Omit check for the last type.
4641  if (last != LAST_TYPE) {
4642  __ cmpb(FieldOperand(temp, Map::kInstanceTypeOffset),
4643  static_cast<int8_t>(last));
4644  DeoptimizeIf(above, instr->environment());
4645  }
4646  }
4647  } else {
4648  uint8_t mask;
4649  uint8_t tag;
4650  instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);
4651 
4652  if (IsPowerOf2(mask)) {
4653  ASSERT(tag == 0 || IsPowerOf2(tag));
4654  __ test_b(FieldOperand(temp, Map::kInstanceTypeOffset), mask);
4655  DeoptimizeIf(tag == 0 ? not_zero : zero, instr->environment());
4656  } else {
4657  __ movzx_b(temp, FieldOperand(temp, Map::kInstanceTypeOffset));
4658  __ and_(temp, mask);
4659  __ cmp(temp, tag);
4660  DeoptimizeIf(not_equal, instr->environment());
4661  }
4662  }
4663 }
4664 
4665 
4666 void LCodeGen::DoCheckFunction(LCheckFunction* instr) {
4667  Handle<JSFunction> target = instr->hydrogen()->target();
4668  if (isolate()->heap()->InNewSpace(*target)) {
4669  Register reg = ToRegister(instr->value());
4670  Handle<JSGlobalPropertyCell> cell =
4671  isolate()->factory()->NewJSGlobalPropertyCell(target);
4672  __ cmp(reg, Operand::Cell(cell));
4673  } else {
4674  Operand operand = ToOperand(instr->value());
4675  __ cmp(operand, target);
4676  }
4677  DeoptimizeIf(not_equal, instr->environment());
4678 }
4679 
4680 
4681 void LCodeGen::DoCheckMapCommon(Register reg,
4682  Handle<Map> map,
4683  CompareMapMode mode,
4684  LEnvironment* env) {
4685  Label success;
4686  __ CompareMap(reg, map, &success, mode);
4687  DeoptimizeIf(not_equal, env);
4688  __ bind(&success);
4689 }
4690 
4691 
4692 void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
4693  LOperand* input = instr->value();
4694  ASSERT(input->IsRegister());
4695  Register reg = ToRegister(input);
4696 
4697  Label success;
4698  SmallMapList* map_set = instr->hydrogen()->map_set();
4699  for (int i = 0; i < map_set->length() - 1; i++) {
4700  Handle<Map> map = map_set->at(i);
4701  __ CompareMap(reg, map, &success, REQUIRE_EXACT_MAP);
4702  __ j(equal, &success);
4703  }
4704  Handle<Map> map = map_set->last();
4705  DoCheckMapCommon(reg, map, REQUIRE_EXACT_MAP, instr->environment());
4706  __ bind(&success);
4707 }
4708 
4709 
4710 void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
4711  XMMRegister value_reg = ToDoubleRegister(instr->unclamped());
4712  Register result_reg = ToRegister(instr->result());
4713  __ ClampDoubleToUint8(value_reg, xmm0, result_reg);
4714 }
4715 
4716 
4717 void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
4718  ASSERT(instr->unclamped()->Equals(instr->result()));
4719  Register value_reg = ToRegister(instr->result());
4720  __ ClampUint8(value_reg);
4721 }
4722 
4723 
4724 void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
4725  ASSERT(instr->unclamped()->Equals(instr->result()));
4726  Register input_reg = ToRegister(instr->unclamped());
4727  Label is_smi, done, heap_number;
4728 
4729  __ JumpIfSmi(input_reg, &is_smi);
4730 
4731  // Check for heap number
4732  __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
4733  factory()->heap_number_map());
4734  __ j(equal, &heap_number, Label::kNear);
4735 
4736  // Check for undefined. Undefined is converted to zero for clamping
4737  // conversions.
4738  __ cmp(input_reg, factory()->undefined_value());
4739  DeoptimizeIf(not_equal, instr->environment());
4740  __ mov(input_reg, 0);
4741  __ jmp(&done, Label::kNear);
4742 
4743  // Heap number
4744  __ bind(&heap_number);
4745  __ movdbl(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
4746  __ ClampDoubleToUint8(xmm0, xmm1, input_reg);
4747  __ jmp(&done, Label::kNear);
4748 
4749  // smi
4750  __ bind(&is_smi);
4751  __ SmiUntag(input_reg);
4752  __ ClampUint8(input_reg);
4753 
4754  __ bind(&done);
4755 }
4756 
4757 
4758 void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
4759  Register reg = ToRegister(instr->temp());
4760 
4761  Handle<JSObject> holder = instr->holder();
4762  Handle<JSObject> current_prototype = instr->prototype();
4763 
4764  // Load prototype object.
4765  __ LoadHeapObject(reg, current_prototype);
4766 
4767  // Check prototype maps up to the holder.
4768  while (!current_prototype.is_identical_to(holder)) {
4769  DoCheckMapCommon(reg, Handle<Map>(current_prototype->map()),
4770  ALLOW_ELEMENT_TRANSITION_MAPS, instr->environment());
4771 
4772  current_prototype =
4773  Handle<JSObject>(JSObject::cast(current_prototype->GetPrototype()));
4774  // Load next prototype object.
4775  __ LoadHeapObject(reg, current_prototype);
4776  }
4777 
4778  // Check the holder map.
4779  DoCheckMapCommon(reg, Handle<Map>(current_prototype->map()),
4780  ALLOW_ELEMENT_TRANSITION_MAPS, instr->environment());
4781 }
4782 
4783 
4784 void LCodeGen::DoAllocateObject(LAllocateObject* instr) {
4785  class DeferredAllocateObject: public LDeferredCode {
4786  public:
4787  DeferredAllocateObject(LCodeGen* codegen, LAllocateObject* instr)
4788  : LDeferredCode(codegen), instr_(instr) { }
4789  virtual void Generate() { codegen()->DoDeferredAllocateObject(instr_); }
4790  virtual LInstruction* instr() { return instr_; }
4791  private:
4792  LAllocateObject* instr_;
4793  };
4794 
4795  DeferredAllocateObject* deferred =
4796  new(zone()) DeferredAllocateObject(this, instr);
4797 
4798  Register result = ToRegister(instr->result());
4799  Register scratch = ToRegister(instr->temp());
4800  Handle<JSFunction> constructor = instr->hydrogen()->constructor();
4801  Handle<Map> initial_map(constructor->initial_map());
4802  int instance_size = initial_map->instance_size();
4803  ASSERT(initial_map->pre_allocated_property_fields() +
4804  initial_map->unused_property_fields() -
4805  initial_map->inobject_properties() == 0);
4806 
4807  // Allocate memory for the object. The initial map might change when
4808  // the constructor's prototype changes, but instance size and property
4809  // counts remain unchanged (if slack tracking finished).
4810  ASSERT(!constructor->shared()->IsInobjectSlackTrackingInProgress());
4811  __ AllocateInNewSpace(instance_size,
4812  result,
4813  no_reg,
4814  scratch,
4815  deferred->entry(),
4816  TAG_OBJECT);
4817 
4818  __ bind(deferred->exit());
4819  if (FLAG_debug_code) {
4820  Label is_in_new_space;
4821  __ JumpIfInNewSpace(result, scratch, &is_in_new_space);
4822  __ Abort("Allocated object is not in new-space");
4823  __ bind(&is_in_new_space);
4824  }
4825 
4826  // Load the initial map.
4827  Register map = scratch;
4828  __ LoadHeapObject(scratch, constructor);
4829  __ mov(map, FieldOperand(scratch, JSFunction::kPrototypeOrInitialMapOffset));
4830 
4831  if (FLAG_debug_code) {
4832  __ AssertNotSmi(map);
4833  __ cmpb(FieldOperand(map, Map::kInstanceSizeOffset),
4834  instance_size >> kPointerSizeLog2);
4835  __ Assert(equal, "Unexpected instance size");
4836  __ cmpb(FieldOperand(map, Map::kPreAllocatedPropertyFieldsOffset),
4837  initial_map->pre_allocated_property_fields());
4838  __ Assert(equal, "Unexpected pre-allocated property fields count");
4839  __ cmpb(FieldOperand(map, Map::kUnusedPropertyFieldsOffset),
4840  initial_map->unused_property_fields());
4841  __ Assert(equal, "Unexpected unused property fields count");
4842  __ cmpb(FieldOperand(map, Map::kInObjectPropertiesOffset),
4843  initial_map->inobject_properties());
4844  __ Assert(equal, "Unexpected in-object property fields count");
4845  }
4846 
4847  // Initialize map and fields of the newly allocated object.
4848  ASSERT(initial_map->instance_type() == JS_OBJECT_TYPE);
4849  __ mov(FieldOperand(result, JSObject::kMapOffset), map);
4850  __ mov(scratch, factory()->empty_fixed_array());
4851  __ mov(FieldOperand(result, JSObject::kElementsOffset), scratch);
4852  __ mov(FieldOperand(result, JSObject::kPropertiesOffset), scratch);
4853  if (initial_map->inobject_properties() != 0) {
4854  __ mov(scratch, factory()->undefined_value());
4855  for (int i = 0; i < initial_map->inobject_properties(); i++) {
4856  int property_offset = JSObject::kHeaderSize + i * kPointerSize;
4857  __ mov(FieldOperand(result, property_offset), scratch);
4858  }
4859  }
4860 }
4861 
4862 
4863 void LCodeGen::DoDeferredAllocateObject(LAllocateObject* instr) {
4864  Register result = ToRegister(instr->result());
4865  Handle<JSFunction> constructor = instr->hydrogen()->constructor();
4866  Handle<Map> initial_map(constructor->initial_map());
4867  int instance_size = initial_map->instance_size();
4868 
4869  // TODO(3095996): Get rid of this. For now, we need to make the
4870  // result register contain a valid pointer because it is already
4871  // contained in the register pointer map.
4872  __ Set(result, Immediate(0));
4873 
4874  PushSafepointRegistersScope scope(this);
4875  __ push(Immediate(Smi::FromInt(instance_size)));
4876  CallRuntimeFromDeferred(
4877  Runtime::kAllocateInNewSpace, 1, instr, instr->context());
4878  __ StoreToSafepointRegisterSlot(result, eax);
4879 }
4880 
4881 
4882 void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
4883  ASSERT(ToRegister(instr->context()).is(esi));
4884  Handle<FixedArray> literals(instr->environment()->closure()->literals());
4885  ElementsKind boilerplate_elements_kind =
4886  instr->hydrogen()->boilerplate_elements_kind();
4887 
4888  // Deopt if the array literal boilerplate ElementsKind is of a type different
4889  // than the expected one. The check isn't necessary if the boilerplate has
4890  // already been converted to TERMINAL_FAST_ELEMENTS_KIND.
4891  if (CanTransitionToMoreGeneralFastElementsKind(
4892  boilerplate_elements_kind, true)) {
4893  __ LoadHeapObject(eax, instr->hydrogen()->boilerplate_object());
4894  __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
4895  // Load the map's "bit field 2". We only need the first byte,
4896  // but the following masking takes care of that anyway.
4897  __ movzx_b(ebx, FieldOperand(ebx, Map::kBitField2Offset));
4898  // Retrieve elements_kind from bit field 2.
4899  __ and_(ebx, Map::kElementsKindMask);
4900  __ cmp(ebx, boilerplate_elements_kind << Map::kElementsKindShift);
4901  DeoptimizeIf(not_equal, instr->environment());
4902  }
4903 
4904  // Set up the parameters to the stub/runtime call.
4905  __ PushHeapObject(literals);
4906  __ push(Immediate(Smi::FromInt(instr->hydrogen()->literal_index())));
4907  // Boilerplate already exists, constant elements are never accessed.
4908  // Pass an empty fixed array.
4909  __ push(Immediate(isolate()->factory()->empty_fixed_array()));
4910 
4911  // Pick the right runtime function or stub to call.
4912  int length = instr->hydrogen()->length();
4913  if (instr->hydrogen()->IsCopyOnWrite()) {
4914  ASSERT(instr->hydrogen()->depth() == 1);
4915  FastCloneShallowArrayStub::Mode mode =
4916  FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS;
4917  FastCloneShallowArrayStub stub(mode, length);
4918  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
4919  } else if (instr->hydrogen()->depth() > 1) {
4920  CallRuntime(Runtime::kCreateArrayLiteral, 3, instr);
4921  } else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) {
4922  CallRuntime(Runtime::kCreateArrayLiteralShallow, 3, instr);
4923  } else {
4924  FastCloneShallowArrayStub::Mode mode =
4925  boilerplate_elements_kind == FAST_DOUBLE_ELEMENTS
4926  ? FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS
4927  : FastCloneShallowArrayStub::CLONE_ELEMENTS;
4928  FastCloneShallowArrayStub stub(mode, length);
4929  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
4930  }
4931 }
4932 
4933 
4934 void LCodeGen::EmitDeepCopy(Handle<JSObject> object,
4935  Register result,
4936  Register source,
4937  int* offset) {
4938  ASSERT(!source.is(ecx));
4939  ASSERT(!result.is(ecx));
4940 
4941  if (FLAG_debug_code) {
4942  __ LoadHeapObject(ecx, object);
4943  __ cmp(source, ecx);
4944  __ Assert(equal, "Unexpected object literal boilerplate");
4945  __ mov(ecx, FieldOperand(source, HeapObject::kMapOffset));
4946  __ cmp(ecx, Handle<Map>(object->map()));
4947  __ Assert(equal, "Unexpected boilerplate map");
4948  __ mov(ecx, FieldOperand(ecx, Map::kBitField2Offset));
4949  __ and_(ecx, Map::kElementsKindMask);
4950  __ cmp(ecx, object->GetElementsKind() << Map::kElementsKindShift);
4951  __ Assert(equal, "Unexpected boilerplate elements kind");
4952  }
4953 
4954  // Only elements backing stores for non-COW arrays need to be copied.
4955  Handle<FixedArrayBase> elements(object->elements());
4956  bool has_elements = elements->length() > 0 &&
4957  elements->map() != isolate()->heap()->fixed_cow_array_map();
4958 
4959  // Increase the offset so that subsequent objects end up right after
4960  // this object and its backing store.
4961  int object_offset = *offset;
4962  int object_size = object->map()->instance_size();
4963  int elements_offset = *offset + object_size;
4964  int elements_size = has_elements ? elements->Size() : 0;
4965  *offset += object_size + elements_size;
4966 
4967  // Copy object header.
4968  ASSERT(object->properties()->length() == 0);
4969  int inobject_properties = object->map()->inobject_properties();
4970  int header_size = object_size - inobject_properties * kPointerSize;
4971  for (int i = 0; i < header_size; i += kPointerSize) {
4972  if (has_elements && i == JSObject::kElementsOffset) {
4973  __ lea(ecx, Operand(result, elements_offset));
4974  } else {
4975  __ mov(ecx, FieldOperand(source, i));
4976  }
4977  __ mov(FieldOperand(result, object_offset + i), ecx);
4978  }
4979 
4980  // Copy in-object properties.
4981  for (int i = 0; i < inobject_properties; i++) {
4982  int total_offset = object_offset + object->GetInObjectPropertyOffset(i);
4983  Handle<Object> value = Handle<Object>(object->InObjectPropertyAt(i));
4984  if (value->IsJSObject()) {
4985  Handle<JSObject> value_object = Handle<JSObject>::cast(value);
4986  __ lea(ecx, Operand(result, *offset));
4987  __ mov(FieldOperand(result, total_offset), ecx);
4988  __ LoadHeapObject(source, value_object);
4989  EmitDeepCopy(value_object, result, source, offset);
4990  } else if (value->IsHeapObject()) {
4991  __ LoadHeapObject(ecx, Handle<HeapObject>::cast(value));
4992  __ mov(FieldOperand(result, total_offset), ecx);
4993  } else {
4994  __ mov(FieldOperand(result, total_offset), Immediate(value));
4995  }
4996  }
4997 
4998  if (has_elements) {
4999  // Copy elements backing store header.
5000  __ LoadHeapObject(source, elements);
5001  for (int i = 0; i < FixedArray::kHeaderSize; i += kPointerSize) {
5002  __ mov(ecx, FieldOperand(source, i));
5003  __ mov(FieldOperand(result, elements_offset + i), ecx);
5004  }
5005 
5006  // Copy elements backing store content.
5007  int elements_length = elements->length();
5008  if (elements->IsFixedDoubleArray()) {
5009  Handle<FixedDoubleArray> double_array =
5010  Handle<FixedDoubleArray>::cast(elements);
5011  for (int i = 0; i < elements_length; i++) {
5012  int64_t value = double_array->get_representation(i);
5013  int32_t value_low = static_cast<int32_t>(value & 0xFFFFFFFF);
5014  int32_t value_high = static_cast<int32_t>(value >> 32);
5015  int total_offset =
5016  elements_offset + FixedDoubleArray::OffsetOfElementAt(i);
5017  __ mov(FieldOperand(result, total_offset), Immediate(value_low));
5018  __ mov(FieldOperand(result, total_offset + 4), Immediate(value_high));
5019  }
5020  } else if (elements->IsFixedArray()) {
5021  Handle<FixedArray> fast_elements = Handle<FixedArray>::cast(elements);
5022  for (int i = 0; i < elements_length; i++) {
5023  int total_offset = elements_offset + FixedArray::OffsetOfElementAt(i);
5024  Handle<Object> value(fast_elements->get(i));
5025  if (value->IsJSObject()) {
5026  Handle<JSObject> value_object = Handle<JSObject>::cast(value);
5027  __ lea(ecx, Operand(result, *offset));
5028  __ mov(FieldOperand(result, total_offset), ecx);
5029  __ LoadHeapObject(source, value_object);
5030  EmitDeepCopy(value_object, result, source, offset);
5031  } else if (value->IsHeapObject()) {
5032  __ LoadHeapObject(ecx, Handle<HeapObject>::cast(value));
5033  __ mov(FieldOperand(result, total_offset), ecx);
5034  } else {
5035  __ mov(FieldOperand(result, total_offset), Immediate(value));
5036  }
5037  }
5038  } else {
5039  UNREACHABLE();
5040  }
5041  }
5042 }
5043 
5044 
5045 void LCodeGen::DoFastLiteral(LFastLiteral* instr) {
5046  ASSERT(ToRegister(instr->context()).is(esi));
5047  int size = instr->hydrogen()->total_size();
5048  ElementsKind boilerplate_elements_kind =
5049  instr->hydrogen()->boilerplate()->GetElementsKind();
5050 
5051  // Deopt if the literal boilerplate ElementsKind is of a type different than
5052  // the expected one. The check isn't necessary if the boilerplate has
5053  // already been converted to TERMINAL_FAST_ELEMENTS_KIND.
5054  if (CanTransitionToMoreGeneralFastElementsKind(
5055  boilerplate_elements_kind, true)) {
5056  __ LoadHeapObject(ebx, instr->hydrogen()->boilerplate());
5057  __ mov(ecx, FieldOperand(ebx, HeapObject::kMapOffset));
5058  // Load the map's "bit field 2". We only need the first byte,
5059  // but the following masking takes care of that anyway.
5060  __ mov(ecx, FieldOperand(ecx, Map::kBitField2Offset));
5061  // Retrieve elements_kind from bit field 2.
5062  __ and_(ecx, Map::kElementsKindMask);
5063  __ cmp(ecx, boilerplate_elements_kind << Map::kElementsKindShift);
5064  DeoptimizeIf(not_equal, instr->environment());
5065  }
5066 
5067  // Allocate all objects that are part of the literal in one big
5068  // allocation. This avoids multiple limit checks.
5069  Label allocated, runtime_allocate;
5070  __ AllocateInNewSpace(size, eax, ecx, edx, &runtime_allocate, TAG_OBJECT);
5071  __ jmp(&allocated);
5072 
5073  __ bind(&runtime_allocate);
5074  __ push(Immediate(Smi::FromInt(size)));
5075  CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
5076 
5077  __ bind(&allocated);
5078  int offset = 0;
5079  __ LoadHeapObject(ebx, instr->hydrogen()->boilerplate());
5080  EmitDeepCopy(instr->hydrogen()->boilerplate(), eax, ebx, &offset);
5081  ASSERT_EQ(size, offset);
5082 }
5083 
5084 
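// Object literals pick between three paths: depth > 1 uses the full
// Runtime::kCreateObjectLiteral, shallow literals that are not fast-elements
// or carry too many properties use Runtime::kCreateObjectLiteralShallow, and
// the remaining shallow fast cases go through FastCloneShallowObjectStub.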
5085 void LCodeGen::DoObjectLiteral(LObjectLiteral* instr) {
5086  ASSERT(ToRegister(instr->context()).is(esi));
5087  Handle<FixedArray> literals(instr->environment()->closure()->literals());
5088  Handle<FixedArray> constant_properties =
5089  instr->hydrogen()->constant_properties();
5090 
5091  // Set up the parameters to the stub/runtime call.
5092  __ PushHeapObject(literals);
5093  __ push(Immediate(Smi::FromInt(instr->hydrogen()->literal_index())));
5094  __ push(Immediate(constant_properties));
5095  int flags = instr->hydrogen()->fast_elements()
5096  ? ObjectLiteral::kFastElements
5097  : ObjectLiteral::kNoFlags;
5098  flags |= instr->hydrogen()->has_function()
5099  ? ObjectLiteral::kHasFunction
5100  : ObjectLiteral::kNoFlags;
5101  __ push(Immediate(Smi::FromInt(flags)));
5102 
5103  // Pick the right runtime function or stub to call.
5104  int properties_count = constant_properties->length() / 2;
5105  if (instr->hydrogen()->depth() > 1) {
5106  CallRuntime(Runtime::kCreateObjectLiteral, 4, instr);
5107  } else if (flags != ObjectLiteral::kFastElements ||
5108  properties_count > FastCloneShallowObjectStub::kMaximumClonedProperties) {
5109  CallRuntime(Runtime::kCreateObjectLiteralShallow, 4, instr);
5110  } else {
5111  FastCloneShallowObjectStub stub(properties_count);
5112  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
5113  }
5114 }
5115 
5116 
5117 void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
5118  ASSERT(ToRegister(instr->value()).is(eax));
5119  __ push(eax);
5120  CallRuntime(Runtime::kToFastProperties, 1, instr);
5121 }
5122 
5123 
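// Materializes the regexp literal lazily: if the literals array slot is still
// undefined, Runtime::kMaterializeRegExpLiteral builds the boilerplate; the
// boilerplate is then cloned with an unrolled word-pair copy. For example, if
// size were 5 * kPointerSize, the loop would copy the first four words in
// pairs and the trailing `if` would copy the fifth (illustrative numbers only;
// the real size is JSRegExp::kSize plus its in-object fields).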
5124 void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
5125  ASSERT(ToRegister(instr->context()).is(esi));
5126  Label materialized;
5127  // Registers will be used as follows:
5128  // ecx = literals array.
5129  // ebx = regexp literal.
5130  // eax = regexp literal clone.
5131  // esi = context.
5132  int literal_offset =
5133  FixedArray::OffsetOfElementAt(instr->hydrogen()->literal_index());
5134  __ LoadHeapObject(ecx, instr->hydrogen()->literals());
5135  __ mov(ebx, FieldOperand(ecx, literal_offset));
5136  __ cmp(ebx, factory()->undefined_value());
5137  __ j(not_equal, &materialized, Label::kNear);
5138 
5139  // Create regexp literal using runtime function
5140  // Result will be in eax.
5141  __ push(ecx);
5142  __ push(Immediate(Smi::FromInt(instr->hydrogen()->literal_index())));
5143  __ push(Immediate(instr->hydrogen()->pattern()));
5144  __ push(Immediate(instr->hydrogen()->flags()));
5145  CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr);
5146  __ mov(ebx, eax);
5147 
5148  __ bind(&materialized);
5149  int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
5150  Label allocated, runtime_allocate;
5151  __ AllocateInNewSpace(size, eax, ecx, edx, &runtime_allocate, TAG_OBJECT);
5152  __ jmp(&allocated);
5153 
5154  __ bind(&runtime_allocate);
5155  __ push(ebx);
5156  __ push(Immediate(Smi::FromInt(size)));
5157  CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
5158  __ pop(ebx);
5159 
5160  __ bind(&allocated);
5161  // Copy the content into the newly allocated memory.
5162  // (Unroll copy loop once for better throughput).
5163  for (int i = 0; i < size - kPointerSize; i += 2 * kPointerSize) {
5164  __ mov(edx, FieldOperand(ebx, i));
5165  __ mov(ecx, FieldOperand(ebx, i + kPointerSize));
5166  __ mov(FieldOperand(eax, i), edx);
5167  __ mov(FieldOperand(eax, i + kPointerSize), ecx);
5168  }
5169  if ((size % (2 * kPointerSize)) != 0) {
5170  __ mov(edx, FieldOperand(ebx, size - kPointerSize));
5171  __ mov(FieldOperand(eax, size - kPointerSize), edx);
5172  }
5173 }
5174 
5175 
5176 void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
5177  ASSERT(ToRegister(instr->context()).is(esi));
5178  // Use the fast case closure allocation code that allocates in new
5179  // space for nested functions that don't need literals cloning.
5180  Handle<SharedFunctionInfo> shared_info = instr->shared_info();
5181  bool pretenure = instr->hydrogen()->pretenure();
5182  if (!pretenure && shared_info->num_literals() == 0) {
5183  FastNewClosureStub stub(shared_info->language_mode());
5184  __ push(Immediate(shared_info));
5185  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
5186  } else {
5187  __ push(esi);
5188  __ push(Immediate(shared_info));
5189  __ push(Immediate(pretenure
5190  ? factory()->true_value()
5191  : factory()->false_value()));
5192  CallRuntime(Runtime::kNewClosure, 3, instr);
5193  }
5194 }
5195 
5196 
5197 void LCodeGen::DoTypeof(LTypeof* instr) {
5198  LOperand* input = instr->value();
5199  EmitPushTaggedOperand(input);
5200  CallRuntime(Runtime::kTypeof, 1, instr);
5201 }
5202 
5203 
5204 void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
5205  Register input = ToRegister(instr->value());
5206  int true_block = chunk_->LookupDestination(instr->true_block_id());
5207  int false_block = chunk_->LookupDestination(instr->false_block_id());
5208  Label* true_label = chunk_->GetAssemblyLabel(true_block);
5209  Label* false_label = chunk_->GetAssemblyLabel(false_block);
5210 
5211  Condition final_branch_condition =
5212  EmitTypeofIs(true_label, false_label, input, instr->type_literal());
5213  if (final_branch_condition != no_condition) {
5214  EmitBranch(true_block, false_block, final_branch_condition);
5215  }
5216 }
5217 
5218 
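// Lowers a typeof comparison against a string literal to a direct machine
// check, mirroring the cases below:
//   "number"    -> smi or heap-number map
//   "string"    -> instance type below FIRST_NONSTRING_TYPE, not undetectable
//   "boolean"   -> identical to true_value or false_value
//   "null"      -> null_value (only with --harmony-typeof)
//   "undefined" -> undefined_value or an undetectable map
//   "function"  -> JS_FUNCTION_TYPE or JS_FUNCTION_PROXY_TYPE
//   "object"    -> non-callable spec-object range (plus null unless
//                  --harmony-typeof), excluding undetectable maps
// Any other literal jumps straight to the false label.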
5219 Condition LCodeGen::EmitTypeofIs(Label* true_label,
5220  Label* false_label,
5221  Register input,
5222  Handle<String> type_name) {
5223  Condition final_branch_condition = no_condition;
5224  if (type_name->Equals(heap()->number_symbol())) {
5225  __ JumpIfSmi(input, true_label);
5226  __ cmp(FieldOperand(input, HeapObject::kMapOffset),
5227  factory()->heap_number_map());
5228  final_branch_condition = equal;
5229 
5230  } else if (type_name->Equals(heap()->string_symbol())) {
5231  __ JumpIfSmi(input, false_label);
5232  __ CmpObjectType(input, FIRST_NONSTRING_TYPE, input);
5233  __ j(above_equal, false_label);
5234  __ test_b(FieldOperand(input, Map::kBitFieldOffset),
5235  1 << Map::kIsUndetectable);
5236  final_branch_condition = zero;
5237 
5238  } else if (type_name->Equals(heap()->boolean_symbol())) {
5239  __ cmp(input, factory()->true_value());
5240  __ j(equal, true_label);
5241  __ cmp(input, factory()->false_value());
5242  final_branch_condition = equal;
5243 
5244  } else if (FLAG_harmony_typeof && type_name->Equals(heap()->null_symbol())) {
5245  __ cmp(input, factory()->null_value());
5246  final_branch_condition = equal;
5247 
5248  } else if (type_name->Equals(heap()->undefined_symbol())) {
5249  __ cmp(input, factory()->undefined_value());
5250  __ j(equal, true_label);
5251  __ JumpIfSmi(input, false_label);
5252  // Check for undetectable objects => true.
5253  __ mov(input, FieldOperand(input, HeapObject::kMapOffset));
5254  __ test_b(FieldOperand(input, Map::kBitFieldOffset),
5255  1 << Map::kIsUndetectable);
5256  final_branch_condition = not_zero;
5257 
5258  } else if (type_name->Equals(heap()->function_symbol())) {
5260  __ JumpIfSmi(input, false_label);
5261  __ CmpObjectType(input, JS_FUNCTION_TYPE, input);
5262  __ j(equal, true_label);
5263  __ CmpInstanceType(input, JS_FUNCTION_PROXY_TYPE);
5264  final_branch_condition = equal;
5265 
5266  } else if (type_name->Equals(heap()->object_symbol())) {
5267  __ JumpIfSmi(input, false_label);
5268  if (!FLAG_harmony_typeof) {
5269  __ cmp(input, factory()->null_value());
5270  __ j(equal, true_label);
5271  }
5272  __ CmpObjectType(input, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE, input);
5273  __ j(below, false_label);
5274  __ CmpInstanceType(input, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
5275  __ j(above, false_label);
5276  // Check for undetectable objects => false.
5277  __ test_b(FieldOperand(input, Map::kBitFieldOffset),
5278  1 << Map::kIsUndetectable);
5279  final_branch_condition = zero;
5280 
5281  } else {
5282  __ jmp(false_label);
5283  }
5284  return final_branch_condition;
5285 }
5286 
5287 
5288 void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
5289  Register temp = ToRegister(instr->temp());
5290  int true_block = chunk_->LookupDestination(instr->true_block_id());
5291  int false_block = chunk_->LookupDestination(instr->false_block_id());
5292 
5293  EmitIsConstructCall(temp);
5294  EmitBranch(true_block, false_block, equal);
5295 }
5296 
5297 
5298 void LCodeGen::EmitIsConstructCall(Register temp) {
5299  // Get the frame pointer for the calling frame.
5300  __ mov(temp, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
5301 
5302  // Skip the arguments adaptor frame if it exists.
5303  Label check_frame_marker;
5304  __ cmp(Operand(temp, StandardFrameConstants::kContextOffset),
5305  Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
5306  __ j(not_equal, &check_frame_marker, Label::kNear);
5307  __ mov(temp, Operand(temp, StandardFrameConstants::kCallerFPOffset));
5308 
5309  // Check the marker in the calling frame.
5310  __ bind(&check_frame_marker);
5311  __ cmp(Operand(temp, StandardFrameConstants::kMarkerOffset),
5312  Immediate(Smi::FromInt(StackFrame::CONSTRUCT)));
5313 }
5314 
5315 
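// The deoptimizer patches a call sequence over the code that follows a lazy
// bailout, so consecutive bailout points must be at least patch_size() bytes
// apart. For example (illustrative numbers only): if the previous point ended
// at pc 100, patch_size() were 11 and we are now at pc 105, this emits 6 bytes
// of Nop before recording the new pc.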
5316 void LCodeGen::EnsureSpaceForLazyDeopt() {
5317  // Ensure that we have enough space after the previous lazy-bailout
5318  // instruction for patching the code here.
5319  int current_pc = masm()->pc_offset();
5320  int patch_size = Deoptimizer::patch_size();
5321  if (current_pc < last_lazy_deopt_pc_ + patch_size) {
5322  int padding_size = last_lazy_deopt_pc_ + patch_size - current_pc;
5323  __ Nop(padding_size);
5324  }
5325  last_lazy_deopt_pc_ = masm()->pc_offset();
5326 }
5327 
5328 
5329 void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
5330  EnsureSpaceForLazyDeopt();
5331  ASSERT(instr->HasEnvironment());
5332  LEnvironment* env = instr->environment();
5333  RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
5334  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
5335 }
5336 
5337 
5338 void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
5339  DeoptimizeIf(no_condition, instr->environment());
5340 }
5341 
5342 
5343 void LCodeGen::DoDeleteProperty(LDeleteProperty* instr) {
5344  LOperand* obj = instr->object();
5345  LOperand* key = instr->key();
5346  __ push(ToOperand(obj));
5347  EmitPushTaggedOperand(key);
5348  ASSERT(instr->HasPointerMap());
5349  LPointerMap* pointers = instr->pointer_map();
5350  RecordPosition(pointers->position());
5351  // Create safepoint generator that will also ensure enough space in the
5352  // reloc info for patching in deoptimization (since this is invoking a
5353  // builtin)
5354  SafepointGenerator safepoint_generator(
5355  this, pointers, Safepoint::kLazyDeopt);
5356  __ push(Immediate(Smi::FromInt(strict_mode_flag())));
5357  __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION, safepoint_generator);
5358 }
5359 
5360 
5361 void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
5362  PushSafepointRegistersScope scope(this);
5364  __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
5365  RecordSafepointWithLazyDeopt(
5366  instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
5367  ASSERT(instr->HasEnvironment());
5368  LEnvironment* env = instr->environment();
5369  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
5370 }
5371 
5372 
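// Two flavours of stack check: at function entry the check is inlined and
// calls StackCheckStub directly when esp is below the stack limit; at a
// backwards branch the slow path lives in deferred code (DeferredStackCheck),
// so the hot loop only pays for the compare and a conditional jump.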
5373 void LCodeGen::DoStackCheck(LStackCheck* instr) {
5374  class DeferredStackCheck: public LDeferredCode {
5375  public:
5376  DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
5377  : LDeferredCode(codegen), instr_(instr) { }
5378  virtual void Generate() { codegen()->DoDeferredStackCheck(instr_); }
5379  virtual LInstruction* instr() { return instr_; }
5380  private:
5381  LStackCheck* instr_;
5382  };
5383 
5384  ASSERT(instr->HasEnvironment());
5385  LEnvironment* env = instr->environment();
5386  // There is no LLazyBailout instruction for stack-checks. We have to
5387  // prepare for lazy deoptimization explicitly here.
5388  if (instr->hydrogen()->is_function_entry()) {
5389  // Perform stack overflow check.
5390  Label done;
5391  ExternalReference stack_limit =
5392  ExternalReference::address_of_stack_limit(isolate());
5393  __ cmp(esp, Operand::StaticVariable(stack_limit));
5394  __ j(above_equal, &done, Label::kNear);
5395 
5396  ASSERT(instr->context()->IsRegister());
5397  ASSERT(ToRegister(instr->context()).is(esi));
5398  StackCheckStub stub;
5399  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
5400  EnsureSpaceForLazyDeopt();
5401  __ bind(&done);
5402  RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
5403  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
5404  } else {
5405  ASSERT(instr->hydrogen()->is_backwards_branch());
5406  // Perform stack overflow check if this goto needs it before jumping.
5407  DeferredStackCheck* deferred_stack_check =
5408  new(zone()) DeferredStackCheck(this, instr);
5409  ExternalReference stack_limit =
5410  ExternalReference::address_of_stack_limit(isolate());
5411  __ cmp(esp, Operand::StaticVariable(stack_limit));
5412  __ j(below, deferred_stack_check->entry());
5413  EnsureSpaceForLazyDeopt();
5414  __ bind(instr->done_label());
5415  deferred_stack_check->SetExit(instr->done_label());
5416  RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
5417  // Don't record a deoptimization index for the safepoint here.
5418  // This will be done explicitly when emitting call and the safepoint in
5419  // the deferred code.
5420  }
5421 }
5422 
5423 
5424 void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
5425  // This is a pseudo-instruction that ensures that the environment here is
5426  // properly registered for deoptimization and records the assembler's PC
5427  // offset.
5428  LEnvironment* environment = instr->environment();
5429  environment->SetSpilledRegisters(instr->SpilledRegisterArray(),
5430  instr->SpilledDoubleRegisterArray());
5431 
5432  // If the environment were already registered, we would have no way of
5433  // backpatching it with the spill slot operands.
5434  ASSERT(!environment->HasBeenRegistered());
5435  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
5436  ASSERT(osr_pc_offset_ == -1);
5437  osr_pc_offset_ = masm()->pc_offset();
5438 }
5439 
5440 
5441 void LCodeGen::DoIn(LIn* instr) {
5442  LOperand* obj = instr->object();
5443  LOperand* key = instr->key();
5444  EmitPushTaggedOperand(key);
5445  EmitPushTaggedOperand(obj);
5446  ASSERT(instr->HasPointerMap());
5447  LPointerMap* pointers = instr->pointer_map();
5448  RecordPosition(pointers->position());
5449  SafepointGenerator safepoint_generator(
5450  this, pointers, Safepoint::kLazyDeopt);
5451  __ InvokeBuiltin(Builtins::IN, CALL_FUNCTION, safepoint_generator);
5452 }
5453 
5454 
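// Prepares the for-in state: deoptimizes if the enumerable is undefined,
// null, a smi, or a JS proxy; then uses the map's enum cache when
// CheckEnumCache succeeds, otherwise calls Runtime::kGetPropertyNamesFast and
// deoptimizes unless the result is a map (the meta_map comparison below),
// i.e. unless the runtime could hand back the enum-cache fast case.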
5455 void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
5456  __ cmp(eax, isolate()->factory()->undefined_value());
5457  DeoptimizeIf(equal, instr->environment());
5458 
5459  __ cmp(eax, isolate()->factory()->null_value());
5460  DeoptimizeIf(equal, instr->environment());
5461 
5462  __ test(eax, Immediate(kSmiTagMask));
5463  DeoptimizeIf(zero, instr->environment());
5464 
5465  STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
5466  __ CmpObjectType(eax, LAST_JS_PROXY_TYPE, ecx);
5467  DeoptimizeIf(below_equal, instr->environment());
5468 
5469  Label use_cache, call_runtime;
5470  __ CheckEnumCache(&call_runtime);
5471 
5472  __ mov(eax, FieldOperand(eax, HeapObject::kMapOffset));
5473  __ jmp(&use_cache, Label::kNear);
5474 
5475  // Get the set of properties to enumerate.
5476  __ bind(&call_runtime);
5477  __ push(eax);
5478  CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr);
5479 
5480  __ cmp(FieldOperand(eax, HeapObject::kMapOffset),
5481  isolate()->factory()->meta_map());
5482  DeoptimizeIf(not_equal, instr->environment());
5483  __ bind(&use_cache);
5484 }
5485 
5486 
5487 void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
5488  Register map = ToRegister(instr->map());
5489  Register result = ToRegister(instr->result());
5490  Label load_cache, done;
5491  __ EnumLength(result, map);
5492  __ cmp(result, Immediate(Smi::FromInt(0)));
5493  __ j(not_equal, &load_cache);
5494  __ mov(result, isolate()->factory()->empty_fixed_array());
5495  __ jmp(&done);
5496 
5497  __ bind(&load_cache);
5498  __ LoadInstanceDescriptors(map, result);
5499  __ mov(result,
5500  FieldOperand(result, DescriptorArray::kEnumCacheOffset));
5501  __ mov(result,
5502  FieldOperand(result, FixedArray::SizeFor(instr->idx())));
5503  __ bind(&done);
5504  __ test(result, result);
5505  DeoptimizeIf(equal, instr->environment());
5506 }
5507 
5508 
5509 void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
5510  Register object = ToRegister(instr->value());
5511  __ cmp(ToRegister(instr->map()),
5512  FieldOperand(object, HeapObject::kMapOffset));
5513  DeoptimizeIf(not_equal, instr->environment());
5514 }
5515 
5516 
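// The index register holds a smi. Non-negative indices select in-object
// fields relative to JSObject::kHeaderSize (the smi's factor-of-two tag
// combines with the half-pointer scale to give index * kPointerSize);
// negative indices are negated and address the out-of-object properties
// backing store, as noted in the comment below.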
5517 void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
5518  Register object = ToRegister(instr->object());
5519  Register index = ToRegister(instr->index());
5520 
5521  Label out_of_object, done;
5522  __ cmp(index, Immediate(0));
5523  __ j(less, &out_of_object);
5524  __ mov(object, FieldOperand(object,
5525  index,
5526  times_half_pointer_size,
5527  JSObject::kHeaderSize));
5528  __ jmp(&done, Label::kNear);
5529 
5530  __ bind(&out_of_object);
5531  __ mov(object, FieldOperand(object, JSObject::kPropertiesOffset));
5532  __ neg(index);
5533  // Index is now equal to out of object property index plus 1.
5534  __ mov(object, FieldOperand(object,
5535  index,
5536  times_half_pointer_size,
5537  FixedArray::kHeaderSize - kPointerSize));
5538  __ bind(&done);
5539 }
5540 
5541 
5542 #undef __
5543 
5544 } } // namespace v8::internal
5545 
5546 #endif // V8_TARGET_ARCH_IA32