v8 3.11.10 (node 0.8.26)
V8 is Google's open source JavaScript engine
lithium-codegen-ia32.cc
1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
4 // met:
5 //
6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided
11 // with the distribution.
12 // * Neither the name of Google Inc. nor the names of its
13 // contributors may be used to endorse or promote products derived
14 // from this software without specific prior written permission.
15 //
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 
28 #include "v8.h"
29 
30 #if defined(V8_TARGET_ARCH_IA32)
31 
32 #include "ia32/lithium-codegen-ia32.h"
33 #include "code-stubs.h"
34 #include "deoptimizer.h"
35 #include "stub-cache.h"
36 #include "codegen.h"
37 
38 namespace v8 {
39 namespace internal {
40 
41 
42 // When invoking builtins, we need to record the safepoint in the middle of
43 // the invoke instruction sequence generated by the macro assembler.
44 class SafepointGenerator : public CallWrapper {
45  public:
46  SafepointGenerator(LCodeGen* codegen,
47  LPointerMap* pointers,
48  Safepoint::DeoptMode mode)
49  : codegen_(codegen),
50  pointers_(pointers),
51  deopt_mode_(mode) {}
52  virtual ~SafepointGenerator() { }
53 
54  virtual void BeforeCall(int call_size) const {}
55 
56  virtual void AfterCall() const {
57  codegen_->RecordSafepoint(pointers_, deopt_mode_);
58  }
59 
60  private:
61  LCodeGen* codegen_;
62  LPointerMap* pointers_;
63  Safepoint::DeoptMode deopt_mode_;
64 };
65 
66 
67 #define __ masm()->
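// The "__" macro is V8's usual assembler shorthand: every "__ op(...)" in
// this file expands to "masm()->op(...)".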
68 
69 bool LCodeGen::GenerateCode() {
70  HPhase phase("Z_Code generation", chunk());
71  ASSERT(is_unused());
72  status_ = GENERATING;
73  CpuFeatures::Scope scope(SSE2);
74 
75  CodeStub::GenerateFPStubs();
76 
77  // Open a frame scope to indicate that there is a frame on the stack. The
78  // MANUAL indicates that the scope shouldn't actually generate code to set up
79  // the frame (that is done in GeneratePrologue).
80  FrameScope frame_scope(masm_, StackFrame::MANUAL);
81 
82  dynamic_frame_alignment_ = (chunk()->num_double_slots() > 2 &&
83  !chunk()->graph()->is_recursive()) ||
84  info()->osr_ast_id() != AstNode::kNoNumber;
85 
86  return GeneratePrologue() &&
87  GenerateBody() &&
88  GenerateDeferredCode() &&
89  GenerateSafepointTable();
90 }
91 
92 
93 void LCodeGen::FinishCode(Handle<Code> code) {
94  ASSERT(is_done());
95  code->set_stack_slots(GetStackSlotCount());
96  code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
97  PopulateDeoptimizationData(code);
98  Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(code);
99 }
100 
101 
102 void LCodeGen::Abort(const char* format, ...) {
103  if (FLAG_trace_bailout) {
104  SmartArrayPointer<char> name(
105  info()->shared_info()->DebugName()->ToCString());
106  PrintF("Aborting LCodeGen in @\"%s\": ", *name);
107  va_list arguments;
108  va_start(arguments, format);
109  OS::VPrint(format, arguments);
110  va_end(arguments);
111  PrintF("\n");
112  }
113  status_ = ABORTED;
114 }
115 
116 
117 void LCodeGen::Comment(const char* format, ...) {
118  if (!FLAG_code_comments) return;
119  char buffer[4 * KB];
120  StringBuilder builder(buffer, ARRAY_SIZE(buffer));
121  va_list arguments;
122  va_start(arguments, format);
123  builder.AddFormattedList(format, arguments);
124  va_end(arguments);
125 
126  // Copy the string before recording it in the assembler to avoid
127  // issues when the stack allocated buffer goes out of scope.
128  size_t length = builder.position();
129  Vector<char> copy = Vector<char>::New(length + 1);
130  memcpy(copy.start(), builder.Finalize(), copy.length());
131  masm()->RecordComment(copy.start());
132 }
133 
134 
135 bool LCodeGen::GeneratePrologue() {
136  ASSERT(is_generating());
137 
138 #ifdef DEBUG
139  if (strlen(FLAG_stop_at) > 0 &&
140  info_->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
141  __ int3();
142  }
143 #endif
144 
145  // Strict mode functions and builtins need to replace the receiver
146  // with undefined when called as functions (without an explicit
147  // receiver object). ecx is zero for method calls and non-zero for
148  // function calls.
149  if (!info_->is_classic_mode() || info_->is_native()) {
150  Label ok;
151  __ test(ecx, Operand(ecx));
152  __ j(zero, &ok, Label::kNear);
153  // +1 for return address.
154  int receiver_offset = (scope()->num_parameters() + 1) * kPointerSize;
155  __ mov(Operand(esp, receiver_offset),
156  Immediate(isolate()->factory()->undefined_value()));
157  __ bind(&ok);
158  }
159 
160 
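// If dynamic frame alignment is in use, esp is aligned for doubles here:
// when a padding word is needed it is pushed and the return address,
// receiver, and arguments are each shifted down one slot, leaving a zapped
// word above them. edx records which case applied and is stored into the
// first stack slot further below.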
161  if (dynamic_frame_alignment_) {
162  // Move state of dynamic frame alignment into edx.
163  __ mov(edx, Immediate(kNoAlignmentPadding));
164 
165  Label do_not_pad, align_loop;
167  // Align esp + 4 to a multiple of 2 * kPointerSize.
168  __ test(esp, Immediate(kPointerSize));
169  __ j(not_zero, &do_not_pad, Label::kNear);
170  __ push(Immediate(0));
171  __ mov(ebx, esp);
172  __ mov(edx, Immediate(kAlignmentPaddingPushed));
173  // Copy arguments, receiver, and return address.
174  __ mov(ecx, Immediate(scope()->num_parameters() + 2));
175 
176  __ bind(&align_loop);
177  __ mov(eax, Operand(ebx, 1 * kPointerSize));
178  __ mov(Operand(ebx, 0), eax);
179  __ add(Operand(ebx), Immediate(kPointerSize));
180  __ dec(ecx);
181  __ j(not_zero, &align_loop, Label::kNear);
182  __ mov(Operand(ebx, 0), Immediate(kAlignmentZapValue));
183  __ bind(&do_not_pad);
184  }
185 
186  __ push(ebp); // Caller's frame pointer.
187  __ mov(ebp, esp);
188  __ push(esi); // Callee's context.
189  __ push(edi); // Callee's JS function.
190 
191  if (dynamic_frame_alignment_ && FLAG_debug_code) {
192  __ test(esp, Immediate(kPointerSize));
193  __ Assert(zero, "frame is expected to be aligned");
194  }
195 
196  // Reserve space for the stack slots needed by the code.
197  int slots = GetStackSlotCount();
198  ASSERT_GE(slots, 1);
199  if (slots == 1) {
200  if (dynamic_frame_alignment_) {
201  __ push(edx);
202  } else {
203  __ push(Immediate(kNoAlignmentPadding));
204  }
205  } else {
206  if (FLAG_debug_code) {
207  __ mov(Operand(eax), Immediate(slots));
208  Label loop;
209  __ bind(&loop);
210  __ push(Immediate(kSlotsZapValue));
211  __ dec(eax);
212  __ j(not_zero, &loop);
213  } else {
214  __ sub(Operand(esp), Immediate(slots * kPointerSize));
215  #ifdef _MSC_VER
216  // On Windows, you may not access the stack more than one page below
217  // the most recently mapped page. To make the allocated area randomly
218  // accessible, we write to each page in turn (the value is irrelevant).
219  const int kPageSize = 4 * KB;
220  for (int offset = slots * kPointerSize - kPageSize;
221  offset > 0;
222  offset -= kPageSize) {
223  __ mov(Operand(esp, offset), eax);
224  }
225  #endif
226  }
227 
228  // Store dynamic frame alignment state in the first local.
229  if (dynamic_frame_alignment_) {
230  __ mov(Operand(ebp,
231  JavaScriptFrameConstants::kDynamicAlignmentStateOffset),
232  edx);
233  } else {
234  __ mov(Operand(ebp,
235  JavaScriptFrameConstants::kDynamicAlignmentStateOffset),
236  Immediate(kNoAlignmentPadding));
237  }
238  }
239 
240  // Possibly allocate a local context.
241  int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
242  if (heap_slots > 0) {
243  Comment(";;; Allocate local context");
244  // Argument to NewContext is the function, which is still in edi.
245  __ push(edi);
246  if (heap_slots <= FastNewContextStub::kMaximumSlots) {
247  FastNewContextStub stub(heap_slots);
248  __ CallStub(&stub);
249  } else {
250  __ CallRuntime(Runtime::kNewFunctionContext, 1);
251  }
252  RecordSafepoint(Safepoint::kNoLazyDeopt);
253  // Context is returned in both eax and esi. It replaces the context
254  // passed to us. It's saved in the stack and kept live in esi.
255  __ mov(Operand(ebp, StandardFrameConstants::kContextOffset), eax);
256 
257  // Copy parameters into context if necessary.
258  int num_parameters = scope()->num_parameters();
259  for (int i = 0; i < num_parameters; i++) {
260  Variable* var = scope()->parameter(i);
261  if (var->IsContextSlot()) {
262  int parameter_offset = StandardFrameConstants::kCallerSPOffset +
263  (num_parameters - 1 - i) * kPointerSize;
264  // Load parameter from stack.
265  __ mov(eax, Operand(ebp, parameter_offset));
266  // Store it in the context.
267  int context_offset = Context::SlotOffset(var->index());
268  __ mov(Operand(esi, context_offset), eax);
269  // Update the write barrier. This clobbers eax and ebx.
270  __ RecordWriteContextSlot(esi,
271  context_offset,
272  eax,
273  ebx,
274  kDontSaveFPRegs);
275  }
276  }
277  Comment(";;; End allocate local context");
278  }
279 
280  // Trace the call.
281  if (FLAG_trace) {
282  // We have not executed any compiled code yet, so esi still holds the
283  // incoming context.
284  __ CallRuntime(Runtime::kTraceEnter, 0);
285  }
286  return !is_aborted();
287 }
288 
289 
290 bool LCodeGen::GenerateBody() {
291  ASSERT(is_generating());
292  bool emit_instructions = true;
293  for (current_instruction_ = 0;
294  !is_aborted() && current_instruction_ < instructions_->length();
295  current_instruction_++) {
296  LInstruction* instr = instructions_->at(current_instruction_);
297  if (instr->IsLabel()) {
298  LLabel* label = LLabel::cast(instr);
299  emit_instructions = !label->HasReplacement();
300  }
301 
302  if (emit_instructions) {
303  Comment(";;; @%d: %s.", current_instruction_, instr->Mnemonic());
304  instr->CompileToNative(this);
305  }
306  }
307  EnsureSpaceForLazyDeopt();
308  return !is_aborted();
309 }
310 
311 
312 bool LCodeGen::GenerateDeferredCode() {
313  ASSERT(is_generating());
314  if (deferred_.length() > 0) {
315  for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
316  LDeferredCode* code = deferred_[i];
317  __ bind(code->entry());
318  Comment(";;; Deferred code @%d: %s.",
319  code->instruction_index(),
320  code->instr()->Mnemonic());
321  code->Generate();
322  __ jmp(code->exit());
323  }
324  }
325 
326  // Deferred code is the last part of the instruction sequence. Mark
327  // the generated code as done unless we bailed out.
328  if (!is_aborted()) status_ = DONE;
329  return !is_aborted();
330 }
331 
332 
333 bool LCodeGen::GenerateSafepointTable() {
334  ASSERT(is_done());
335  safepoints_.Emit(masm(), GetStackSlotCount());
336  return !is_aborted();
337 }
338 
339 
340 Register LCodeGen::ToRegister(int index) const {
341  return Register::FromAllocationIndex(index);
342 }
343 
344 
345 XMMRegister LCodeGen::ToDoubleRegister(int index) const {
346  return XMMRegister::FromAllocationIndex(index);
347 }
348 
349 
350 Register LCodeGen::ToRegister(LOperand* op) const {
351  ASSERT(op->IsRegister());
352  return ToRegister(op->index());
353 }
354 
355 
356 XMMRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
357  ASSERT(op->IsDoubleRegister());
358  return ToDoubleRegister(op->index());
359 }
360 
361 
362 int LCodeGen::ToInteger32(LConstantOperand* op) const {
363  Handle<Object> value = chunk_->LookupLiteral(op);
364  ASSERT(chunk_->LookupLiteralRepresentation(op).IsInteger32());
365  ASSERT(static_cast<double>(static_cast<int32_t>(value->Number())) ==
366  value->Number());
367  return static_cast<int32_t>(value->Number());
368 }
369 
370 
371 Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
372  Handle<Object> literal = chunk_->LookupLiteral(op);
373  ASSERT(chunk_->LookupLiteralRepresentation(op).IsTagged());
374  return literal;
375 }
376 
377 
378 double LCodeGen::ToDouble(LConstantOperand* op) const {
379  Handle<Object> value = chunk_->LookupLiteral(op);
380  return value->Number();
381 }
382 
383 
384 bool LCodeGen::IsInteger32(LConstantOperand* op) const {
385  return chunk_->LookupLiteralRepresentation(op).IsInteger32();
386 }
387 
388 
389 Operand LCodeGen::ToOperand(LOperand* op) const {
390  if (op->IsRegister()) return Operand(ToRegister(op));
391  if (op->IsDoubleRegister()) return Operand(ToDoubleRegister(op));
392  ASSERT(op->IsStackSlot() || op->IsDoubleStackSlot());
393  int index = op->index();
394  if (index >= 0) {
395  // Local or spill slot. Skip the frame pointer, function, and
396  // context in the fixed part of the frame.
397  return Operand(ebp, -(index + 3) * kPointerSize);
398  } else {
399  // Incoming parameter. Skip the return address.
400  return Operand(ebp, -(index - 1) * kPointerSize);
401  }
402 }
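// Worked example of the arithmetic above: spill slot 0 resolves to
// Operand(ebp, -3 * kPointerSize), just below the saved context and JS
// function, while an incoming parameter with index -1 resolves to
// Operand(ebp, 2 * kPointerSize), above the saved ebp and return address.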
403 
404 
405 Operand LCodeGen::HighOperand(LOperand* op) {
406  ASSERT(op->IsDoubleStackSlot());
407  int index = op->index();
408  int offset = (index >= 0) ? index + 3 : index - 1;
409  return Operand(ebp, -offset * kPointerSize);
410 }
411 
412 
413 void LCodeGen::WriteTranslation(LEnvironment* environment,
414  Translation* translation) {
415  if (environment == NULL) return;
416 
417  // The translation includes one command per value in the environment.
418  int translation_size = environment->values()->length();
419  // The output frame height does not include the parameters.
420  int height = translation_size - environment->parameter_count();
421 
422  WriteTranslation(environment->outer(), translation);
423  int closure_id = DefineDeoptimizationLiteral(environment->closure());
424  switch (environment->frame_type()) {
425  case JS_FUNCTION:
426  translation->BeginJSFrame(environment->ast_id(), closure_id, height);
427  break;
428  case JS_CONSTRUCT:
429  translation->BeginConstructStubFrame(closure_id, translation_size);
430  break;
431  case ARGUMENTS_ADAPTOR:
432  translation->BeginArgumentsAdaptorFrame(closure_id, translation_size);
433  break;
434  default:
435  UNREACHABLE();
436  }
437  for (int i = 0; i < translation_size; ++i) {
438  LOperand* value = environment->values()->at(i);
439  // spilled_registers_ and spilled_double_registers_ are either
440  // both NULL or both set.
441  if (environment->spilled_registers() != NULL && value != NULL) {
442  if (value->IsRegister() &&
443  environment->spilled_registers()[value->index()] != NULL) {
444  translation->MarkDuplicate();
445  AddToTranslation(translation,
446  environment->spilled_registers()[value->index()],
447  environment->HasTaggedValueAt(i));
448  } else if (
449  value->IsDoubleRegister() &&
450  environment->spilled_double_registers()[value->index()] != NULL) {
451  translation->MarkDuplicate();
452  AddToTranslation(
453  translation,
454  environment->spilled_double_registers()[value->index()],
455  false);
456  }
457  }
458 
459  AddToTranslation(translation, value, environment->HasTaggedValueAt(i));
460  }
461 }
462 
463 
464 void LCodeGen::AddToTranslation(Translation* translation,
465  LOperand* op,
466  bool is_tagged) {
467  if (op == NULL) {
468  // TODO(twuerthinger): Introduce marker operands to indicate that this value
469  // is not present and must be reconstructed from the deoptimizer. Currently
470  // this is only used for the arguments object.
471  translation->StoreArgumentsObject();
472  } else if (op->IsStackSlot()) {
473  if (is_tagged) {
474  translation->StoreStackSlot(op->index());
475  } else {
476  translation->StoreInt32StackSlot(op->index());
477  }
478  } else if (op->IsDoubleStackSlot()) {
479  translation->StoreDoubleStackSlot(op->index());
480  } else if (op->IsArgument()) {
481  ASSERT(is_tagged);
482  int src_index = GetStackSlotCount() + op->index();
483  translation->StoreStackSlot(src_index);
484  } else if (op->IsRegister()) {
485  Register reg = ToRegister(op);
486  if (is_tagged) {
487  translation->StoreRegister(reg);
488  } else {
489  translation->StoreInt32Register(reg);
490  }
491  } else if (op->IsDoubleRegister()) {
492  XMMRegister reg = ToDoubleRegister(op);
493  translation->StoreDoubleRegister(reg);
494  } else if (op->IsConstantOperand()) {
495  Handle<Object> literal = chunk()->LookupLiteral(LConstantOperand::cast(op));
496  int src_index = DefineDeoptimizationLiteral(literal);
497  translation->StoreLiteral(src_index);
498  } else {
499  UNREACHABLE();
500  }
501 }
502 
503 
504 void LCodeGen::CallCodeGeneric(Handle<Code> code,
505  RelocInfo::Mode mode,
506  LInstruction* instr,
507  SafepointMode safepoint_mode) {
508  ASSERT(instr != NULL);
509  LPointerMap* pointers = instr->pointer_map();
510  RecordPosition(pointers->position());
511  __ call(code, mode);
512  RecordSafepointWithLazyDeopt(instr, safepoint_mode);
513 
514  // Signal that we don't inline smi code before these stubs in the
515  // optimizing code generator.
516  if (code->kind() == Code::BINARY_OP_IC ||
517  code->kind() == Code::COMPARE_IC) {
518  __ nop();
519  }
520 }
521 
522 
523 void LCodeGen::CallCode(Handle<Code> code,
524  RelocInfo::Mode mode,
525  LInstruction* instr) {
526  CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT);
527 }
528 
529 
530 void LCodeGen::CallRuntime(const Runtime::Function* fun,
531  int argc,
532  LInstruction* instr) {
533  ASSERT(instr != NULL);
534  ASSERT(instr->HasPointerMap());
535  LPointerMap* pointers = instr->pointer_map();
536  RecordPosition(pointers->position());
537 
538  __ CallRuntime(fun, argc);
539 
540  RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
541 }
542 
543 
544 void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
545  int argc,
546  LInstruction* instr,
547  LOperand* context) {
548  if (context->IsRegister()) {
549  if (!ToRegister(context).is(esi)) {
550  __ mov(esi, ToRegister(context));
551  }
552  } else if (context->IsStackSlot()) {
553  __ mov(esi, ToOperand(context));
554  } else if (context->IsConstantOperand()) {
555  Handle<Object> literal =
556  chunk_->LookupLiteral(LConstantOperand::cast(context));
557  __ LoadHeapObject(esi, Handle<Context>::cast(literal));
558  } else {
559  UNREACHABLE();
560  }
561 
562  __ CallRuntimeSaveDoubles(id);
563  RecordSafepointWithRegisters(
564  instr->pointer_map(), argc, Safepoint::kNoLazyDeopt);
565 }
566 
567 
568 void LCodeGen::RegisterEnvironmentForDeoptimization(
569  LEnvironment* environment, Safepoint::DeoptMode mode) {
570  if (!environment->HasBeenRegistered()) {
571  // Physical stack frame layout:
572  // -x ............. -4 0 ..................................... y
573  // [incoming arguments] [spill slots] [pushed outgoing arguments]
574 
575  // Layout of the environment:
576  // 0 ..................................................... size-1
577  // [parameters] [locals] [expression stack including arguments]
578 
579  // Layout of the translation:
580  // 0 ........................................................ size - 1 + 4
581  // [expression stack including arguments] [locals] [4 words] [parameters]
582  // |>------------ translation_size ------------<|
583 
584  int frame_count = 0;
585  int jsframe_count = 0;
586  for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
587  ++frame_count;
588  if (e->frame_type() == JS_FUNCTION) {
589  ++jsframe_count;
590  }
591  }
592  Translation translation(&translations_, frame_count, jsframe_count,
593  zone());
594  WriteTranslation(environment, &translation);
595  int deoptimization_index = deoptimizations_.length();
596  int pc_offset = masm()->pc_offset();
597  environment->Register(deoptimization_index,
598  translation.index(),
599  (mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
600  deoptimizations_.Add(environment, zone());
601  }
602 }
603 
604 
605 void LCodeGen::DeoptimizeIf(Condition cc, LEnvironment* environment) {
606  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
607  ASSERT(environment->HasBeenRegistered());
608  int id = environment->deoptimization_index();
609  Address entry = Deoptimizer::GetDeoptimizationEntry(id, Deoptimizer::EAGER);
610  if (entry == NULL) {
611  Abort("bailout was not prepared");
612  return;
613  }
614 
615  if (FLAG_deopt_every_n_times != 0) {
616  Handle<SharedFunctionInfo> shared(info_->shared_info());
617  Label no_deopt;
618  __ pushfd();
619  __ push(eax);
620  __ push(ebx);
621  __ mov(ebx, shared);
622  __ mov(eax,
623  FieldOperand(ebx, SharedFunctionInfo::kDeoptCounterOffset));
624  __ sub(Operand(eax), Immediate(Smi::FromInt(1)));
625  __ j(not_zero, &no_deopt, Label::kNear);
626  if (FLAG_trap_on_deopt) __ int3();
627  __ mov(eax, Immediate(Smi::FromInt(FLAG_deopt_every_n_times)));
628  __ mov(FieldOperand(ebx, SharedFunctionInfo::kDeoptCounterOffset),
629  eax);
630  __ pop(ebx);
631  __ pop(eax);
632  __ popfd();
633  __ jmp(entry, RelocInfo::RUNTIME_ENTRY);
634 
635  __ bind(&no_deopt);
636  __ mov(FieldOperand(ebx, SharedFunctionInfo::kDeoptCounterOffset),
637  eax);
638  __ pop(ebx);
639  __ pop(eax);
640  __ popfd();
641  }
642 
643  if (cc == no_condition) {
644  if (FLAG_trap_on_deopt) __ int3();
645  __ jmp(entry, RelocInfo::RUNTIME_ENTRY);
646  } else {
647  if (FLAG_trap_on_deopt) {
648  Label done;
649  __ j(NegateCondition(cc), &done, Label::kNear);
650  __ int3();
651  __ jmp(entry, RelocInfo::RUNTIME_ENTRY);
652  __ bind(&done);
653  } else {
654  __ j(cc, entry, RelocInfo::RUNTIME_ENTRY);
655  }
656  }
657 }
658 
659 
660 void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
661  int length = deoptimizations_.length();
662  if (length == 0) return;
663  Handle<DeoptimizationInputData> data =
664  factory()->NewDeoptimizationInputData(length, TENURED);
665 
666  Handle<ByteArray> translations = translations_.CreateByteArray();
667  data->SetTranslationByteArray(*translations);
668  data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));
669 
670  Handle<FixedArray> literals =
671  factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
672  for (int i = 0; i < deoptimization_literals_.length(); i++) {
673  literals->set(i, *deoptimization_literals_[i]);
674  }
675  data->SetLiteralArray(*literals);
676 
677  data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id()));
678  data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_));
679 
680  // Populate the deoptimization entries.
681  for (int i = 0; i < length; i++) {
682  LEnvironment* env = deoptimizations_[i];
683  data->SetAstId(i, Smi::FromInt(env->ast_id()));
684  data->SetTranslationIndex(i, Smi::FromInt(env->translation_index()));
685  data->SetArgumentsStackHeight(i,
686  Smi::FromInt(env->arguments_stack_height()));
687  data->SetPc(i, Smi::FromInt(env->pc_offset()));
688  }
689  code->set_deoptimization_data(*data);
690 }
691 
692 
693 int LCodeGen::DefineDeoptimizationLiteral(Handle<Object> literal) {
694  int result = deoptimization_literals_.length();
695  for (int i = 0; i < deoptimization_literals_.length(); ++i) {
696  if (deoptimization_literals_[i].is_identical_to(literal)) return i;
697  }
698  deoptimization_literals_.Add(literal, zone());
699  return result;
700 }
701 
702 
703 void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
704  ASSERT(deoptimization_literals_.length() == 0);
705 
706  const ZoneList<Handle<JSFunction> >* inlined_closures =
707  chunk()->inlined_closures();
708 
709  for (int i = 0, length = inlined_closures->length();
710  i < length;
711  i++) {
712  DefineDeoptimizationLiteral(inlined_closures->at(i));
713  }
714 
715  inlined_function_count_ = deoptimization_literals_.length();
716 }
717 
718 
719 void LCodeGen::RecordSafepointWithLazyDeopt(
720  LInstruction* instr, SafepointMode safepoint_mode) {
721  if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
722  RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt);
723  } else {
724  ASSERT(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
725  RecordSafepointWithRegisters(
726  instr->pointer_map(), 0, Safepoint::kLazyDeopt);
727  }
728 }
729 
730 
731 void LCodeGen::RecordSafepoint(
732  LPointerMap* pointers,
733  Safepoint::Kind kind,
734  int arguments,
735  Safepoint::DeoptMode deopt_mode) {
736  ASSERT(kind == expected_safepoint_kind_);
737  const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();
738  Safepoint safepoint =
739  safepoints_.DefineSafepoint(masm(), kind, arguments, deopt_mode);
740  for (int i = 0; i < operands->length(); i++) {
741  LOperand* pointer = operands->at(i);
742  if (pointer->IsStackSlot()) {
743  safepoint.DefinePointerSlot(pointer->index(), zone());
744  } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
745  safepoint.DefinePointerRegister(ToRegister(pointer), zone());
746  }
747  }
748 }
749 
750 
751 void LCodeGen::RecordSafepoint(LPointerMap* pointers,
752  Safepoint::DeoptMode mode) {
753  RecordSafepoint(pointers, Safepoint::kSimple, 0, mode);
754 }
755 
756 
757 void LCodeGen::RecordSafepoint(Safepoint::DeoptMode mode) {
758  LPointerMap empty_pointers(RelocInfo::kNoPosition, zone());
759  RecordSafepoint(&empty_pointers, mode);
760 }
761 
762 
763 void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
764  int arguments,
765  Safepoint::DeoptMode mode) {
766  RecordSafepoint(pointers, Safepoint::kWithRegisters, arguments, mode);
767 }
768 
769 
770 void LCodeGen::RecordPosition(int position) {
771  if (position == RelocInfo::kNoPosition) return;
772  masm()->positions_recorder()->RecordPosition(position);
773 }
774 
775 
776 void LCodeGen::DoLabel(LLabel* label) {
777  if (label->is_loop_header()) {
778  Comment(";;; B%d - LOOP entry", label->block_id());
779  } else {
780  Comment(";;; B%d", label->block_id());
781  }
782  __ bind(label->label());
783  current_block_ = label->block_id();
784  DoGap(label);
785 }
786 
787 
788 void LCodeGen::DoParallelMove(LParallelMove* move) {
789  resolver_.Resolve(move);
790 }
791 
792 
793 void LCodeGen::DoGap(LGap* gap) {
794  for (int i = LGap::FIRST_INNER_POSITION;
795  i <= LGap::LAST_INNER_POSITION;
796  i++) {
797  LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i);
798  LParallelMove* move = gap->GetParallelMove(inner_pos);
799  if (move != NULL) DoParallelMove(move);
800  }
801 }
802 
803 
804 void LCodeGen::DoInstructionGap(LInstructionGap* instr) {
805  DoGap(instr);
806 }
807 
808 
809 void LCodeGen::DoParameter(LParameter* instr) {
810  // Nothing to do.
811 }
812 
813 
814 void LCodeGen::DoCallStub(LCallStub* instr) {
815  ASSERT(ToRegister(instr->context()).is(esi));
816  ASSERT(ToRegister(instr->result()).is(eax));
817  switch (instr->hydrogen()->major_key()) {
818  case CodeStub::RegExpConstructResult: {
819  RegExpConstructResultStub stub;
820  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
821  break;
822  }
823  case CodeStub::RegExpExec: {
824  RegExpExecStub stub;
825  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
826  break;
827  }
828  case CodeStub::SubString: {
829  SubStringStub stub;
830  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
831  break;
832  }
833  case CodeStub::NumberToString: {
834  NumberToStringStub stub;
835  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
836  break;
837  }
838  case CodeStub::StringAdd: {
839  StringAddStub stub(NO_STRING_ADD_FLAGS);
840  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
841  break;
842  }
843  case CodeStub::StringCompare: {
844  StringCompareStub stub;
845  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
846  break;
847  }
848  case CodeStub::TranscendentalCache: {
849  TranscendentalCacheStub stub(instr->transcendental_type(),
850  TranscendentalCacheStub::TAGGED);
851  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
852  break;
853  }
854  default:
855  UNREACHABLE();
856  }
857 }
858 
859 
860 void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
861  // Nothing to do.
862 }
863 
864 
865 void LCodeGen::DoModI(LModI* instr) {
866  if (instr->hydrogen()->HasPowerOf2Divisor()) {
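// A power-of-two divisor lets the remainder be computed with a mask: for a
// non-negative dividend, x % (1 << k) == x & ((1 << k) - 1). A negative
// dividend is negated, masked, and negated again so the result keeps the
// dividend's sign, as required for JavaScript's % operator.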
867  Register dividend = ToRegister(instr->InputAt(0));
868 
869  int32_t divisor =
870  HConstant::cast(instr->hydrogen()->right())->Integer32Value();
871 
872  if (divisor < 0) divisor = -divisor;
873 
874  Label positive_dividend, done;
875  __ test(dividend, Operand(dividend));
876  __ j(not_sign, &positive_dividend, Label::kNear);
877  __ neg(dividend);
878  __ and_(dividend, divisor - 1);
879  __ neg(dividend);
880  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
881  __ j(not_zero, &done, Label::kNear);
882  DeoptimizeIf(no_condition, instr->environment());
883  } else {
884  __ jmp(&done, Label::kNear);
885  }
886  __ bind(&positive_dividend);
887  __ and_(dividend, divisor - 1);
888  __ bind(&done);
889  } else {
890  Label done, remainder_eq_dividend, slow, do_subtraction, both_positive;
891  Register left_reg = ToRegister(instr->InputAt(0));
892  Register right_reg = ToRegister(instr->InputAt(1));
893  Register result_reg = ToRegister(instr->result());
894 
895  ASSERT(left_reg.is(eax));
896  ASSERT(result_reg.is(edx));
897  ASSERT(!right_reg.is(eax));
898  ASSERT(!right_reg.is(edx));
899 
900  // Check for x % 0.
901  if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
902  __ test(right_reg, Operand(right_reg));
903  DeoptimizeIf(zero, instr->environment());
904  }
905 
906  __ test(left_reg, Operand(left_reg));
907  __ j(zero, &remainder_eq_dividend, Label::kNear);
908  __ j(sign, &slow, Label::kNear);
909 
910  __ test(right_reg, Operand(right_reg));
911  __ j(not_sign, &both_positive, Label::kNear);
912  // The sign of the divisor doesn't matter.
913  __ neg(right_reg);
914 
915  __ bind(&both_positive);
916  // If the dividend is smaller than the nonnegative
917  // divisor, the dividend is the result.
918  __ cmp(left_reg, Operand(right_reg));
919  __ j(less, &remainder_eq_dividend, Label::kNear);
920 
921  // Check if the divisor is a PowerOfTwo integer.
922  Register scratch = ToRegister(instr->TempAt(0));
923  __ mov(scratch, right_reg);
924  __ sub(Operand(scratch), Immediate(1));
925  __ test(scratch, Operand(right_reg));
926  __ j(not_zero, &do_subtraction, Label::kNear);
927  __ and_(left_reg, Operand(scratch));
928  __ jmp(&remainder_eq_dividend, Label::kNear);
929 
930  __ bind(&do_subtraction);
931  const int kUnfolds = 3;
932  // Try a few subtractions of the dividend.
933  __ mov(scratch, left_reg);
934  for (int i = 0; i < kUnfolds; i++) {
935  // Reduce the dividend by the divisor.
936  __ sub(left_reg, Operand(right_reg));
937  // Check if the dividend is less than the divisor.
938  __ cmp(left_reg, Operand(right_reg));
939  __ j(less, &remainder_eq_dividend, Label::kNear);
940  }
941  __ mov(left_reg, scratch);
942 
943  // Slow case, using idiv instruction.
944  __ bind(&slow);
945  // Sign extend to edx.
946  __ cdq();
947 
948  // Check for (0 % -x) that will produce negative zero.
949  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
950  Label positive_left;
951  Label done;
952  __ test(left_reg, Operand(left_reg));
953  __ j(not_sign, &positive_left, Label::kNear);
954  __ idiv(right_reg);
955 
956  // Test the remainder for 0, because then the result would be -0.
957  __ test(result_reg, Operand(result_reg));
958  __ j(not_zero, &done, Label::kNear);
959 
960  DeoptimizeIf(no_condition, instr->environment());
961  __ bind(&positive_left);
962  __ idiv(right_reg);
963  __ bind(&done);
964  } else {
965  __ idiv(right_reg);
966  }
967  __ jmp(&done, Label::kNear);
968 
969  __ bind(&remainder_eq_dividend);
970  __ mov(result_reg, left_reg);
971 
972  __ bind(&done);
973  }
974 }
975 
976 
977 void LCodeGen::DoDivI(LDivI* instr) {
978  LOperand* right = instr->InputAt(1);
979  ASSERT(ToRegister(instr->result()).is(eax));
980  ASSERT(ToRegister(instr->InputAt(0)).is(eax));
981  ASSERT(!ToRegister(instr->InputAt(1)).is(eax));
982  ASSERT(!ToRegister(instr->InputAt(1)).is(edx));
983 
984  Register left_reg = eax;
985 
986  // Check for x / 0.
987  Register right_reg = ToRegister(right);
988  if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
989  __ test(right_reg, ToOperand(right));
990  DeoptimizeIf(zero, instr->environment());
991  }
992 
993  // Check for (0 / -x) that will produce negative zero.
994  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
995  Label left_not_zero;
996  __ test(left_reg, Operand(left_reg));
997  __ j(not_zero, &left_not_zero, Label::kNear);
998  __ test(right_reg, ToOperand(right));
999  DeoptimizeIf(sign, instr->environment());
1000  __ bind(&left_not_zero);
1001  }
1002 
1003  // Check for (-kMinInt / -1).
1004  if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
1005  Label left_not_min_int;
1006  __ cmp(left_reg, kMinInt);
1007  __ j(not_zero, &left_not_min_int, Label::kNear);
1008  __ cmp(right_reg, -1);
1009  DeoptimizeIf(zero, instr->environment());
1010  __ bind(&left_not_min_int);
1011  }
1012 
1013  // Sign extend to edx.
1014  __ cdq();
1015  __ idiv(right_reg);
1016 
1017  // Deoptimize if remainder is not 0.
1018  __ test(edx, Operand(edx));
1019  DeoptimizeIf(not_zero, instr->environment());
1020 }
1021 
1022 
1023 void LCodeGen::DoMulI(LMulI* instr) {
1024  Register left = ToRegister(instr->InputAt(0));
1025  LOperand* right = instr->InputAt(1);
1026 
1027  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
1028  __ mov(ToRegister(instr->TempAt(0)), left);
1029  }
1030 
1031  if (right->IsConstantOperand()) {
1032  // Try strength reductions on the multiplication.
1033  // All replacement instructions are at most as long as the imul
1034  // and have better latency.
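// For example, a constant factor of 9 is emitted as
// "lea left, [left + left*8]" and a factor of 4 as "shl left, 2".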
1035  int constant = ToInteger32(LConstantOperand::cast(right));
1036  if (constant == -1) {
1037  __ neg(left);
1038  } else if (constant == 0) {
1039  __ xor_(left, Operand(left));
1040  } else if (constant == 2) {
1041  __ add(left, Operand(left));
1042  } else if (!instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
1043  // If we know that the multiplication can't overflow, it's safe to
1044  // use instructions that don't set the overflow flag for the
1045  // multiplication.
1046  switch (constant) {
1047  case 1:
1048  // Do nothing.
1049  break;
1050  case 3:
1051  __ lea(left, Operand(left, left, times_2, 0));
1052  break;
1053  case 4:
1054  __ shl(left, 2);
1055  break;
1056  case 5:
1057  __ lea(left, Operand(left, left, times_4, 0));
1058  break;
1059  case 8:
1060  __ shl(left, 3);
1061  break;
1062  case 9:
1063  __ lea(left, Operand(left, left, times_8, 0));
1064  break;
1065  case 16:
1066  __ shl(left, 4);
1067  break;
1068  default:
1069  __ imul(left, left, constant);
1070  break;
1071  }
1072  } else {
1073  __ imul(left, left, constant);
1074  }
1075  } else {
1076  __ imul(left, ToOperand(right));
1077  }
1078 
1079  if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
1080  DeoptimizeIf(overflow, instr->environment());
1081  }
1082 
1083  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
1084  // Bail out if the result is supposed to be negative zero.
1085  Label done;
1086  __ test(left, Operand(left));
1087  __ j(not_zero, &done, Label::kNear);
1088  if (right->IsConstantOperand()) {
1089  if (ToInteger32(LConstantOperand::cast(right)) <= 0) {
1090  DeoptimizeIf(no_condition, instr->environment());
1091  }
1092  } else {
1093  // Test the non-zero operand for negative sign.
1094  __ or_(ToRegister(instr->TempAt(0)), ToOperand(right));
1095  DeoptimizeIf(sign, instr->environment());
1096  }
1097  __ bind(&done);
1098  }
1099 }
1100 
1101 
1102 void LCodeGen::DoBitI(LBitI* instr) {
1103  LOperand* left = instr->InputAt(0);
1104  LOperand* right = instr->InputAt(1);
1105  ASSERT(left->Equals(instr->result()));
1106  ASSERT(left->IsRegister());
1107 
1108  if (right->IsConstantOperand()) {
1109  int right_operand = ToInteger32(LConstantOperand::cast(right));
1110  switch (instr->op()) {
1111  case Token::BIT_AND:
1112  __ and_(ToRegister(left), right_operand);
1113  break;
1114  case Token::BIT_OR:
1115  __ or_(ToRegister(left), right_operand);
1116  break;
1117  case Token::BIT_XOR:
1118  __ xor_(ToRegister(left), right_operand);
1119  break;
1120  default:
1121  UNREACHABLE();
1122  break;
1123  }
1124  } else {
1125  switch (instr->op()) {
1126  case Token::BIT_AND:
1127  __ and_(ToRegister(left), ToOperand(right));
1128  break;
1129  case Token::BIT_OR:
1130  __ or_(ToRegister(left), ToOperand(right));
1131  break;
1132  case Token::BIT_XOR:
1133  __ xor_(ToRegister(left), ToOperand(right));
1134  break;
1135  default:
1136  UNREACHABLE();
1137  break;
1138  }
1139  }
1140 }
1141 
1142 
1143 void LCodeGen::DoShiftI(LShiftI* instr) {
1144  LOperand* left = instr->InputAt(0);
1145  LOperand* right = instr->InputAt(1);
1146  ASSERT(left->Equals(instr->result()));
1147  ASSERT(left->IsRegister());
1148  if (right->IsRegister()) {
1149  ASSERT(ToRegister(right).is(ecx));
1150 
1151  switch (instr->op()) {
1152  case Token::SAR:
1153  __ sar_cl(ToRegister(left));
1154  break;
1155  case Token::SHR:
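// A logical shift right yields an unsigned result; if the shift count in
// ecx can be zero, the sign bit may remain set and the value would not be
// representable as a signed 32-bit integer, so that case deoptimizes.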
1156  __ shr_cl(ToRegister(left));
1157  if (instr->can_deopt()) {
1158  __ test(ToRegister(left), Immediate(0x80000000));
1159  DeoptimizeIf(not_zero, instr->environment());
1160  }
1161  break;
1162  case Token::SHL:
1163  __ shl_cl(ToRegister(left));
1164  break;
1165  default:
1166  UNREACHABLE();
1167  break;
1168  }
1169  } else {
1170  int value = ToInteger32(LConstantOperand::cast(right));
1171  uint8_t shift_count = static_cast<uint8_t>(value & 0x1F);
1172  switch (instr->op()) {
1173  case Token::SAR:
1174  if (shift_count != 0) {
1175  __ sar(ToRegister(left), shift_count);
1176  }
1177  break;
1178  case Token::SHR:
1179  if (shift_count == 0 && instr->can_deopt()) {
1180  __ test(ToRegister(left), Immediate(0x80000000));
1181  DeoptimizeIf(not_zero, instr->environment());
1182  } else {
1183  __ shr(ToRegister(left), shift_count);
1184  }
1185  break;
1186  case Token::SHL:
1187  if (shift_count != 0) {
1188  __ shl(ToRegister(left), shift_count);
1189  }
1190  break;
1191  default:
1192  UNREACHABLE();
1193  break;
1194  }
1195  }
1196 }
1197 
1198 
1199 void LCodeGen::DoSubI(LSubI* instr) {
1200  LOperand* left = instr->InputAt(0);
1201  LOperand* right = instr->InputAt(1);
1202  ASSERT(left->Equals(instr->result()));
1203 
1204  if (right->IsConstantOperand()) {
1205  __ sub(ToOperand(left), ToInteger32Immediate(right));
1206  } else {
1207  __ sub(ToRegister(left), ToOperand(right));
1208  }
1209  if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
1210  DeoptimizeIf(overflow, instr->environment());
1211  }
1212 }
1213 
1214 
1215 void LCodeGen::DoConstantI(LConstantI* instr) {
1216  ASSERT(instr->result()->IsRegister());
1217  __ Set(ToRegister(instr->result()), Immediate(instr->value()));
1218 }
1219 
1220 
1221 void LCodeGen::DoConstantD(LConstantD* instr) {
1222  ASSERT(instr->result()->IsDoubleRegister());
1223  XMMRegister res = ToDoubleRegister(instr->result());
1224  double v = instr->value();
1225  // Use xor to produce +0.0 in a fast and compact way, but avoid doing
1226  // so if the constant is -0.0.
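// For other constants the 64-bit bit pattern is assembled directly in the
// XMM register: with SSE4.1 the upper half is inserted with pinsrd,
// otherwise it is shifted into place with psllq and the lower half merged
// in with por.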
1227  if (BitCast<uint64_t, double>(v) == 0) {
1228  __ xorps(res, res);
1229  } else {
1230  Register temp = ToRegister(instr->TempAt(0));
1231  uint64_t int_val = BitCast<uint64_t, double>(v);
1232  int32_t lower = static_cast<int32_t>(int_val);
1233  int32_t upper = static_cast<int32_t>(int_val >> (kBitsPerInt));
1234  if (CpuFeatures::IsSupported(SSE4_1)) {
1235  CpuFeatures::Scope scope(SSE4_1);
1236  if (lower != 0) {
1237  __ Set(temp, Immediate(lower));
1238  __ movd(res, Operand(temp));
1239  __ Set(temp, Immediate(upper));
1240  __ pinsrd(res, Operand(temp), 1);
1241  } else {
1242  __ xorps(res, res);
1243  __ Set(temp, Immediate(upper));
1244  __ pinsrd(res, Operand(temp), 1);
1245  }
1246  } else {
1247  __ Set(temp, Immediate(upper));
1248  __ movd(res, Operand(temp));
1249  __ psllq(res, 32);
1250  if (lower != 0) {
1251  __ Set(temp, Immediate(lower));
1252  __ movd(xmm0, Operand(temp));
1253  __ por(res, xmm0);
1254  }
1255  }
1256  }
1257 }
1258 
1259 
1260 void LCodeGen::DoConstantT(LConstantT* instr) {
1261  Register reg = ToRegister(instr->result());
1262  Handle<Object> handle = instr->value();
1263  if (handle->IsHeapObject()) {
1264  __ LoadHeapObject(reg, Handle<HeapObject>::cast(handle));
1265  } else {
1266  __ Set(reg, Immediate(handle));
1267  }
1268 }
1269 
1270 
1271 void LCodeGen::DoJSArrayLength(LJSArrayLength* instr) {
1272  Register result = ToRegister(instr->result());
1273  Register array = ToRegister(instr->InputAt(0));
1274  __ mov(result, FieldOperand(array, JSArray::kLengthOffset));
1275 }
1276 
1277 
1278 void LCodeGen::DoFixedArrayBaseLength(
1279  LFixedArrayBaseLength* instr) {
1280  Register result = ToRegister(instr->result());
1281  Register array = ToRegister(instr->InputAt(0));
1282  __ mov(result, FieldOperand(array, FixedArrayBase::kLengthOffset));
1283 }
1284 
1285 
1286 void LCodeGen::DoElementsKind(LElementsKind* instr) {
1287  Register result = ToRegister(instr->result());
1288  Register input = ToRegister(instr->InputAt(0));
1289 
1290  // Load map into |result|.
1291  __ mov(result, FieldOperand(input, HeapObject::kMapOffset));
1292  // Load the map's "bit field 2" into |result|. We only need the first byte,
1293  // but the following masking takes care of that anyway.
1294  __ mov(result, FieldOperand(result, Map::kBitField2Offset));
1295  // Retrieve elements_kind from bit field 2.
1296  __ and_(result, Map::kElementsKindMask);
1297  __ shr(result, Map::kElementsKindShift);
1298 }
1299 
1300 
1301 void LCodeGen::DoValueOf(LValueOf* instr) {
1302  Register input = ToRegister(instr->InputAt(0));
1303  Register result = ToRegister(instr->result());
1304  Register map = ToRegister(instr->TempAt(0));
1305  ASSERT(input.is(result));
1306 
1307  Label done;
1308  // If the object is a smi return the object.
1309  __ JumpIfSmi(input, &done, Label::kNear);
1310 
1311  // If the object is not a value type, return the object.
1312  __ CmpObjectType(input, JS_VALUE_TYPE, map);
1313  __ j(not_equal, &done, Label::kNear);
1314  __ mov(result, FieldOperand(input, JSValue::kValueOffset));
1315 
1316  __ bind(&done);
1317 }
1318 
1319 
1320 void LCodeGen::DoDateField(LDateField* instr) {
1321  Register object = ToRegister(instr->InputAt(0));
1322  Register result = ToRegister(instr->result());
1323  Register scratch = ToRegister(instr->TempAt(0));
1324  Smi* index = instr->index();
1325  Label runtime, done;
1326  ASSERT(object.is(result));
1327  ASSERT(object.is(eax));
1328 
1329 #ifdef DEBUG
1330  __ AbortIfSmi(object);
1331  __ CmpObjectType(object, JS_DATE_TYPE, scratch);
1332  __ Assert(equal, "Trying to get date field from non-date.");
1333 #endif
1334 
1335  if (index->value() == 0) {
1336  __ mov(result, FieldOperand(object, JSDate::kValueOffset));
1337  } else {
1338  if (index->value() < JSDate::kFirstUncachedField) {
1339  ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
1340  __ mov(scratch, Operand::StaticVariable(stamp));
1341  __ cmp(scratch, FieldOperand(object, JSDate::kCacheStampOffset));
1342  __ j(not_equal, &runtime, Label::kNear);
1343  __ mov(result, FieldOperand(object, JSDate::kValueOffset +
1344  kPointerSize * index->value()));
1345  __ jmp(&done);
1346  }
1347  __ bind(&runtime);
1348  __ PrepareCallCFunction(2, scratch);
1349  __ mov(Operand(esp, 0), object);
1350  __ mov(Operand(esp, 1 * kPointerSize), Immediate(index));
1351  __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
1352  __ bind(&done);
1353  }
1354 }
1355 
1356 
1357 void LCodeGen::DoBitNotI(LBitNotI* instr) {
1358  LOperand* input = instr->InputAt(0);
1359  ASSERT(input->Equals(instr->result()));
1360  __ not_(ToRegister(input));
1361 }
1362 
1363 
1364 void LCodeGen::DoThrow(LThrow* instr) {
1365  __ push(ToOperand(instr->value()));
1366  ASSERT(ToRegister(instr->context()).is(esi));
1367  CallRuntime(Runtime::kThrow, 1, instr);
1368 
1369  if (FLAG_debug_code) {
1370  Comment("Unreachable code.");
1371  __ int3();
1372  }
1373 }
1374 
1375 
1376 void LCodeGen::DoAddI(LAddI* instr) {
1377  LOperand* left = instr->InputAt(0);
1378  LOperand* right = instr->InputAt(1);
1379  ASSERT(left->Equals(instr->result()));
1380 
1381  if (right->IsConstantOperand()) {
1382  __ add(ToOperand(left), ToInteger32Immediate(right));
1383  } else {
1384  __ add(ToRegister(left), ToOperand(right));
1385  }
1386 
1387  if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
1388  DeoptimizeIf(overflow, instr->environment());
1389  }
1390 }
1391 
1392 
1393 void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
1394  XMMRegister left = ToDoubleRegister(instr->InputAt(0));
1395  XMMRegister right = ToDoubleRegister(instr->InputAt(1));
1396  XMMRegister result = ToDoubleRegister(instr->result());
1397  // Modulo uses a fixed result register.
1398  ASSERT(instr->op() == Token::MOD || left.is(result));
1399  switch (instr->op()) {
1400  case Token::ADD:
1401  __ addsd(left, right);
1402  break;
1403  case Token::SUB:
1404  __ subsd(left, right);
1405  break;
1406  case Token::MUL:
1407  __ mulsd(left, right);
1408  break;
1409  case Token::DIV:
1410  __ divsd(left, right);
1411  break;
1412  case Token::MOD: {
1413  // Pass two doubles as arguments on the stack.
1414  __ PrepareCallCFunction(4, eax);
1415  __ movdbl(Operand(esp, 0 * kDoubleSize), left);
1416  __ movdbl(Operand(esp, 1 * kDoubleSize), right);
1417  __ CallCFunction(
1418  ExternalReference::double_fp_operation(Token::MOD, isolate()),
1419  4);
1420 
1421  // Return value is in st(0) on ia32.
1422  // Store it into the (fixed) result register.
1423  __ sub(Operand(esp), Immediate(kDoubleSize));
1424  __ fstp_d(Operand(esp, 0));
1425  __ movdbl(result, Operand(esp, 0));
1426  __ add(Operand(esp), Immediate(kDoubleSize));
1427  break;
1428  }
1429  default:
1430  UNREACHABLE();
1431  break;
1432  }
1433 }
1434 
1435 
1436 void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
1437  ASSERT(ToRegister(instr->context()).is(esi));
1438  ASSERT(ToRegister(instr->left()).is(edx));
1439  ASSERT(ToRegister(instr->right()).is(eax));
1440  ASSERT(ToRegister(instr->result()).is(eax));
1441 
1442  BinaryOpStub stub(instr->op(), NO_OVERWRITE);
1443  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
1444  __ nop(); // Signals no inlined code.
1445 }
1446 
1447 
1448 int LCodeGen::GetNextEmittedBlock(int block) {
1449  for (int i = block + 1; i < graph()->blocks()->length(); ++i) {
1450  LLabel* label = chunk_->GetLabel(i);
1451  if (!label->HasReplacement()) return i;
1452  }
1453  return -1;
1454 }
1455 
1456 
1457 void LCodeGen::EmitBranch(int left_block, int right_block, Condition cc) {
1458  int next_block = GetNextEmittedBlock(current_block_);
1459  right_block = chunk_->LookupDestination(right_block);
1460  left_block = chunk_->LookupDestination(left_block);
1461 
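// If one of the targets is the block emitted immediately after this one,
// a single conditional jump suffices and the other case falls through;
// otherwise both a conditional and an unconditional jump are emitted.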
1462  if (right_block == left_block) {
1463  EmitGoto(left_block);
1464  } else if (left_block == next_block) {
1465  __ j(NegateCondition(cc), chunk_->GetAssemblyLabel(right_block));
1466  } else if (right_block == next_block) {
1467  __ j(cc, chunk_->GetAssemblyLabel(left_block));
1468  } else {
1469  __ j(cc, chunk_->GetAssemblyLabel(left_block));
1470  __ jmp(chunk_->GetAssemblyLabel(right_block));
1471  }
1472 }
1473 
1474 
1475 void LCodeGen::DoBranch(LBranch* instr) {
1476  int true_block = chunk_->LookupDestination(instr->true_block_id());
1477  int false_block = chunk_->LookupDestination(instr->false_block_id());
1478 
1479  Representation r = instr->hydrogen()->value()->representation();
1480  if (r.IsInteger32()) {
1481  Register reg = ToRegister(instr->InputAt(0));
1482  __ test(reg, Operand(reg));
1483  EmitBranch(true_block, false_block, not_zero);
1484  } else if (r.IsDouble()) {
1485  XMMRegister reg = ToDoubleRegister(instr->InputAt(0));
1486  __ xorps(xmm0, xmm0);
1487  __ ucomisd(reg, xmm0);
1488  EmitBranch(true_block, false_block, not_equal);
1489  } else {
1490  ASSERT(r.IsTagged());
1491  Register reg = ToRegister(instr->InputAt(0));
1492  HType type = instr->hydrogen()->value()->type();
1493  if (type.IsBoolean()) {
1494  __ cmp(reg, factory()->true_value());
1495  EmitBranch(true_block, false_block, equal);
1496  } else if (type.IsSmi()) {
1497  __ test(reg, Operand(reg));
1498  EmitBranch(true_block, false_block, not_equal);
1499  } else {
1500  Label* true_label = chunk_->GetAssemblyLabel(true_block);
1501  Label* false_label = chunk_->GetAssemblyLabel(false_block);
1502 
1503  ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types();
1504  // Avoid deopts in the case where we've never executed this path before.
1505  if (expected.IsEmpty()) expected = ToBooleanStub::all_types();
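// Each check below is emitted only if type feedback recorded that the
// corresponding input type has actually occurred; a value matching none of
// the expected types falls through to the deoptimization at the end.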
1506 
1507  if (expected.Contains(ToBooleanStub::UNDEFINED)) {
1508  // undefined -> false.
1509  __ cmp(reg, factory()->undefined_value());
1510  __ j(equal, false_label);
1511  }
1512  if (expected.Contains(ToBooleanStub::BOOLEAN)) {
1513  // true -> true.
1514  __ cmp(reg, factory()->true_value());
1515  __ j(equal, true_label);
1516  // false -> false.
1517  __ cmp(reg, factory()->false_value());
1518  __ j(equal, false_label);
1519  }
1520  if (expected.Contains(ToBooleanStub::NULL_TYPE)) {
1521  // 'null' -> false.
1522  __ cmp(reg, factory()->null_value());
1523  __ j(equal, false_label);
1524  }
1525 
1526  if (expected.Contains(ToBooleanStub::SMI)) {
1527  // Smis: 0 -> false, all other -> true.
1528  __ test(reg, Operand(reg));
1529  __ j(equal, false_label);
1530  __ JumpIfSmi(reg, true_label);
1531  } else if (expected.NeedsMap()) {
1532  // If we need a map later and have a Smi -> deopt.
1533  __ test(reg, Immediate(kSmiTagMask));
1534  DeoptimizeIf(zero, instr->environment());
1535  }
1536 
1537  Register map = no_reg; // Keep the compiler happy.
1538  if (expected.NeedsMap()) {
1539  map = ToRegister(instr->TempAt(0));
1540  ASSERT(!map.is(reg));
1541  __ mov(map, FieldOperand(reg, HeapObject::kMapOffset));
1542 
1543  if (expected.CanBeUndetectable()) {
1544  // Undetectable -> false.
1545  __ test_b(FieldOperand(map, Map::kBitFieldOffset),
1546  1 << Map::kIsUndetectable);
1547  __ j(not_zero, false_label);
1548  }
1549  }
1550 
1551  if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) {
1552  // spec object -> true.
1553  __ CmpInstanceType(map, FIRST_SPEC_OBJECT_TYPE);
1554  __ j(above_equal, true_label);
1555  }
1556 
1557  if (expected.Contains(ToBooleanStub::STRING)) {
1558  // String value -> false iff empty.
1559  Label not_string;
1560  __ CmpInstanceType(map, FIRST_NONSTRING_TYPE);
1561  __ j(above_equal, &not_string, Label::kNear);
1562  __ cmp(FieldOperand(reg, String::kLengthOffset), Immediate(0));
1563  __ j(not_zero, true_label);
1564  __ jmp(false_label);
1565  __ bind(&not_string);
1566  }
1567 
1568  if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
1569  // heap number -> false iff +0, -0, or NaN.
1570  Label not_heap_number;
1571  __ cmp(FieldOperand(reg, HeapObject::kMapOffset),
1572  factory()->heap_number_map());
1573  __ j(not_equal, &not_heap_number, Label::kNear);
1574  __ fldz();
1575  __ fld_d(FieldOperand(reg, HeapNumber::kValueOffset));
1576  __ FCmp();
1577  __ j(zero, false_label);
1578  __ jmp(true_label);
1579  __ bind(&not_heap_number);
1580  }
1581 
1582  // We've seen something for the first time -> deopt.
1583  DeoptimizeIf(no_condition, instr->environment());
1584  }
1585  }
1586 }
1587 
1588 
1589 void LCodeGen::EmitGoto(int block) {
1590  block = chunk_->LookupDestination(block);
1591  int next_block = GetNextEmittedBlock(current_block_);
1592  if (block != next_block) {
1593  __ jmp(chunk_->GetAssemblyLabel(block));
1594  }
1595 }
1596 
1597 
1598 void LCodeGen::DoGoto(LGoto* instr) {
1599  EmitGoto(instr->block_id());
1600 }
1601 
1602 
1603 Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
1604  Condition cond = no_condition;
1605  switch (op) {
1606  case Token::EQ:
1607  case Token::EQ_STRICT:
1608  cond = equal;
1609  break;
1610  case Token::LT:
1611  cond = is_unsigned ? below : less;
1612  break;
1613  case Token::GT:
1614  cond = is_unsigned ? above : greater;
1615  break;
1616  case Token::LTE:
1617  cond = is_unsigned ? below_equal : less_equal;
1618  break;
1619  case Token::GTE:
1620  cond = is_unsigned ? above_equal : greater_equal;
1621  break;
1622  case Token::IN:
1623  case Token::INSTANCEOF:
1624  default:
1625  UNREACHABLE();
1626  }
1627  return cond;
1628 }
1629 
1630 
1631 void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) {
1632  LOperand* left = instr->InputAt(0);
1633  LOperand* right = instr->InputAt(1);
1634  int false_block = chunk_->LookupDestination(instr->false_block_id());
1635  int true_block = chunk_->LookupDestination(instr->true_block_id());
1636  Condition cc = TokenToCondition(instr->op(), instr->is_double());
1637 
1638  if (left->IsConstantOperand() && right->IsConstantOperand()) {
1639  // We can statically evaluate the comparison.
1640  double left_val = ToDouble(LConstantOperand::cast(left));
1641  double right_val = ToDouble(LConstantOperand::cast(right));
1642  int next_block =
1643  EvalComparison(instr->op(), left_val, right_val) ? true_block
1644  : false_block;
1645  EmitGoto(next_block);
1646  } else {
1647  if (instr->is_double()) {
1648  // Don't base result on EFLAGS when a NaN is involved. Instead
1649  // jump to the false block.
1650  __ ucomisd(ToDoubleRegister(left), ToDoubleRegister(right));
1651  __ j(parity_even, chunk_->GetAssemblyLabel(false_block));
1652  } else {
1653  if (right->IsConstantOperand()) {
1654  __ cmp(ToRegister(left), ToInteger32Immediate(right));
1655  } else if (left->IsConstantOperand()) {
1656  __ cmp(ToOperand(right), ToInteger32Immediate(left));
1657  // We transposed the operands. Reverse the condition.
1658  cc = ReverseCondition(cc);
1659  } else {
1660  __ cmp(ToRegister(left), ToOperand(right));
1661  }
1662  }
1663  EmitBranch(true_block, false_block, cc);
1664  }
1665 }
1666 
1667 
1668 void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
1669  Register left = ToRegister(instr->InputAt(0));
1670  Operand right = ToOperand(instr->InputAt(1));
1671  int false_block = chunk_->LookupDestination(instr->false_block_id());
1672  int true_block = chunk_->LookupDestination(instr->true_block_id());
1673 
1674  __ cmp(left, Operand(right));
1675  EmitBranch(true_block, false_block, equal);
1676 }
1677 
1678 
1679 void LCodeGen::DoCmpConstantEqAndBranch(LCmpConstantEqAndBranch* instr) {
1680  Register left = ToRegister(instr->InputAt(0));
1681  int true_block = chunk_->LookupDestination(instr->true_block_id());
1682  int false_block = chunk_->LookupDestination(instr->false_block_id());
1683 
1684  __ cmp(left, instr->hydrogen()->right());
1685  EmitBranch(true_block, false_block, equal);
1686 }
1687 
1688 
1689 void LCodeGen::DoIsNilAndBranch(LIsNilAndBranch* instr) {
1690  Register reg = ToRegister(instr->InputAt(0));
1691  int false_block = chunk_->LookupDestination(instr->false_block_id());
1692 
1693  // If the expression is known to be untagged or a smi, then it's definitely
1694  // not null, and it can't be an undetectable object.
1695  if (instr->hydrogen()->representation().IsSpecialization() ||
1696  instr->hydrogen()->type().IsSmi()) {
1697  EmitGoto(false_block);
1698  return;
1699  }
1700 
1701  int true_block = chunk_->LookupDestination(instr->true_block_id());
1702  Handle<Object> nil_value = instr->nil() == kNullValue ?
1703  factory()->null_value() :
1704  factory()->undefined_value();
1705  __ cmp(reg, nil_value);
1706  if (instr->kind() == kStrictEquality) {
1707  EmitBranch(true_block, false_block, equal);
1708  } else {
1709  Handle<Object> other_nil_value = instr->nil() == kNullValue ?
1710  factory()->undefined_value() :
1711  factory()->null_value();
1712  Label* true_label = chunk_->GetAssemblyLabel(true_block);
1713  Label* false_label = chunk_->GetAssemblyLabel(false_block);
1714  __ j(equal, true_label);
1715  __ cmp(reg, other_nil_value);
1716  __ j(equal, true_label);
1717  __ JumpIfSmi(reg, false_label);
1718  // Check for undetectable objects by looking in the bit field in
1719  // the map. The object has already been smi checked.
1720  Register scratch = ToRegister(instr->TempAt(0));
1721  __ mov(scratch, FieldOperand(reg, HeapObject::kMapOffset));
1722  __ movzx_b(scratch, FieldOperand(scratch, Map::kBitFieldOffset));
1723  __ test(scratch, Immediate(1 << Map::kIsUndetectable));
1724  EmitBranch(true_block, false_block, not_zero);
1725  }
1726 }
1727 
1728 
1729 Condition LCodeGen::EmitIsObject(Register input,
1730  Register temp1,
1731  Label* is_not_object,
1732  Label* is_object) {
1733  __ JumpIfSmi(input, is_not_object);
1734 
1735  __ cmp(input, isolate()->factory()->null_value());
1736  __ j(equal, is_object);
1737 
1738  __ mov(temp1, FieldOperand(input, HeapObject::kMapOffset));
1739  // Undetectable objects behave like undefined.
1740  __ test_b(FieldOperand(temp1, Map::kBitFieldOffset),
1741  1 << Map::kIsUndetectable);
1742  __ j(not_zero, is_not_object);
1743 
1744  __ movzx_b(temp1, FieldOperand(temp1, Map::kInstanceTypeOffset));
1745  __ cmp(temp1, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE);
1746  __ j(below, is_not_object);
1747  __ cmp(temp1, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
1748  return below_equal;
1749 }
1750 
1751 
1752 void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
1753  Register reg = ToRegister(instr->InputAt(0));
1754  Register temp = ToRegister(instr->TempAt(0));
1755 
1756  int true_block = chunk_->LookupDestination(instr->true_block_id());
1757  int false_block = chunk_->LookupDestination(instr->false_block_id());
1758  Label* true_label = chunk_->GetAssemblyLabel(true_block);
1759  Label* false_label = chunk_->GetAssemblyLabel(false_block);
1760 
1761  Condition true_cond = EmitIsObject(reg, temp, false_label, true_label);
1762 
1763  EmitBranch(true_block, false_block, true_cond);
1764 }
1765 
1766 
1767 Condition LCodeGen::EmitIsString(Register input,
1768  Register temp1,
1769  Label* is_not_string) {
1770  __ JumpIfSmi(input, is_not_string);
1771 
1772  Condition cond = masm_->IsObjectStringType(input, temp1, temp1);
1773 
1774  return cond;
1775 }
1776 
1777 
1778 void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
1779  Register reg = ToRegister(instr->InputAt(0));
1780  Register temp = ToRegister(instr->TempAt(0));
1781 
1782  int true_block = chunk_->LookupDestination(instr->true_block_id());
1783  int false_block = chunk_->LookupDestination(instr->false_block_id());
1784  Label* false_label = chunk_->GetAssemblyLabel(false_block);
1785 
1786  Condition true_cond = EmitIsString(reg, temp, false_label);
1787 
1788  EmitBranch(true_block, false_block, true_cond);
1789 }
1790 
1791 
1792 void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
1793  Operand input = ToOperand(instr->InputAt(0));
1794 
1795  int true_block = chunk_->LookupDestination(instr->true_block_id());
1796  int false_block = chunk_->LookupDestination(instr->false_block_id());
1797 
1798  __ test(input, Immediate(kSmiTagMask));
1799  EmitBranch(true_block, false_block, zero);
1800 }
1801 
1802 
1803 void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
1804  Register input = ToRegister(instr->InputAt(0));
1805  Register temp = ToRegister(instr->TempAt(0));
1806 
1807  int true_block = chunk_->LookupDestination(instr->true_block_id());
1808  int false_block = chunk_->LookupDestination(instr->false_block_id());
1809 
1810  STATIC_ASSERT(kSmiTag == 0);
1811  __ JumpIfSmi(input, chunk_->GetAssemblyLabel(false_block));
1812  __ mov(temp, FieldOperand(input, HeapObject::kMapOffset));
1813  __ test_b(FieldOperand(temp, Map::kBitFieldOffset),
1814  1 << Map::kIsUndetectable);
1815  EmitBranch(true_block, false_block, not_zero);
1816 }
1817 
1818 
1819 static Condition ComputeCompareCondition(Token::Value op) {
1820  switch (op) {
1821  case Token::EQ_STRICT:
1822  case Token::EQ:
1823  return equal;
1824  case Token::LT:
1825  return less;
1826  case Token::GT:
1827  return greater;
1828  case Token::LTE:
1829  return less_equal;
1830  case Token::GTE:
1831  return greater_equal;
1832  default:
1833  UNREACHABLE();
1834  return no_condition;
1835  }
1836 }
1837 
1838 
1839 void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
1840  Token::Value op = instr->op();
1841  int true_block = chunk_->LookupDestination(instr->true_block_id());
1842  int false_block = chunk_->LookupDestination(instr->false_block_id());
1843 
1844  Handle<Code> ic = CompareIC::GetUninitialized(op);
1845  CallCode(ic, RelocInfo::CODE_TARGET, instr);
1846 
1847  Condition condition = ComputeCompareCondition(op);
1848  __ test(eax, Operand(eax));
1849 
1850  EmitBranch(true_block, false_block, condition);
1851 }
1852 
1853 
1854 static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
1855  InstanceType from = instr->from();
1856  InstanceType to = instr->to();
1857  if (from == FIRST_TYPE) return to;
1858  ASSERT(from == to || to == LAST_TYPE);
1859  return from;
1860 }
1861 
1862 
1863 static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
1864  InstanceType from = instr->from();
1865  InstanceType to = instr->to();
1866  if (from == to) return equal;
1867  if (to == LAST_TYPE) return above_equal;
1868  if (from == FIRST_TYPE) return below_equal;
1869  UNREACHABLE();
1870  return equal;
1871 }
1872 
1873 
1874 void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
1875  Register input = ToRegister(instr->InputAt(0));
1876  Register temp = ToRegister(instr->TempAt(0));
1877 
1878  int true_block = chunk_->LookupDestination(instr->true_block_id());
1879  int false_block = chunk_->LookupDestination(instr->false_block_id());
1880 
1881  Label* false_label = chunk_->GetAssemblyLabel(false_block);
1882 
1883  __ JumpIfSmi(input, false_label);
1884 
1885  __ CmpObjectType(input, TestType(instr->hydrogen()), temp);
1886  EmitBranch(true_block, false_block, BranchCondition(instr->hydrogen()));
1887 }
1888 
1889 
1890 void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
1891  Register input = ToRegister(instr->InputAt(0));
1892  Register result = ToRegister(instr->result());
1893 
1894  if (FLAG_debug_code) {
1895  __ AbortIfNotString(input);
1896  }
1897 
1898  __ mov(result, FieldOperand(input, String::kHashFieldOffset));
1899  __ IndexFromHash(result, result);
1900 }
1901 
1902 
1903 void LCodeGen::DoHasCachedArrayIndexAndBranch(
1904  LHasCachedArrayIndexAndBranch* instr) {
1905  Register input = ToRegister(instr->InputAt(0));
1906 
1907  int true_block = chunk_->LookupDestination(instr->true_block_id());
1908  int false_block = chunk_->LookupDestination(instr->false_block_id());
1909 
1910  __ test(FieldOperand(input, String::kHashFieldOffset),
1911  Immediate(String::kContainsCachedArrayIndexMask));
1912  EmitBranch(true_block, false_block, equal);
1913 }
1914 
1915 
1916 // Branches to a label or falls through with the answer in the z flag. Trashes
1917 // the temp registers, but not the input.
1918 void LCodeGen::EmitClassOfTest(Label* is_true,
1919  Label* is_false,
1920  Handle<String> class_name,
1921  Register input,
1922  Register temp,
1923  Register temp2) {
1924  ASSERT(!input.is(temp));
1925  ASSERT(!input.is(temp2));
1926  ASSERT(!temp.is(temp2));
1927  __ JumpIfSmi(input, is_false);
1928 
1929  if (class_name->IsEqualTo(CStrVector("Function"))) {
1930  // Assuming the following assertions, we can use the same compares to test
1931  // for both being a function type and being in the object type range.
1932  STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
1933  STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
1934  FIRST_SPEC_OBJECT_TYPE + 1);
1935  STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
1936  LAST_SPEC_OBJECT_TYPE - 1);
1937  STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
1938  __ CmpObjectType(input, FIRST_SPEC_OBJECT_TYPE, temp);
1939  __ j(below, is_false);
1940  __ j(equal, is_true);
1941  __ CmpInstanceType(temp, LAST_SPEC_OBJECT_TYPE);
1942  __ j(equal, is_true);
1943  } else {
1944  // Faster code path to avoid two compares: subtract lower bound from the
1945  // actual type and do a signed compare with the width of the type range.
1946  __ mov(temp, FieldOperand(input, HeapObject::kMapOffset));
1947  __ movzx_b(temp2, FieldOperand(temp, Map::kInstanceTypeOffset));
1948  __ sub(Operand(temp2), Immediate(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
1949  __ cmp(Operand(temp2), Immediate(LAST_NONCALLABLE_SPEC_OBJECT_TYPE -
1950  FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
1951  __ j(above, is_false);
1952  }
1953 
1954  // Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range.
1955  // Check if the constructor in the map is a function.
1956  __ mov(temp, FieldOperand(temp, Map::kConstructorOffset));
1957  // Objects with a non-function constructor have class 'Object'.
1958  __ CmpObjectType(temp, JS_FUNCTION_TYPE, temp2);
1959  if (class_name->IsEqualTo(CStrVector("Object"))) {
1960  __ j(not_equal, is_true);
1961  } else {
1962  __ j(not_equal, is_false);
1963  }
1964 
1965  // temp now contains the constructor function. Grab the
1966  // instance class name from there.
1967  __ mov(temp, FieldOperand(temp, JSFunction::kSharedFunctionInfoOffset));
1968  __ mov(temp, FieldOperand(temp,
1969  SharedFunctionInfo::kInstanceClassNameOffset));
1970  // The class name we are testing against is a symbol because it's a literal.
1971  // The name in the constructor is a symbol because of the way the context is
1972  // booted. This routine isn't expected to work for random API-created
1973  // classes and it doesn't have to because you can't access it with natives
1974  // syntax. Since both sides are symbols it is sufficient to use an identity
1975  // comparison.
1976  __ cmp(temp, class_name);
1977  // End with the answer in the z flag.
1978 }
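// Illustration (editor's sketch, not part of the V8 source): the "faster code
// path" in the else-branch above is the standard single-compare range check.
// Subtracting the lower bound and doing one unsigned compare against the width
// of the range covers both bounds at once, because values below the lower
// bound wrap around to large unsigned numbers:
//
//   // in_range(t, lo, hi) with one conditional branch instead of two
//   bool InRange(uint8_t t, uint8_t lo, uint8_t hi) {
//     return static_cast<uint8_t>(t - lo) <= static_cast<uint8_t>(hi - lo);
//   }
//
// The emitted sub/cmp/j(above, is_false) sequence is the assembly form of the
// same check applied to the instance type byte.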
1979 
1980 
1981 void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
1982  Register input = ToRegister(instr->InputAt(0));
1983  Register temp = ToRegister(instr->TempAt(0));
1984  Register temp2 = ToRegister(instr->TempAt(1));
1985 
1986  Handle<String> class_name = instr->hydrogen()->class_name();
1987 
1988  int true_block = chunk_->LookupDestination(instr->true_block_id());
1989  int false_block = chunk_->LookupDestination(instr->false_block_id());
1990 
1991  Label* true_label = chunk_->GetAssemblyLabel(true_block);
1992  Label* false_label = chunk_->GetAssemblyLabel(false_block);
1993 
1994  EmitClassOfTest(true_label, false_label, class_name, input, temp, temp2);
1995 
1996  EmitBranch(true_block, false_block, equal);
1997 }
1998 
1999 
2000 void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
2001  Register reg = ToRegister(instr->InputAt(0));
2002  int true_block = instr->true_block_id();
2003  int false_block = instr->false_block_id();
2004 
2005  __ cmp(FieldOperand(reg, HeapObject::kMapOffset), instr->map());
2006  EmitBranch(true_block, false_block, equal);
2007 }
2008 
2009 
2010 void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
2011  // Object and function are in fixed registers defined by the stub.
2012  ASSERT(ToRegister(instr->context()).is(esi));
2013  InstanceofStub stub(InstanceofStub::kArgsInRegisters);
2014  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
2015 
2016  Label true_value, done;
2017  __ test(eax, Operand(eax));
2018  __ j(zero, &true_value, Label::kNear);
2019  __ mov(ToRegister(instr->result()), factory()->false_value());
2020  __ jmp(&done, Label::kNear);
2021  __ bind(&true_value);
2022  __ mov(ToRegister(instr->result()), factory()->true_value());
2023  __ bind(&done);
2024 }
2025 
2026 
2027 void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
2028  class DeferredInstanceOfKnownGlobal: public LDeferredCode {
2029  public:
2030  DeferredInstanceOfKnownGlobal(LCodeGen* codegen,
2031  LInstanceOfKnownGlobal* instr)
2032  : LDeferredCode(codegen), instr_(instr) { }
2033  virtual void Generate() {
2034  codegen()->DoDeferredInstanceOfKnownGlobal(instr_, &map_check_);
2035  }
2036  virtual LInstruction* instr() { return instr_; }
2037  Label* map_check() { return &map_check_; }
2038  private:
2039  LInstanceOfKnownGlobal* instr_;
2040  Label map_check_;
2041  };
2042 
2043  DeferredInstanceOfKnownGlobal* deferred;
2044  deferred = new(zone()) DeferredInstanceOfKnownGlobal(this, instr);
2045 
2046  Label done, false_result;
2047  Register object = ToRegister(instr->InputAt(1));
2048  Register temp = ToRegister(instr->TempAt(0));
2049 
2050  // A Smi is not an instance of anything.
2051  __ JumpIfSmi(object, &false_result);
2052 
2053  // This is the inlined call site instanceof cache. The two occurrences of the
2054  // hole value will be patched to the last map/result pair generated by the
2055  // instanceof stub.
2056  Label cache_miss;
2057  Register map = ToRegister(instr->TempAt(0));
2058  __ mov(map, FieldOperand(object, HeapObject::kMapOffset));
2059  __ bind(deferred->map_check()); // Label for calculating code patching.
2060  Handle<JSGlobalPropertyCell> cache_cell =
2061  factory()->NewJSGlobalPropertyCell(factory()->the_hole_value());
2062  __ cmp(map, Operand::Cell(cache_cell)); // Patched to cached map.
2063  __ j(not_equal, &cache_miss, Label::kNear);
2064  __ mov(eax, factory()->the_hole_value()); // Patched to either true or false.
2065  __ jmp(&done);
2066 
2067  // The inlined call site cache did not match. Check for null and string
2068  // before calling the deferred code.
2069  __ bind(&cache_miss);
2070  // Null is not an instance of anything.
2071  __ cmp(object, factory()->null_value());
2072  __ j(equal, &false_result);
2073 
2074  // String values are not instances of anything.
2075  Condition is_string = masm_->IsObjectStringType(object, temp, temp);
2076  __ j(is_string, &false_result);
2077 
2078  // Go to the deferred code.
2079  __ jmp(deferred->entry());
2080 
2081  __ bind(&false_result);
2082  __ mov(ToRegister(instr->result()), factory()->false_value());
2083 
2084  // At this point result holds either the true or the false value. The
2085  // deferred code also produces a true or false object.
2086  __ bind(deferred->exit());
2087  __ bind(&done);
2088 }
2089 
2090 
2091 void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
2092  Label* map_check) {
2093  PushSafepointRegistersScope scope(this);
2094 
2095  InstanceofStub::Flags flags = InstanceofStub::kNoFlags;
2096  flags = static_cast<InstanceofStub::Flags>(
2097  flags | InstanceofStub::kArgsInRegisters);
2098  flags = static_cast<InstanceofStub::Flags>(
2099  flags | InstanceofStub::kCallSiteInlineCheck);
2100  flags = static_cast<InstanceofStub::Flags>(
2101  flags | InstanceofStub::kReturnTrueFalseObject);
2102  InstanceofStub stub(flags);
2103 
2104  // Get the temp register reserved by the instruction. This needs to be a
2105  // register which is pushed last by PushSafepointRegisters as top of the
2106  // stack is used to pass the offset to the location of the map check to
2107  // the stub.
2108  Register temp = ToRegister(instr->TempAt(0));
2109  ASSERT(MacroAssembler::SafepointRegisterStackIndex(temp) == 0);
2110  __ LoadHeapObject(InstanceofStub::right(), instr->function());
2111  static const int kAdditionalDelta = 13;
2112  int delta = masm_->SizeOfCodeGeneratedSince(map_check) + kAdditionalDelta;
2113  __ mov(temp, Immediate(delta));
2114  __ StoreToSafepointRegisterSlot(temp, temp);
2115  CallCodeGeneric(stub.GetCode(),
2116  RelocInfo::CODE_TARGET,
2117  instr,
2118  RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
2119  // Get the deoptimization index of the LLazyBailout-environment that
2120  // corresponds to this instruction.
2121  LEnvironment* env = instr->GetDeferredLazyDeoptimizationEnvironment();
2122  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
2123 
2124  // Put the result value into the eax slot and restore all registers.
2125  __ StoreToSafepointRegisterSlot(eax, eax);
2126 }
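// Editor's note (sketch, not from the original file): the value written to the
// safepoint slot above is the byte distance from the patchable map check in
// DoInstanceOfKnownGlobal to the stub call, so the stub can locate and patch
// the inlined cache from its own return address. Roughly:
//
//   delta = SizeOfCodeGeneratedSince(map_check) + kAdditionalDelta;
//
// where kAdditionalDelta (13 here) is assumed to cover the code emitted after
// the delta is computed (the mov, the safepoint store and the call itself);
// the exact constant is target-specific and tied to this code sequence.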
2127 
2128 
2129 void LCodeGen::DoCmpT(LCmpT* instr) {
2130  Token::Value op = instr->op();
2131 
2132  Handle<Code> ic = CompareIC::GetUninitialized(op);
2133  CallCode(ic, RelocInfo::CODE_TARGET, instr);
2134 
2135  Condition condition = ComputeCompareCondition(op);
2136  Label true_value, done;
2137  __ test(eax, Operand(eax));
2138  __ j(condition, &true_value, Label::kNear);
2139  __ mov(ToRegister(instr->result()), factory()->false_value());
2140  __ jmp(&done, Label::kNear);
2141  __ bind(&true_value);
2142  __ mov(ToRegister(instr->result()), factory()->true_value());
2143  __ bind(&done);
2144 }
2145 
2146 
2147 void LCodeGen::DoReturn(LReturn* instr) {
2148  if (FLAG_trace) {
2149  // Preserve the return value on the stack and rely on the runtime call
2150  // to return the value in the same register. We're leaving the code
2151  // managed by the register allocator and tearing down the frame, so it's
2152  // safe to write to the context register.
2153  __ push(eax);
2154  __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
2155  __ CallRuntime(Runtime::kTraceExit, 1);
2156  }
2157  if (dynamic_frame_alignment_) {
2158  // Fetch the state of the dynamic frame alignment.
2159  __ mov(edx, Operand(ebp,
2160  JavaScriptFrameConstants::kDynamicAlignmentStateOffset));
2161  }
2162  __ mov(esp, ebp);
2163  __ pop(ebp);
2164  if (dynamic_frame_alignment_) {
2165  Label no_padding;
2166  __ cmp(edx, Immediate(kNoAlignmentPadding));
2167  __ j(equal, &no_padding);
2168  if (FLAG_debug_code) {
2169  __ cmp(Operand(esp, (GetParameterCount() + 2) * kPointerSize),
2170  Immediate(kAlignmentZapValue));
2171  __ Assert(equal, "expected alignment marker");
2172  }
2173  __ Ret((GetParameterCount() + 2) * kPointerSize, ecx);
2174  __ bind(&no_padding);
2175  }
2176  __ Ret((GetParameterCount() + 1) * kPointerSize, ecx);
2177 }
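// Worked example (editor's addition, kPointerSize == 4 on ia32): for a
// function with two declared parameters the normal return drops
// (2 + 1) * 4 = 12 bytes, i.e. the parameters plus the receiver. If dynamic
// frame alignment inserted one padding word below the return address, the
// padded path drops (2 + 2) * 4 = 16 bytes instead, which is why it uses
// GetParameterCount() + 2 and why the debug check looks for kAlignmentZapValue
// in the slot at exactly that offset from esp.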
2178 
2179 
2180 void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
2181  Register result = ToRegister(instr->result());
2182  __ mov(result, Operand::Cell(instr->hydrogen()->cell()));
2183  if (instr->hydrogen()->RequiresHoleCheck()) {
2184  __ cmp(result, factory()->the_hole_value());
2185  DeoptimizeIf(equal, instr->environment());
2186  }
2187 }
2188 
2189 
2190 void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
2191  ASSERT(ToRegister(instr->context()).is(esi));
2192  ASSERT(ToRegister(instr->global_object()).is(edx));
2193  ASSERT(ToRegister(instr->result()).is(eax));
2194 
2195  __ mov(ecx, instr->name());
2196  RelocInfo::Mode mode = instr->for_typeof() ? RelocInfo::CODE_TARGET :
2197  RelocInfo::CODE_TARGET_CONTEXT;
2198  Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
2199  CallCode(ic, mode, instr);
2200 }
2201 
2202 
2203 void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
2204  Register value = ToRegister(instr->value());
2205  Handle<JSGlobalPropertyCell> cell_handle = instr->hydrogen()->cell();
2206 
2207  // If the cell we are storing to contains the hole it could have
2208  // been deleted from the property dictionary. In that case, we need
2209  // to update the property details in the property dictionary to mark
2210  // it as no longer deleted. We deoptimize in that case.
2211  if (instr->hydrogen()->RequiresHoleCheck()) {
2212  __ cmp(Operand::Cell(cell_handle), factory()->the_hole_value());
2213  DeoptimizeIf(equal, instr->environment());
2214  }
2215 
2216  // Store the value.
2217  __ mov(Operand::Cell(cell_handle), value);
2218  // Cells are always rescanned, so no write barrier here.
2219 }
2220 
2221 
2222 void LCodeGen::DoStoreGlobalGeneric(LStoreGlobalGeneric* instr) {
2223  ASSERT(ToRegister(instr->context()).is(esi));
2224  ASSERT(ToRegister(instr->global_object()).is(edx));
2225  ASSERT(ToRegister(instr->value()).is(eax));
2226 
2227  __ mov(ecx, instr->name());
2228  Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
2229  ? isolate()->builtins()->StoreIC_Initialize_Strict()
2230  : isolate()->builtins()->StoreIC_Initialize();
2231  CallCode(ic, RelocInfo::CODE_TARGET_CONTEXT, instr);
2232 }
2233 
2234 
2235 void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
2236  Register context = ToRegister(instr->context());
2237  Register result = ToRegister(instr->result());
2238  __ mov(result, ContextOperand(context, instr->slot_index()));
2239 
2240  if (instr->hydrogen()->RequiresHoleCheck()) {
2241  __ cmp(result, factory()->the_hole_value());
2242  if (instr->hydrogen()->DeoptimizesOnHole()) {
2243  DeoptimizeIf(equal, instr->environment());
2244  } else {
2245  Label is_not_hole;
2246  __ j(not_equal, &is_not_hole, Label::kNear);
2247  __ mov(result, factory()->undefined_value());
2248  __ bind(&is_not_hole);
2249  }
2250  }
2251 }
2252 
2253 
2254 void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
2255  Register context = ToRegister(instr->context());
2256  Register value = ToRegister(instr->value());
2257 
2258  Label skip_assignment;
2259 
2260  Operand target = ContextOperand(context, instr->slot_index());
2261  if (instr->hydrogen()->RequiresHoleCheck()) {
2262  __ cmp(target, factory()->the_hole_value());
2263  if (instr->hydrogen()->DeoptimizesOnHole()) {
2264  DeoptimizeIf(equal, instr->environment());
2265  } else {
2266  __ j(not_equal, &skip_assignment, Label::kNear);
2267  }
2268  }
2269 
2270  __ mov(target, value);
2271  if (instr->hydrogen()->NeedsWriteBarrier()) {
2272  HType type = instr->hydrogen()->value()->type();
2273  SmiCheck check_needed =
2274  type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
2275  Register temp = ToRegister(instr->TempAt(0));
2276  int offset = Context::SlotOffset(instr->slot_index());
2277  __ RecordWriteContextSlot(context,
2278  offset,
2279  value,
2280  temp,
2281  kSaveFPRegs,
2282  EMIT_REMEMBERED_SET,
2283  check_needed);
2284  }
2285 
2286  __ bind(&skip_assignment);
2287 }
2288 
2289 
2290 void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
2291  Register object = ToRegister(instr->object());
2292  Register result = ToRegister(instr->result());
2293  if (instr->hydrogen()->is_in_object()) {
2294  __ mov(result, FieldOperand(object, instr->hydrogen()->offset()));
2295  } else {
2296  __ mov(result, FieldOperand(object, JSObject::kPropertiesOffset));
2297  __ mov(result, FieldOperand(result, instr->hydrogen()->offset()));
2298  }
2299 }
2300 
2301 
2302 void LCodeGen::EmitLoadFieldOrConstantFunction(Register result,
2303  Register object,
2304  Handle<Map> type,
2305  Handle<String> name,
2306  LEnvironment* env) {
2307  LookupResult lookup(isolate());
2308  type->LookupInDescriptors(NULL, *name, &lookup);
2309  ASSERT(lookup.IsFound() || lookup.IsCacheable());
2310  if (lookup.IsFound() && lookup.type() == FIELD) {
2311  int index = lookup.GetLocalFieldIndexFromMap(*type);
2312  int offset = index * kPointerSize;
2313  if (index < 0) {
2314  // Negative property indices are in-object properties, indexed
2315  // from the end of the fixed part of the object.
2316  __ mov(result, FieldOperand(object, offset + type->instance_size()));
2317  } else {
2318  // Non-negative property indices are in the properties array.
2319  __ mov(result, FieldOperand(object, JSObject::kPropertiesOffset));
2320  __ mov(result, FieldOperand(result, offset + FixedArray::kHeaderSize));
2321  }
2322  } else if (lookup.IsFound() && lookup.type() == CONSTANT_FUNCTION) {
2323  Handle<JSFunction> function(lookup.GetConstantFunctionFromMap(*type));
2324  __ LoadHeapObject(result, function);
2325  } else {
2326  // Negative lookup.
2327  // Check prototypes.
2328  HeapObject* current = HeapObject::cast((*type)->prototype());
2329  Heap* heap = type->GetHeap();
2330  while (current != heap->null_value()) {
2331  Handle<HeapObject> link(current);
2332  __ LoadHeapObject(result, link);
2333  __ cmp(FieldOperand(result, HeapObject::kMapOffset),
2334  Handle<Map>(JSObject::cast(current)->map()));
2335  DeoptimizeIf(not_equal, env);
2336  current = HeapObject::cast(current->map()->prototype());
2337  }
2338  __ mov(result, factory()->undefined_value());
2339  }
2340 }
2341 
2342 
2343 void LCodeGen::EmitPushTaggedOperand(LOperand* operand) {
2344  ASSERT(!operand->IsDoubleRegister());
2345  if (operand->IsConstantOperand()) {
2346  Handle<Object> object = ToHandle(LConstantOperand::cast(operand));
2347  if (object->IsSmi()) {
2348  __ Push(Handle<Smi>::cast(object));
2349  } else {
2350  __ PushHeapObject(Handle<HeapObject>::cast(object));
2351  }
2352  } else if (operand->IsRegister()) {
2353  __ push(ToRegister(operand));
2354  } else {
2355  __ push(ToOperand(operand));
2356  }
2357 }
2358 
2359 
2360 // Check for cases where EmitLoadFieldOrConstantFunction needs to walk the
2361 // prototype chain, which causes unbounded code generation.
2362 static bool CompactEmit(
2363  SmallMapList* list, Handle<String> name, int i, Isolate* isolate) {
2364  LookupResult lookup(isolate);
2365  Handle<Map> map = list->at(i);
2366  map->LookupInDescriptors(NULL, *name, &lookup);
2367  return lookup.IsFound() &&
2368  (lookup.type() == FIELD || lookup.type() == CONSTANT_FUNCTION);
2369 }
2370 
2371 
2372 void LCodeGen::DoLoadNamedFieldPolymorphic(LLoadNamedFieldPolymorphic* instr) {
2373  Register object = ToRegister(instr->object());
2374  Register result = ToRegister(instr->result());
2375 
2376  int map_count = instr->hydrogen()->types()->length();
2377  bool need_generic = instr->hydrogen()->need_generic();
2378 
2379  if (map_count == 0 && !need_generic) {
2380  DeoptimizeIf(no_condition, instr->environment());
2381  return;
2382  }
2383  Handle<String> name = instr->hydrogen()->name();
2384  Label done;
2385  bool all_are_compact = true;
2386  for (int i = 0; i < map_count; ++i) {
2387  if (!CompactEmit(instr->hydrogen()->types(), name, i, isolate())) {
2388  all_are_compact = false;
2389  break;
2390  }
2391  }
2392  for (int i = 0; i < map_count; ++i) {
2393  bool last = (i == map_count - 1);
2394  Handle<Map> map = instr->hydrogen()->types()->at(i);
2395  Label check_passed;
2396  __ CompareMap(object, map, &check_passed, ALLOW_ELEMENT_TRANSITION_MAPS);
2397  if (last && !need_generic) {
2398  DeoptimizeIf(not_equal, instr->environment());
2399  __ bind(&check_passed);
2400  EmitLoadFieldOrConstantFunction(
2401  result, object, map, name, instr->environment());
2402  } else {
2403  Label next;
2404  bool compact = all_are_compact ? true :
2405  CompactEmit(instr->hydrogen()->types(), name, i, isolate());
2406  __ j(not_equal, &next, compact ? Label::kNear : Label::kFar);
2407  __ bind(&check_passed);
2408  EmitLoadFieldOrConstantFunction(
2409  result, object, map, name, instr->environment());
2410  __ jmp(&done, all_are_compact ? Label::kNear : Label::kFar);
2411  __ bind(&next);
2412  }
2413  }
2414  if (need_generic) {
2415  __ mov(ecx, name);
2416  Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
2417  CallCode(ic, RelocInfo::CODE_TARGET, instr);
2418  }
2419  __ bind(&done);
2420 }
2421 
2422 
2423 void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
2424  ASSERT(ToRegister(instr->context()).is(esi));
2425  ASSERT(ToRegister(instr->object()).is(edx));
2426  ASSERT(ToRegister(instr->result()).is(eax));
2427 
2428  __ mov(ecx, instr->name());
2429  Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
2430  CallCode(ic, RelocInfo::CODE_TARGET, instr);
2431 }
2432 
2433 
2434 void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
2435  Register function = ToRegister(instr->function());
2436  Register temp = ToRegister(instr->TempAt(0));
2437  Register result = ToRegister(instr->result());
2438 
2439  // Check that the function really is a function.
2440  __ CmpObjectType(function, JS_FUNCTION_TYPE, result);
2441  DeoptimizeIf(not_equal, instr->environment());
2442 
2443  // Check whether the function has an instance prototype.
2444  Label non_instance;
2445  __ test_b(FieldOperand(result, Map::kBitFieldOffset),
2446  1 << Map::kHasNonInstancePrototype);
2447  __ j(not_zero, &non_instance, Label::kNear);
2448 
2449  // Get the prototype or initial map from the function.
2450  __ mov(result,
2451  FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
2452 
2453  // Check that the function has a prototype or an initial map.
2454  __ cmp(Operand(result), Immediate(factory()->the_hole_value()));
2455  DeoptimizeIf(equal, instr->environment());
2456 
2457  // If the function does not have an initial map, we're done.
2458  Label done;
2459  __ CmpObjectType(result, MAP_TYPE, temp);
2460  __ j(not_equal, &done, Label::kNear);
2461 
2462  // Get the prototype from the initial map.
2463  __ mov(result, FieldOperand(result, Map::kPrototypeOffset));
2464  __ jmp(&done, Label::kNear);
2465 
2466  // Non-instance prototype: Fetch prototype from constructor field
2467  // in the function's map.
2468  __ bind(&non_instance);
2469  __ mov(result, FieldOperand(result, Map::kConstructorOffset));
2470 
2471  // All done.
2472  __ bind(&done);
2473 }
2474 
2475 
2476 void LCodeGen::DoLoadElements(LLoadElements* instr) {
2477  Register result = ToRegister(instr->result());
2478  Register input = ToRegister(instr->InputAt(0));
2479  __ mov(result, FieldOperand(input, JSObject::kElementsOffset));
2480  if (FLAG_debug_code) {
2481  Label done, ok, fail;
2482  __ cmp(FieldOperand(result, HeapObject::kMapOffset),
2483  Immediate(factory()->fixed_array_map()));
2484  __ j(equal, &done, Label::kNear);
2485  __ cmp(FieldOperand(result, HeapObject::kMapOffset),
2486  Immediate(factory()->fixed_cow_array_map()));
2487  __ j(equal, &done, Label::kNear);
2488  Register temp((result.is(eax)) ? ebx : eax);
2489  __ push(temp);
2490  __ mov(temp, FieldOperand(result, HeapObject::kMapOffset));
2491  __ movzx_b(temp, FieldOperand(temp, Map::kBitField2Offset));
2492  __ and_(temp, Map::kElementsKindMask);
2493  __ shr(temp, Map::kElementsKindShift);
2494  __ cmp(temp, GetInitialFastElementsKind());
2495  __ j(less, &fail, Label::kNear);
2496  __ cmp(temp, TERMINAL_FAST_ELEMENTS_KIND);
2497  __ j(less_equal, &ok, Label::kNear);
2498  __ cmp(temp, FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND);
2499  __ j(less, &fail, Label::kNear);
2500  __ cmp(temp, LAST_EXTERNAL_ARRAY_ELEMENTS_KIND);
2501  __ j(less_equal, &ok, Label::kNear);
2502  __ bind(&fail);
2503  __ Abort("Check for fast or external elements failed.");
2504  __ bind(&ok);
2505  __ pop(temp);
2506  __ bind(&done);
2507  }
2508 }
2509 
2510 
2511 void LCodeGen::DoLoadExternalArrayPointer(
2512  LLoadExternalArrayPointer* instr) {
2513  Register result = ToRegister(instr->result());
2514  Register input = ToRegister(instr->InputAt(0));
2515  __ mov(result, FieldOperand(input,
2516  ExternalArray::kExternalPointerOffset));
2517 }
2518 
2519 
2520 void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
2521  Register arguments = ToRegister(instr->arguments());
2522  Register length = ToRegister(instr->length());
2523  Operand index = ToOperand(instr->index());
2524  Register result = ToRegister(instr->result());
2525 
2526  __ sub(length, index);
2527  DeoptimizeIf(below_equal, instr->environment());
2528 
2529  // There are two words between the frame pointer and the last argument.
2530  // Subtracting from length accounts for one of them; add one more.
2531  __ mov(result, Operand(arguments, length, times_4, kPointerSize));
2532 }
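// Worked example (editor's addition): after the subtraction the length
// register holds length - index, so the load above reads
//
//   arguments + (length - index) * 4 + kPointerSize
//
// For index == length - 1 that is arguments + 8, which skips exactly the two
// words mentioned in the comment (on ia32, the saved frame pointer at
// [arguments] and the return address at [arguments + 4]): one is accounted for
// by the subtraction, the other by the extra kPointerSize displacement.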
2533 
2534 
2535 void LCodeGen::DoLoadKeyedFastElement(LLoadKeyedFastElement* instr) {
2536  Register result = ToRegister(instr->result());
2537 
2538  // Load the result.
2539  __ mov(result,
2540  BuildFastArrayOperand(instr->elements(),
2541  instr->key(),
2542  FAST_ELEMENTS,
2543  FixedArray::kHeaderSize - kHeapObjectTag,
2544  instr->additional_index()));
2545 
2546  // Check for the hole value.
2547  if (instr->hydrogen()->RequiresHoleCheck()) {
2548  if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
2549  __ test(result, Immediate(kSmiTagMask));
2550  DeoptimizeIf(not_equal, instr->environment());
2551  } else {
2552  __ cmp(result, factory()->the_hole_value());
2553  DeoptimizeIf(equal, instr->environment());
2554  }
2555  }
2556 }
2557 
2558 
2559 void LCodeGen::DoLoadKeyedFastDoubleElement(
2560  LLoadKeyedFastDoubleElement* instr) {
2561  XMMRegister result = ToDoubleRegister(instr->result());
2562 
2563  if (instr->hydrogen()->RequiresHoleCheck()) {
2564  int offset = FixedDoubleArray::kHeaderSize - kHeapObjectTag +
2565  sizeof(kHoleNanLower32);
2566  Operand hole_check_operand = BuildFastArrayOperand(
2567  instr->elements(), instr->key(),
2568  FAST_DOUBLE_ELEMENTS,
2569  offset,
2570  instr->additional_index());
2571  __ cmp(hole_check_operand, Immediate(kHoleNanUpper32));
2572  DeoptimizeIf(equal, instr->environment());
2573  }
2574 
2575  Operand double_load_operand = BuildFastArrayOperand(
2576  instr->elements(),
2577  instr->key(),
2578  FAST_DOUBLE_ELEMENTS,
2579  FixedDoubleArray::kHeaderSize - kHeapObjectTag,
2580  instr->additional_index());
2581  __ movdbl(result, double_load_operand);
2582 }
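// Background (editor's note): the hole in a FixedDoubleArray is a specific
// NaN bit pattern, so only its upper 32 bits need to be inspected. The offset
// computed above,
//
//   FixedDoubleArray::kHeaderSize - kHeapObjectTag + sizeof(kHoleNanLower32),
//
// skips the low word of the double so the cmp against kHoleNanUpper32 reads
// the high word directly. Ordinary NaNs produced by arithmetic are stored in
// canonicalized form with a different bit pattern, which keeps this hole test
// unambiguous.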
2583 
2584 
2585 Operand LCodeGen::BuildFastArrayOperand(
2586  LOperand* elements_pointer,
2587  LOperand* key,
2588  ElementsKind elements_kind,
2589  uint32_t offset,
2590  uint32_t additional_index) {
2591  Register elements_pointer_reg = ToRegister(elements_pointer);
2592  int shift_size = ElementsKindToShiftSize(elements_kind);
2593  if (key->IsConstantOperand()) {
2594  int constant_value = ToInteger32(LConstantOperand::cast(key));
2595  if (constant_value & 0xF0000000) {
2596  Abort("array index constant value too big");
2597  }
2598  return Operand(elements_pointer_reg,
2599  ((constant_value + additional_index) << shift_size)
2600  + offset);
2601  } else {
2602  ScaleFactor scale_factor = static_cast<ScaleFactor>(shift_size);
2603  return Operand(elements_pointer_reg,
2604  ToRegister(key),
2605  scale_factor,
2606  offset + (additional_index << shift_size));
2607  }
2608 }
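// Worked example (editor's addition): with a constant key the operand is a
// plain displacement off the elements pointer. For FAST_ELEMENTS
// (shift_size == 2 on ia32), key == 3, additional_index == 0 and
// offset == FixedArray::kHeaderSize - kHeapObjectTag this evaluates to
//
//   disp = ((3 + 0) << 2) + offset
//
// i.e. 12 bytes past the first element. With a register key the same address
// is expressed as base + key * scale + constant, and the additional_index is
// folded into the constant part so the key register never has to be adjusted.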
2609 
2610 
2611 void LCodeGen::DoLoadKeyedSpecializedArrayElement(
2612  LLoadKeyedSpecializedArrayElement* instr) {
2613  ElementsKind elements_kind = instr->elements_kind();
2614  Operand operand(BuildFastArrayOperand(instr->external_pointer(),
2615  instr->key(),
2616  elements_kind,
2617  0,
2618  instr->additional_index()));
2619  if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
2620  XMMRegister result(ToDoubleRegister(instr->result()));
2621  __ movss(result, operand);
2622  __ cvtss2sd(result, result);
2623  } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
2624  __ movdbl(ToDoubleRegister(instr->result()), operand);
2625  } else {
2626  Register result(ToRegister(instr->result()));
2627  switch (elements_kind) {
2628  case EXTERNAL_BYTE_ELEMENTS:
2629  __ movsx_b(result, operand);
2630  break;
2631  case EXTERNAL_PIXEL_ELEMENTS:
2632  case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
2633  __ movzx_b(result, operand);
2634  break;
2635  case EXTERNAL_SHORT_ELEMENTS:
2636  __ movsx_w(result, operand);
2637  break;
2638  case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
2639  __ movzx_w(result, operand);
2640  break;
2641  case EXTERNAL_INT_ELEMENTS:
2642  __ mov(result, operand);
2643  break;
2644  case EXTERNAL_UNSIGNED_INT_ELEMENTS:
2645  __ mov(result, operand);
2646  __ test(result, Operand(result));
2647  // TODO(danno): we could be more clever here, perhaps having a special
2648  // version of the stub that detects if the overflow case actually
2649  // happens, and generate code that returns a double rather than int.
2650  DeoptimizeIf(negative, instr->environment());
2651  break;
2652  case EXTERNAL_FLOAT_ELEMENTS:
2653  case EXTERNAL_DOUBLE_ELEMENTS:
2654  case FAST_SMI_ELEMENTS:
2655  case FAST_ELEMENTS:
2656  case FAST_DOUBLE_ELEMENTS:
2657  case FAST_HOLEY_SMI_ELEMENTS:
2658  case FAST_HOLEY_ELEMENTS:
2659  case FAST_HOLEY_DOUBLE_ELEMENTS:
2660  case DICTIONARY_ELEMENTS:
2661  case NON_STRICT_ARGUMENTS_ELEMENTS:
2662  UNREACHABLE();
2663  break;
2664  }
2665  }
2666 }
2667 
2668 
2669 void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
2670  ASSERT(ToRegister(instr->context()).is(esi));
2671  ASSERT(ToRegister(instr->object()).is(edx));
2672  ASSERT(ToRegister(instr->key()).is(ecx));
2673 
2674  Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
2675  CallCode(ic, RelocInfo::CODE_TARGET, instr);
2676 }
2677 
2678 
2679 void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
2680  Register result = ToRegister(instr->result());
2681 
2682  if (instr->hydrogen()->from_inlined()) {
2683  __ lea(result, Operand(esp, -2 * kPointerSize));
2684  } else {
2685  // Check for arguments adapter frame.
2686  Label done, adapted;
2687  __ mov(result, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
2688  __ mov(result, Operand(result, StandardFrameConstants::kContextOffset));
2689  __ cmp(Operand(result),
2690  Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
2691  __ j(equal, &adapted, Label::kNear);
2692 
2693  // No arguments adaptor frame.
2694  __ mov(result, Operand(ebp));
2695  __ jmp(&done, Label::kNear);
2696 
2697  // Arguments adaptor frame present.
2698  __ bind(&adapted);
2699  __ mov(result, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
2700 
2701  // Result is the frame pointer for the frame if not adapted and for the real
2702  // frame below the adaptor frame if adapted.
2703  __ bind(&done);
2704  }
2705 }
2706 
2707 
2708 void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
2709  Operand elem = ToOperand(instr->InputAt(0));
2710  Register result = ToRegister(instr->result());
2711 
2712  Label done;
2713 
2714  // If no arguments adaptor frame the number of arguments is fixed.
2715  __ cmp(ebp, elem);
2716  __ mov(result, Immediate(scope()->num_parameters()));
2717  __ j(equal, &done, Label::kNear);
2718 
2719  // Arguments adaptor frame present. Get argument length from there.
2720  __ mov(result, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
2721  __ mov(result, Operand(result,
2722  ArgumentsAdaptorFrameConstants::kLengthOffset));
2723  __ SmiUntag(result);
2724 
2725  // Argument length is in result register.
2726  __ bind(&done);
2727 }
2728 
2729 
2730 void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
2731  Register receiver = ToRegister(instr->receiver());
2732  Register function = ToRegister(instr->function());
2733  Register scratch = ToRegister(instr->TempAt(0));
2734 
2735  // If the receiver is null or undefined, we have to pass the global
2736  // object as a receiver to normal functions. Values have to be
2737  // passed unchanged to builtins and strict-mode functions.
2738  Label global_object, receiver_ok;
2739 
2740  // Do not transform the receiver to object for strict mode
2741  // functions.
2742  __ mov(scratch,
2743  FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
2744  __ test_b(FieldOperand(scratch, SharedFunctionInfo::kStrictModeByteOffset),
2745  1 << SharedFunctionInfo::kStrictModeBitWithinByte);
2746  __ j(not_equal, &receiver_ok, Label::kNear);
2747 
2748  // Do not transform the receiver to object for builtins.
2749  __ test_b(FieldOperand(scratch, SharedFunctionInfo::kNativeByteOffset),
2750  1 << SharedFunctionInfo::kNativeBitWithinByte);
2751  __ j(not_equal, &receiver_ok, Label::kNear);
2752 
2753  // Normal function. Replace undefined or null with global receiver.
2754  __ cmp(receiver, factory()->null_value());
2755  __ j(equal, &global_object, Label::kNear);
2756  __ cmp(receiver, factory()->undefined_value());
2757  __ j(equal, &global_object, Label::kNear);
2758 
2759  // The receiver should be a JS object.
2760  __ test(receiver, Immediate(kSmiTagMask));
2761  DeoptimizeIf(equal, instr->environment());
2762  __ CmpObjectType(receiver, FIRST_SPEC_OBJECT_TYPE, scratch);
2763  DeoptimizeIf(below, instr->environment());
2764  __ jmp(&receiver_ok, Label::kNear);
2765 
2766  __ bind(&global_object);
2767  // TODO(kmillikin): We have a hydrogen value for the global object. See
2768  // if it's better to use it than to explicitly fetch it from the context
2769  // here.
2770  __ mov(receiver, Operand(ebp, StandardFrameConstants::kContextOffset));
2771  __ mov(receiver, ContextOperand(receiver, Context::GLOBAL_INDEX));
2772  __ mov(receiver,
2773  FieldOperand(receiver, GlobalObject::kGlobalReceiverOffset));
2774  __ bind(&receiver_ok);
2775 }
2776 
2777 
2778 void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
2779  Register receiver = ToRegister(instr->receiver());
2780  Register function = ToRegister(instr->function());
2781  Register length = ToRegister(instr->length());
2782  Register elements = ToRegister(instr->elements());
2783  ASSERT(receiver.is(eax)); // Used for parameter count.
2784  ASSERT(function.is(edi)); // Required by InvokeFunction.
2785  ASSERT(ToRegister(instr->result()).is(eax));
2786 
2787  // Copy the arguments to this function possibly from the
2788  // adaptor frame below it.
2789  const uint32_t kArgumentsLimit = 1 * KB;
2790  __ cmp(length, kArgumentsLimit);
2791  DeoptimizeIf(above, instr->environment());
2792 
2793  __ push(receiver);
2794  __ mov(receiver, length);
2795 
2796  // Loop through the arguments pushing them onto the execution
2797  // stack.
2798  Label invoke, loop;
2799  // length is a small non-negative integer, due to the test above.
2800  __ test(length, Operand(length));
2801  __ j(zero, &invoke, Label::kNear);
2802  __ bind(&loop);
2803  __ push(Operand(elements, length, times_pointer_size, 1 * kPointerSize));
2804  __ dec(length);
2805  __ j(not_zero, &loop);
2806 
2807  // Invoke the function.
2808  __ bind(&invoke);
2809  ASSERT(instr->HasPointerMap());
2810  LPointerMap* pointers = instr->pointer_map();
2811  RecordPosition(pointers->position());
2812  SafepointGenerator safepoint_generator(
2813  this, pointers, Safepoint::kLazyDeopt);
2814  ParameterCount actual(eax);
2815  __ InvokeFunction(function, actual, CALL_FUNCTION,
2816  safepoint_generator, CALL_AS_METHOD);
2817 }
2818 
2819 
2820 void LCodeGen::DoPushArgument(LPushArgument* instr) {
2821  LOperand* argument = instr->InputAt(0);
2822  EmitPushTaggedOperand(argument);
2823 }
2824 
2825 
2826 void LCodeGen::DoDrop(LDrop* instr) {
2827  __ Drop(instr->count());
2828 }
2829 
2830 
2831 void LCodeGen::DoThisFunction(LThisFunction* instr) {
2832  Register result = ToRegister(instr->result());
2833  __ LoadHeapObject(result, instr->hydrogen()->closure());
2834 }
2835 
2836 
2837 void LCodeGen::DoContext(LContext* instr) {
2838  Register result = ToRegister(instr->result());
2839  __ mov(result, Operand(ebp, StandardFrameConstants::kContextOffset));
2840 }
2841 
2842 
2843 void LCodeGen::DoOuterContext(LOuterContext* instr) {
2844  Register context = ToRegister(instr->context());
2845  Register result = ToRegister(instr->result());
2846  __ mov(result,
2847  Operand(context, Context::SlotOffset(Context::PREVIOUS_INDEX)));
2848 }
2849 
2850 
2851 void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
2852  ASSERT(ToRegister(instr->InputAt(0)).is(esi));
2853  __ push(esi); // The context is the first argument.
2854  __ push(Immediate(instr->hydrogen()->pairs()));
2855  __ push(Immediate(Smi::FromInt(instr->hydrogen()->flags())));
2856  CallRuntime(Runtime::kDeclareGlobals, 3, instr);
2857 }
2858 
2859 
2860 void LCodeGen::DoGlobalObject(LGlobalObject* instr) {
2861  Register context = ToRegister(instr->context());
2862  Register result = ToRegister(instr->result());
2863  __ mov(result, Operand(context, Context::SlotOffset(Context::GLOBAL_INDEX)));
2864 }
2865 
2866 
2867 void LCodeGen::DoGlobalReceiver(LGlobalReceiver* instr) {
2868  Register global = ToRegister(instr->global());
2869  Register result = ToRegister(instr->result());
2870  __ mov(result, FieldOperand(global, GlobalObject::kGlobalReceiverOffset));
2871 }
2872 
2873 
2874 void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
2875  int arity,
2876  LInstruction* instr,
2877  CallKind call_kind,
2878  EDIState edi_state) {
2879  bool can_invoke_directly = !function->NeedsArgumentsAdaption() ||
2880  function->shared()->formal_parameter_count() == arity;
2881 
2882  LPointerMap* pointers = instr->pointer_map();
2883  RecordPosition(pointers->position());
2884 
2885  if (can_invoke_directly) {
2886  if (edi_state == EDI_UNINITIALIZED) {
2887  __ LoadHeapObject(edi, function);
2888  }
2889 
2890  // Change context if needed.
2891  bool change_context =
2892  (info()->closure()->context() != function->context()) ||
2893  scope()->contains_with() ||
2894  (scope()->num_heap_slots() > 0);
2895 
2896  if (change_context) {
2897  __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
2898  } else {
2899  __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
2900  }
2901 
2902  // Set eax to arguments count if adaption is not needed. Assumes that eax
2903  // is available to write to at this point.
2904  if (!function->NeedsArgumentsAdaption()) {
2905  __ mov(eax, arity);
2906  }
2907 
2908  // Invoke function directly.
2909  __ SetCallKind(ecx, call_kind);
2910  if (*function == *info()->closure()) {
2911  __ CallSelf();
2912  } else {
2913  __ call(FieldOperand(edi, JSFunction::kCodeEntryOffset));
2914  }
2915  RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
2916  } else {
2917  // We need to adapt arguments.
2918  SafepointGenerator generator(
2919  this, pointers, Safepoint::kLazyDeopt);
2920  ParameterCount count(arity);
2921  __ InvokeFunction(function, count, CALL_FUNCTION, generator, call_kind);
2922  }
2923 }
2924 
2925 
2926 void LCodeGen::DoCallConstantFunction(LCallConstantFunction* instr) {
2927  ASSERT(ToRegister(instr->result()).is(eax));
2928  CallKnownFunction(instr->function(),
2929  instr->arity(),
2930  instr,
2931  CALL_AS_METHOD,
2932  EDI_UNINITIALIZED);
2933 }
2934 
2935 
2936 void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr) {
2937  Register input_reg = ToRegister(instr->value());
2938  __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
2939  factory()->heap_number_map());
2940  DeoptimizeIf(not_equal, instr->environment());
2941 
2942  Label done;
2943  Register tmp = input_reg.is(eax) ? ecx : eax;
2944  Register tmp2 = tmp.is(ecx) ? edx : input_reg.is(ecx) ? edx : ecx;
2945 
2946  // Preserve the value of all registers.
2947  PushSafepointRegistersScope scope(this);
2948 
2949  Label negative;
2950  __ mov(tmp, FieldOperand(input_reg, HeapNumber::kExponentOffset));
2951  // Check the sign of the argument. If the argument is positive, just
2952  // return it. We do not need to patch the stack since |input| and
2953  // |result| are the same register and |input| will be restored
2954  // unchanged by popping safepoint registers.
2955  __ test(tmp, Immediate(HeapNumber::kSignMask));
2956  __ j(not_zero, &negative);
2957  __ jmp(&done);
2958 
2959  __ bind(&negative);
2960 
2961  Label allocated, slow;
2962  __ AllocateHeapNumber(tmp, tmp2, no_reg, &slow);
2963  __ jmp(&allocated);
2964 
2965  // Slow case: Call the runtime system to do the number allocation.
2966  __ bind(&slow);
2967 
2968  CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0,
2969  instr, instr->context());
2970 
2971  // Set the pointer to the new heap number in tmp.
2972  if (!tmp.is(eax)) __ mov(tmp, eax);
2973 
2974  // Restore input_reg after call to runtime.
2975  __ LoadFromSafepointRegisterSlot(input_reg, input_reg);
2976 
2977  __ bind(&allocated);
2978  __ mov(tmp2, FieldOperand(input_reg, HeapNumber::kExponentOffset));
2979  __ and_(tmp2, ~HeapNumber::kSignMask);
2980  __ mov(FieldOperand(tmp, HeapNumber::kExponentOffset), tmp2);
2981  __ mov(tmp2, FieldOperand(input_reg, HeapNumber::kMantissaOffset));
2982  __ mov(FieldOperand(tmp, HeapNumber::kMantissaOffset), tmp2);
2983  __ StoreToSafepointRegisterSlot(input_reg, tmp);
2984 
2985  __ bind(&done);
2986 }
2987 
2988 
2989 void LCodeGen::EmitIntegerMathAbs(LUnaryMathOperation* instr) {
2990  Register input_reg = ToRegister(instr->value());
2991  __ test(input_reg, Operand(input_reg));
2992  Label is_positive;
2993  __ j(not_sign, &is_positive);
2994  __ neg(input_reg);
2995  __ test(input_reg, Operand(input_reg));
2996  DeoptimizeIf(negative, instr->environment());
2997  __ bind(&is_positive);
2998 }
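// Editor's note (illustration): the deoptimization after the neg covers the
// single input whose absolute value is not representable in 32 bits,
// kMinInt = -2147483648. Negating it leaves the bit pattern unchanged and the
// value still negative, so the sign test after the neg fails and the code
// bails out to the deoptimizer instead of returning a wrong (negative)
// "absolute value".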
2999 
3000 
3001 void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) {
3002  // Class for deferred case.
3003  class DeferredMathAbsTaggedHeapNumber: public LDeferredCode {
3004  public:
3005  DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen,
3006  LUnaryMathOperation* instr)
3007  : LDeferredCode(codegen), instr_(instr) { }
3008  virtual void Generate() {
3009  codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
3010  }
3011  virtual LInstruction* instr() { return instr_; }
3012  private:
3013  LUnaryMathOperation* instr_;
3014  };
3015 
3016  ASSERT(instr->value()->Equals(instr->result()));
3017  Representation r = instr->hydrogen()->value()->representation();
3018 
3019  if (r.IsDouble()) {
3020  XMMRegister scratch = xmm0;
3021  XMMRegister input_reg = ToDoubleRegister(instr->value());
3022  __ xorps(scratch, scratch);
3023  __ subsd(scratch, input_reg);
3024  __ pand(input_reg, scratch);
3025  } else if (r.IsInteger32()) {
3026  EmitIntegerMathAbs(instr);
3027  } else { // Tagged case.
3028  DeferredMathAbsTaggedHeapNumber* deferred =
3029  new(zone()) DeferredMathAbsTaggedHeapNumber(this, instr);
3030  Register input_reg = ToRegister(instr->value());
3031  // Smi check.
3032  __ JumpIfNotSmi(input_reg, deferred->entry());
3033  EmitIntegerMathAbs(instr);
3034  __ bind(deferred->exit());
3035  }
3036 }
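// Editor's note (illustration): the double path above computes the absolute
// value without a branch. xorps zeroes the scratch register, subsd turns it
// into -x, and pand keeps only the bits that x and -x have in common. The two
// values differ solely in the IEEE-754 sign bit, so the AND clears that bit
// and leaves the rest intact; for x = -3.5 the result is 3.5, and -0.0 comes
// out as +0.0.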
3037 
3038 
3039 void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) {
3040  XMMRegister xmm_scratch = xmm0;
3041  Register output_reg = ToRegister(instr->result());
3042  XMMRegister input_reg = ToDoubleRegister(instr->value());
3043 
3044  if (CpuFeatures::IsSupported(SSE4_1)) {
3045  CpuFeatures::Scope scope(SSE4_1);
3046  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3047  // Deoptimize on negative zero.
3048  Label non_zero;
3049  __ xorps(xmm_scratch, xmm_scratch); // Zero the register.
3050  __ ucomisd(input_reg, xmm_scratch);
3051  __ j(not_equal, &non_zero, Label::kNear);
3052  __ movmskpd(output_reg, input_reg);
3053  __ test(output_reg, Immediate(1));
3054  DeoptimizeIf(not_zero, instr->environment());
3055  __ bind(&non_zero);
3056  }
3057  __ roundsd(xmm_scratch, input_reg, Assembler::kRoundDown);
3058  __ cvttsd2si(output_reg, Operand(xmm_scratch));
3059  // Overflow is signalled with minint.
3060  __ cmp(output_reg, 0x80000000u);
3061  DeoptimizeIf(equal, instr->environment());
3062  } else {
3063  Label negative_sign;
3064  Label done;
3065  // Deoptimize on unordered.
3066  __ xorps(xmm_scratch, xmm_scratch); // Zero the register.
3067  __ ucomisd(input_reg, xmm_scratch);
3068  DeoptimizeIf(parity_even, instr->environment());
3069  __ j(below, &negative_sign, Label::kNear);
3070 
3071  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3072  // Check for negative zero.
3073  Label positive_sign;
3074  __ j(above, &positive_sign, Label::kNear);
3075  __ movmskpd(output_reg, input_reg);
3076  __ test(output_reg, Immediate(1));
3077  DeoptimizeIf(not_zero, instr->environment());
3078  __ Set(output_reg, Immediate(0));
3079  __ jmp(&done, Label::kNear);
3080  __ bind(&positive_sign);
3081  }
3082 
3083  // Use truncating instruction (OK because input is positive).
3084  __ cvttsd2si(output_reg, Operand(input_reg));
3085  // Overflow is signalled with minint.
3086  __ cmp(output_reg, 0x80000000u);
3087  DeoptimizeIf(equal, instr->environment());
3088  __ jmp(&done, Label::kNear);
3089 
3090  // Non-zero negative reaches here
3091  __ bind(&negative_sign);
3092  // Truncate, then compare and compensate
3093  __ cvttsd2si(output_reg, Operand(input_reg));
3094  __ cvtsi2sd(xmm_scratch, output_reg);
3095  __ ucomisd(input_reg, xmm_scratch);
3096  __ j(equal, &done, Label::kNear);
3097  __ sub(output_reg, Immediate(1));
3098  DeoptimizeIf(overflow, instr->environment());
3099 
3100  __ bind(&done);
3101  }
3102 }
3103 
3104 void LCodeGen::DoMathRound(LUnaryMathOperation* instr) {
3105  XMMRegister xmm_scratch = xmm0;
3106  Register output_reg = ToRegister(instr->result());
3107  XMMRegister input_reg = ToDoubleRegister(instr->value());
3108 
3109  Label below_half, done;
3110  // xmm_scratch = 0.5
3111  ExternalReference one_half = ExternalReference::address_of_one_half();
3112  __ movdbl(xmm_scratch, Operand::StaticVariable(one_half));
3113  __ ucomisd(xmm_scratch, input_reg);
3114  __ j(above, &below_half);
3115  // xmm_scratch = input + 0.5
3116  __ addsd(xmm_scratch, input_reg);
3117 
3118  // Compute Math.floor(value + 0.5).
3119  // Use truncating instruction (OK because input is positive).
3120  __ cvttsd2si(output_reg, Operand(xmm_scratch));
3121 
3122  // Overflow is signalled with minint.
3123  __ cmp(output_reg, 0x80000000u);
3124  DeoptimizeIf(equal, instr->environment());
3125  __ jmp(&done);
3126 
3127  __ bind(&below_half);
3128 
3129  // We return 0 for the input range [+0, 0.5[, or [-0.5, 0.5[ if
3130  // we can ignore the difference between a result of -0 and +0.
3131  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3132  // If the sign is positive, we return +0.
3133  __ movmskpd(output_reg, input_reg);
3134  __ test(output_reg, Immediate(1));
3135  DeoptimizeIf(not_zero, instr->environment());
3136  } else {
3137  // If the input is >= -0.5, we return +0.
3138  __ mov(output_reg, Immediate(0xBF000000));
3139  __ movd(xmm_scratch, Operand(output_reg));
3140  __ cvtss2sd(xmm_scratch, xmm_scratch);
3141  __ ucomisd(input_reg, xmm_scratch);
3142  DeoptimizeIf(below, instr->environment());
3143  }
3144  __ Set(output_reg, Immediate(0));
3145  __ bind(&done);
3146 }
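// Editor's note (illustration): the constant 0xBF000000 loaded above is the
// single-precision bit pattern of -0.5f (sign bit set, biased exponent 0x7E,
// zero mantissa). It is widened to a double with cvtss2sd so the
// "input >= -0.5" test can be made without materializing a 64-bit literal
// in memory.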
3147 
3148 
3149 void LCodeGen::DoMathSqrt(LUnaryMathOperation* instr) {
3150  XMMRegister input_reg = ToDoubleRegister(instr->value());
3151  ASSERT(ToDoubleRegister(instr->result()).is(input_reg));
3152  __ sqrtsd(input_reg, input_reg);
3153 }
3154 
3155 
3156 void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
3157  XMMRegister xmm_scratch = xmm0;
3158  XMMRegister input_reg = ToDoubleRegister(instr->value());
3159  Register scratch = ToRegister(instr->temp());
3160  ASSERT(ToDoubleRegister(instr->result()).is(input_reg));
3161 
3162  // Note that according to ECMA-262 15.8.2.13:
3163  // Math.pow(-Infinity, 0.5) == Infinity
3164  // Math.sqrt(-Infinity) == NaN
3165  Label done, sqrt;
3166  // Check base for -Infinity. According to IEEE-754, single-precision
3167  // -Infinity has the highest 9 bits set and the lowest 23 bits cleared.
3168  __ mov(scratch, 0xFF800000);
3169  __ movd(xmm_scratch, scratch);
3170  __ cvtss2sd(xmm_scratch, xmm_scratch);
3171  __ ucomisd(input_reg, xmm_scratch);
3172  // Comparing -Infinity with NaN results in "unordered", which sets the
3173  // zero flag as if both were equal. However, it also sets the carry flag.
3174  __ j(not_equal, &sqrt, Label::kNear);
3175  __ j(carry, &sqrt, Label::kNear);
3176  // If input is -Infinity, return Infinity.
3177  __ xorps(input_reg, input_reg);
3178  __ subsd(input_reg, xmm_scratch);
3179  __ jmp(&done, Label::kNear);
3180 
3181  // Square root.
3182  __ bind(&sqrt);
3183  __ xorps(xmm_scratch, xmm_scratch);
3184  __ addsd(input_reg, xmm_scratch); // Convert -0 to +0.
3185  __ sqrtsd(input_reg, input_reg);
3186  __ bind(&done);
3187 }
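// Editor's note (illustration): 0xFF800000 is single-precision -Infinity,
// i.e. the sign bit plus an all-ones exponent and a zero mantissa -- the
// "highest 9 bits set" from the comment above. Loading it as a float and
// widening with cvtss2sd is a compact way to obtain the double -Infinity
// needed for the Math.pow(-Infinity, 0.5) == Infinity special case.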
3188 
3189 
3190 void LCodeGen::DoPower(LPower* instr) {
3191  Representation exponent_type = instr->hydrogen()->right()->representation();
3192  // Having marked this as a call, we can use any registers.
3193  // Just make sure that the input/output registers are the expected ones.
3194  ASSERT(!instr->InputAt(1)->IsDoubleRegister() ||
3195  ToDoubleRegister(instr->InputAt(1)).is(xmm1));
3196  ASSERT(!instr->InputAt(1)->IsRegister() ||
3197  ToRegister(instr->InputAt(1)).is(eax));
3198  ASSERT(ToDoubleRegister(instr->InputAt(0)).is(xmm2));
3199  ASSERT(ToDoubleRegister(instr->result()).is(xmm3));
3200 
3201  if (exponent_type.IsTagged()) {
3202  Label no_deopt;
3203  __ JumpIfSmi(eax, &no_deopt);
3204  __ CmpObjectType(eax, HEAP_NUMBER_TYPE, ecx);
3205  DeoptimizeIf(not_equal, instr->environment());
3206  __ bind(&no_deopt);
3207  MathPowStub stub(MathPowStub::TAGGED);
3208  __ CallStub(&stub);
3209  } else if (exponent_type.IsInteger32()) {
3210  MathPowStub stub(MathPowStub::INTEGER);
3211  __ CallStub(&stub);
3212  } else {
3213  ASSERT(exponent_type.IsDouble());
3214  MathPowStub stub(MathPowStub::DOUBLE);
3215  __ CallStub(&stub);
3216  }
3217 }
3218 
3219 
3220 void LCodeGen::DoRandom(LRandom* instr) {
3221  class DeferredDoRandom: public LDeferredCode {
3222  public:
3223  DeferredDoRandom(LCodeGen* codegen, LRandom* instr)
3224  : LDeferredCode(codegen), instr_(instr) { }
3225  virtual void Generate() { codegen()->DoDeferredRandom(instr_); }
3226  virtual LInstruction* instr() { return instr_; }
3227  private:
3228  LRandom* instr_;
3229  };
3230 
3231  DeferredDoRandom* deferred = new(zone()) DeferredDoRandom(this, instr);
3232 
3233  // Having marked this instruction as a call we can use any
3234  // registers.
3235  ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
3236  ASSERT(ToRegister(instr->InputAt(0)).is(eax));
3237  // Assert that the register size is indeed the size of each seed.
3238  static const int kSeedSize = sizeof(uint32_t);
3239  STATIC_ASSERT(kPointerSize == kSeedSize);
3240 
3241  __ mov(eax, FieldOperand(eax, GlobalObject::kGlobalContextOffset));
3242  static const int kRandomSeedOffset =
3243  FixedArray::kHeaderSize + Context::RANDOM_SEED_INDEX * kPointerSize;
3244  __ mov(ebx, FieldOperand(eax, kRandomSeedOffset));
3245  // ebx: FixedArray of the global context's random seeds
3246 
3247  // Load state[0].
3248  __ mov(ecx, FieldOperand(ebx, ByteArray::kHeaderSize));
3249  // If state[0] == 0, call runtime to initialize seeds.
3250  __ test(ecx, ecx);
3251  __ j(zero, deferred->entry());
3252  // Load state[1].
3253  __ mov(eax, FieldOperand(ebx, ByteArray::kHeaderSize + kSeedSize));
3254  // ecx: state[0]
3255  // eax: state[1]
3256 
3257  // state[0] = 18273 * (state[0] & 0xFFFF) + (state[0] >> 16)
3258  __ movzx_w(edx, ecx);
3259  __ imul(edx, edx, 18273);
3260  __ shr(ecx, 16);
3261  __ add(ecx, edx);
3262  // Save state[0].
3263  __ mov(FieldOperand(ebx, ByteArray::kHeaderSize), ecx);
3264 
3265  // state[1] = 36969 * (state[1] & 0xFFFF) + (state[1] >> 16)
3266  __ movzx_w(edx, eax);
3267  __ imul(edx, edx, 36969);
3268  __ shr(eax, 16);
3269  __ add(eax, edx);
3270  // Save state[1].
3271  __ mov(FieldOperand(ebx, ByteArray::kHeaderSize + kSeedSize), eax);
3272 
3273  // Random bit pattern = (state[0] << 14) + (state[1] & 0x3FFFF)
3274  __ shl(ecx, 14);
3275  __ and_(eax, Immediate(0x3FFFF));
3276  __ add(eax, ecx);
3277 
3278  __ bind(deferred->exit());
3279  // Convert 32 random bits in eax to 0.(32 random bits) in a double
3280  // by computing:
3281  // (1.(20 0s)(32 random bits) x 2^20) - (1.0 x 2^20).
3282  __ mov(ebx, Immediate(0x49800000)); // 1.0 x 2^20 as single.
3283  __ movd(xmm2, ebx);
3284  __ movd(xmm1, eax);
3285  __ cvtss2sd(xmm2, xmm2);
3286  __ xorps(xmm1, xmm2);
3287  __ subsd(xmm1, xmm2);
3288 }
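// Editor's sketch (not part of the V8 source): the code above implements two
// 16-bit multiply-with-carry generators whose outputs are combined into 32
// random bits. In plain C++, assuming 32-bit unsigned seed words, the update
// is roughly:
//
//   uint32_t NextRandomBits(uint32_t* s0, uint32_t* s1) {
//     *s0 = 18273 * (*s0 & 0xFFFF) + (*s0 >> 16);
//     *s1 = 36969 * (*s1 & 0xFFFF) + (*s1 >> 16);
//     return (*s0 << 14) + (*s1 & 0x3FFFF);
//   }
//
// The tail of the function then XORs those 32 bits into the mantissa of the
// double 1.0 x 2^20 (built from the single-precision constant 0x49800000) and
// subtracts 2^20 again, yielding a value in [0, 1) without an integer-to-
// double conversion.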
3289 
3290 
3291 void LCodeGen::DoDeferredRandom(LRandom* instr) {
3292  __ PrepareCallCFunction(1, ebx);
3293  __ mov(Operand(esp, 0), eax);
3294  __ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1);
3295  // Return value is in eax.
3296 }
3297 
3298 
3299 void LCodeGen::DoMathLog(LUnaryMathOperation* instr) {
3300  ASSERT(instr->value()->Equals(instr->result()));
3301  XMMRegister input_reg = ToDoubleRegister(instr->value());
3302  Label positive, done, zero;
3303  __ xorps(xmm0, xmm0);
3304  __ ucomisd(input_reg, xmm0);
3305  __ j(above, &positive, Label::kNear);
3306  __ j(equal, &zero, Label::kNear);
3307  ExternalReference nan =
3308  ExternalReference::address_of_canonical_non_hole_nan();
3309  __ movdbl(input_reg, Operand::StaticVariable(nan));
3310  __ jmp(&done, Label::kNear);
3311  __ bind(&zero);
3312  __ push(Immediate(0xFFF00000));
3313  __ push(Immediate(0));
3314  __ movdbl(input_reg, Operand(esp, 0));
3315  __ add(Operand(esp), Immediate(kDoubleSize));
3316  __ jmp(&done, Label::kNear);
3317  __ bind(&positive);
3318  __ fldln2();
3319  __ sub(Operand(esp), Immediate(kDoubleSize));
3320  __ movdbl(Operand(esp, 0), input_reg);
3321  __ fld_d(Operand(esp, 0));
3322  __ fyl2x();
3323  __ fstp_d(Operand(esp, 0));
3324  __ movdbl(input_reg, Operand(esp, 0));
3325  __ add(Operand(esp), Immediate(kDoubleSize));
3326  __ bind(&done);
3327 }
3328 
3329 
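// Illustrative sketch, not part of lithium-codegen-ia32.cc: DoMathLog above
// covers three cases, using the x87 identity ln(x) = ln(2) * log2(x)
// (fldln2 followed by fyl2x) for positive input, -Infinity for a zero input
// (the 0xFFF00000:00000000 bit pattern pushed onto the stack), and the
// canonical NaN for negative or NaN input. The same selection in plain C++:
#include <cmath>
#include <limits>
static inline double MathLogSketch(double x) {
  if (x > 0.0) return std::log(2.0) * std::log2(x);        // fldln2 / fyl2x
  if (x == 0.0) return -std::numeric_limits<double>::infinity();
  return std::numeric_limits<double>::quiet_NaN();         // x < 0 or NaN
}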
3330 void LCodeGen::DoMathTan(LUnaryMathOperation* instr) {
3331  ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
3332  TranscendentalCacheStub stub(TranscendentalCache::TAN,
3333  TranscendentalCacheStub::UNTAGGED);
3334  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
3335 }
3336 
3337 
3338 void LCodeGen::DoMathCos(LUnaryMathOperation* instr) {
3339  ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
3340  TranscendentalCacheStub stub(TranscendentalCache::COS,
3341  TranscendentalCacheStub::UNTAGGED);
3342  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
3343 }
3344 
3345 
3346 void LCodeGen::DoMathSin(LUnaryMathOperation* instr) {
3347  ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
3348  TranscendentalCacheStub stub(TranscendentalCache::SIN,
3349  TranscendentalCacheStub::UNTAGGED);
3350  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
3351 }
3352 
3353 
3354 void LCodeGen::DoUnaryMathOperation(LUnaryMathOperation* instr) {
3355  switch (instr->op()) {
3356  case kMathAbs:
3357  DoMathAbs(instr);
3358  break;
3359  case kMathFloor:
3360  DoMathFloor(instr);
3361  break;
3362  case kMathRound:
3363  DoMathRound(instr);
3364  break;
3365  case kMathSqrt:
3366  DoMathSqrt(instr);
3367  break;
3368  case kMathCos:
3369  DoMathCos(instr);
3370  break;
3371  case kMathSin:
3372  DoMathSin(instr);
3373  break;
3374  case kMathTan:
3375  DoMathTan(instr);
3376  break;
3377  case kMathLog:
3378  DoMathLog(instr);
3379  break;
3380 
3381  default:
3382  UNREACHABLE();
3383  }
3384 }
3385 
3386 
3387 void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
3388  ASSERT(ToRegister(instr->context()).is(esi));
3389  ASSERT(ToRegister(instr->function()).is(edi));
3390  ASSERT(instr->HasPointerMap());
3391 
3392  if (instr->known_function().is_null()) {
3393  LPointerMap* pointers = instr->pointer_map();
3394  RecordPosition(pointers->position());
3395  SafepointGenerator generator(
3396  this, pointers, Safepoint::kLazyDeopt);
3397  ParameterCount count(instr->arity());
3398  __ InvokeFunction(edi, count, CALL_FUNCTION, generator, CALL_AS_METHOD);
3399  } else {
3400  CallKnownFunction(instr->known_function(),
3401  instr->arity(),
3402  instr,
3403  CALL_AS_METHOD,
3404  EDI_CONTAINS_TARGET);
3405  }
3406 }
3407 
3408 
3409 void LCodeGen::DoCallKeyed(LCallKeyed* instr) {
3410  ASSERT(ToRegister(instr->context()).is(esi));
3411  ASSERT(ToRegister(instr->key()).is(ecx));
3412  ASSERT(ToRegister(instr->result()).is(eax));
3413 
3414  int arity = instr->arity();
3415  Handle<Code> ic =
3416  isolate()->stub_cache()->ComputeKeyedCallInitialize(arity);
3417  CallCode(ic, RelocInfo::CODE_TARGET, instr);
3418 }
3419 
3420 
3421 void LCodeGen::DoCallNamed(LCallNamed* instr) {
3422  ASSERT(ToRegister(instr->context()).is(esi));
3423  ASSERT(ToRegister(instr->result()).is(eax));
3424 
3425  int arity = instr->arity();
3426  RelocInfo::Mode mode = RelocInfo::CODE_TARGET;
3427  Handle<Code> ic =
3428  isolate()->stub_cache()->ComputeCallInitialize(arity, mode);
3429  __ mov(ecx, instr->name());
3430  CallCode(ic, mode, instr);
3431 }
3432 
3433 
3434 void LCodeGen::DoCallFunction(LCallFunction* instr) {
3435  ASSERT(ToRegister(instr->context()).is(esi));
3436  ASSERT(ToRegister(instr->function()).is(edi));
3437  ASSERT(ToRegister(instr->result()).is(eax));
3438 
3439  int arity = instr->arity();
3440  CallFunctionStub stub(arity, NO_CALL_FUNCTION_FLAGS);
3441  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
3442 }
3443 
3444 
3445 void LCodeGen::DoCallGlobal(LCallGlobal* instr) {
3446  ASSERT(ToRegister(instr->context()).is(esi));
3447  ASSERT(ToRegister(instr->result()).is(eax));
3448 
3449  int arity = instr->arity();
3450  RelocInfo::Mode mode = RelocInfo::CODE_TARGET_CONTEXT;
3451  Handle<Code> ic =
3452  isolate()->stub_cache()->ComputeCallInitialize(arity, mode);
3453  __ mov(ecx, instr->name());
3454  CallCode(ic, mode, instr);
3455 }
3456 
3457 
3458 void LCodeGen::DoCallKnownGlobal(LCallKnownGlobal* instr) {
3459  ASSERT(ToRegister(instr->result()).is(eax));
3460  CallKnownFunction(instr->target(),
3461  instr->arity(),
3462  instr,
3463  CALL_AS_FUNCTION,
3464  EDI_UNINITIALIZED);
3465 }
3466 
3467 
3468 void LCodeGen::DoCallNew(LCallNew* instr) {
3469  ASSERT(ToRegister(instr->context()).is(esi));
3470  ASSERT(ToRegister(instr->constructor()).is(edi));
3471  ASSERT(ToRegister(instr->result()).is(eax));
3472 
3473  CallConstructStub stub(NO_CALL_FUNCTION_FLAGS);
3474  __ Set(eax, Immediate(instr->arity()));
3475  CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
3476 }
3477 
3478 
3479 void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
3480  CallRuntime(instr->function(), instr->arity(), instr);
3481 }
3482 
3483 
3484 void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
3485  Register object = ToRegister(instr->object());
3486  Register value = ToRegister(instr->value());
3487  int offset = instr->offset();
3488 
3489  if (!instr->transition().is_null()) {
3490  if (!instr->hydrogen()->NeedsWriteBarrierForMap()) {
3491  __ mov(FieldOperand(object, HeapObject::kMapOffset), instr->transition());
3492  } else {
3493  Register temp = ToRegister(instr->TempAt(0));
3494  Register temp_map = ToRegister(instr->TempAt(1));
3495  __ mov(temp_map, instr->transition());
3496  __ mov(FieldOperand(object, HeapObject::kMapOffset), temp_map);
3497  // Update the write barrier for the map field.
3498  __ RecordWriteField(object,
3499  HeapObject::kMapOffset,
3500  temp_map,
3501  temp,
3502  kSaveFPRegs,
3503  OMIT_REMEMBERED_SET,
3504  OMIT_SMI_CHECK);
3505  }
3506  }
3507 
3508  // Do the store.
3509  HType type = instr->hydrogen()->value()->type();
3510  SmiCheck check_needed =
3511  type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
3512  if (instr->is_in_object()) {
3513  __ mov(FieldOperand(object, offset), value);
3514  if (instr->hydrogen()->NeedsWriteBarrier()) {
3515  Register temp = ToRegister(instr->TempAt(0));
3516  // Update the write barrier for the object for in-object properties.
3517  __ RecordWriteField(object,
3518  offset,
3519  value,
3520  temp,
3521  kSaveFPRegs,
3522  EMIT_REMEMBERED_SET,
3523  check_needed);
3524  }
3525  } else {
3526  Register temp = ToRegister(instr->TempAt(0));
3527  __ mov(temp, FieldOperand(object, JSObject::kPropertiesOffset));
3528  __ mov(FieldOperand(temp, offset), value);
3529  if (instr->hydrogen()->NeedsWriteBarrier()) {
3530  // Update the write barrier for the properties array.
3531  // object is used as a scratch register.
3532  __ RecordWriteField(temp,
3533  offset,
3534  value,
3535  object,
3536  kSaveFPRegs,
3537  EMIT_REMEMBERED_SET,
3538  check_needed);
3539  }
3540  }
3541 }
3542 
3543 
3544 void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
3545  ASSERT(ToRegister(instr->context()).is(esi));
3546  ASSERT(ToRegister(instr->object()).is(edx));
3547  ASSERT(ToRegister(instr->value()).is(eax));
3548 
3549  __ mov(ecx, instr->name());
3550  Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
3551  ? isolate()->builtins()->StoreIC_Initialize_Strict()
3552  : isolate()->builtins()->StoreIC_Initialize();
3553  CallCode(ic, RelocInfo::CODE_TARGET, instr);
3554 }
3555 
3556 
3557 void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
3558  if (instr->index()->IsConstantOperand()) {
3559  __ cmp(ToOperand(instr->length()),
3560  Immediate(ToInteger32(LConstantOperand::cast(instr->index()))));
3561  DeoptimizeIf(below_equal, instr->environment());
3562  } else {
3563  __ cmp(ToRegister(instr->index()), ToOperand(instr->length()));
3564  DeoptimizeIf(above_equal, instr->environment());
3565  }
3566 }
3567 
3568 
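// Illustrative sketch, not part of lithium-codegen-ia32.cc: the register case
// of DoBoundsCheck above relies on an unsigned comparison. Reinterpreting a
// negative int32 index as uint32 makes it larger than any valid length, so a
// single above_equal branch rejects both index < 0 and index >= length:
#include <cstdint>
static inline bool IndexInBoundsSketch(int32_t index, int32_t length) {
  return static_cast<uint32_t>(index) < static_cast<uint32_t>(length);
}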
3569 void LCodeGen::DoStoreKeyedSpecializedArrayElement(
3570  LStoreKeyedSpecializedArrayElement* instr) {
3571  ElementsKind elements_kind = instr->elements_kind();
3572  Operand operand(BuildFastArrayOperand(instr->external_pointer(),
3573  instr->key(),
3574  elements_kind,
3575  0,
3576  instr->additional_index()));
3577  if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
3578  __ cvtsd2ss(xmm0, ToDoubleRegister(instr->value()));
3579  __ movss(operand, xmm0);
3580  } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
3581  __ movdbl(operand, ToDoubleRegister(instr->value()));
3582  } else {
3583  Register value = ToRegister(instr->value());
3584  switch (elements_kind) {
3585  case EXTERNAL_PIXEL_ELEMENTS:
3586  case EXTERNAL_BYTE_ELEMENTS:
3587  case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
3588  __ mov_b(operand, value);
3589  break;
3590  case EXTERNAL_SHORT_ELEMENTS:
3591  case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
3592  __ mov_w(operand, value);
3593  break;
3594  case EXTERNAL_INT_ELEMENTS:
3595  case EXTERNAL_UNSIGNED_INT_ELEMENTS:
3596  __ mov(operand, value);
3597  break;
3598  case EXTERNAL_FLOAT_ELEMENTS:
3599  case EXTERNAL_DOUBLE_ELEMENTS:
3600  case FAST_SMI_ELEMENTS:
3601  case FAST_ELEMENTS:
3602  case FAST_DOUBLE_ELEMENTS:
3603  case FAST_HOLEY_SMI_ELEMENTS:
3604  case FAST_HOLEY_ELEMENTS:
3605  case FAST_HOLEY_DOUBLE_ELEMENTS:
3606  case DICTIONARY_ELEMENTS:
3607  case NON_STRICT_ARGUMENTS_ELEMENTS:
3608  UNREACHABLE();
3609  break;
3610  }
3611  }
3612 }
3613 
3614 
3615 void LCodeGen::DoStoreKeyedFastElement(LStoreKeyedFastElement* instr) {
3616  Register value = ToRegister(instr->value());
3617  Register elements = ToRegister(instr->object());
3618  Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) : no_reg;
3619 
3620  Operand operand = BuildFastArrayOperand(
3621  instr->object(),
3622  instr->key(),
3623  FAST_ELEMENTS,
3624  FixedArray::kHeaderSize - kHeapObjectTag,
3625  instr->additional_index());
3626  __ mov(operand, value);
3627 
3628  if (instr->hydrogen()->NeedsWriteBarrier()) {
3629  ASSERT(!instr->key()->IsConstantOperand());
3630  HType type = instr->hydrogen()->value()->type();
3631  SmiCheck check_needed =
3632  type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
3633  // Compute address of modified element and store it into key register.
3634  __ lea(key, operand);
3635  __ RecordWrite(elements,
3636  key,
3637  value,
3638  kSaveFPRegs,
3639  EMIT_REMEMBERED_SET,
3640  check_needed);
3641  }
3642 }
3643 
3644 
3645 void LCodeGen::DoStoreKeyedFastDoubleElement(
3646  LStoreKeyedFastDoubleElement* instr) {
3647  XMMRegister value = ToDoubleRegister(instr->value());
3648 
3649  if (instr->NeedsCanonicalization()) {
3650  Label have_value;
3651 
3652  __ ucomisd(value, value);
3653  __ j(parity_odd, &have_value); // NaN.
3654 
3655  ExternalReference canonical_nan_reference =
3656  ExternalReference::address_of_canonical_non_hole_nan();
3657  __ movdbl(value, Operand::StaticVariable(canonical_nan_reference));
3658  __ bind(&have_value);
3659  }
3660 
3661  Operand double_store_operand = BuildFastArrayOperand(
3662  instr->elements(),
3663  instr->key(),
3664  FAST_DOUBLE_ELEMENTS,
3665  FixedDoubleArray::kHeaderSize - kHeapObjectTag,
3666  instr->additional_index());
3667  __ movdbl(double_store_operand, value);
3668 }
3669 
3670 
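// Illustrative sketch, not part of lithium-codegen-ia32.cc: the
// canonicalization above uses ucomisd(value, value), which reports
// "unordered" only for NaN. Every NaN is rewritten to one canonical non-hole
// NaN bit pattern before the store, so arbitrary NaN payloads cannot collide
// with the hole representation in a fast double array. In C++ terms:
static inline double CanonicalizeNaNSketch(double value, double canonical_nan) {
  return (value != value) ? canonical_nan : value;  // value != value <=> NaN
}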
3671 void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
3672  ASSERT(ToRegister(instr->context()).is(esi));
3673  ASSERT(ToRegister(instr->object()).is(edx));
3674  ASSERT(ToRegister(instr->key()).is(ecx));
3675  ASSERT(ToRegister(instr->value()).is(eax));
3676 
3677  Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
3678  ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
3679  : isolate()->builtins()->KeyedStoreIC_Initialize();
3680  CallCode(ic, RelocInfo::CODE_TARGET, instr);
3681 }
3682 
3683 
3684 void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
3685  Register object_reg = ToRegister(instr->object());
3686  Register new_map_reg = ToRegister(instr->new_map_reg());
3687 
3688  Handle<Map> from_map = instr->original_map();
3689  Handle<Map> to_map = instr->transitioned_map();
3690  ElementsKind from_kind = from_map->elements_kind();
3691  ElementsKind to_kind = to_map->elements_kind();
3692 
3693  Label not_applicable;
3694  bool is_simple_map_transition =
3695  IsSimpleMapChangeTransition(from_kind, to_kind);
3696  Label::Distance branch_distance =
3697  is_simple_map_transition ? Label::kNear : Label::kFar;
3698  __ cmp(FieldOperand(object_reg, HeapObject::kMapOffset), from_map);
3699  __ j(not_equal, &not_applicable, branch_distance);
3700  if (is_simple_map_transition) {
3701  Register object_reg = ToRegister(instr->object());
3702  Handle<Map> map = instr->hydrogen()->transitioned_map();
3703  __ mov(FieldOperand(object_reg, HeapObject::kMapOffset),
3704  Immediate(map));
3705  // Write barrier.
3706  ASSERT_NE(instr->temp_reg(), NULL);
3707  __ RecordWriteForMap(object_reg, to_map, new_map_reg,
3708  ToRegister(instr->temp_reg()),
3709  kDontSaveFPRegs);
3710  } else if (IsFastSmiElementsKind(from_kind) &&
3711  IsFastDoubleElementsKind(to_kind)) {
3712  __ mov(new_map_reg, to_map);
3713  Register fixed_object_reg = ToRegister(instr->temp_reg());
3714  ASSERT(fixed_object_reg.is(edx));
3715  ASSERT(new_map_reg.is(ebx));
3716  __ mov(fixed_object_reg, object_reg);
3717  CallCode(isolate()->builtins()->TransitionElementsSmiToDouble(),
3718  RelocInfo::CODE_TARGET, instr);
3719  } else if (IsFastDoubleElementsKind(from_kind) &&
3720  IsFastObjectElementsKind(to_kind)) {
3721  __ mov(new_map_reg, to_map);
3722  Register fixed_object_reg = ToRegister(instr->temp_reg());
3723  ASSERT(fixed_object_reg.is(edx));
3724  ASSERT(new_map_reg.is(ebx));
3725  __ mov(fixed_object_reg, object_reg);
3726  CallCode(isolate()->builtins()->TransitionElementsDoubleToObject(),
3727  RelocInfo::CODE_TARGET, instr);
3728  } else {
3729  UNREACHABLE();
3730  }
3731  __ bind(&not_applicable);
3732 }
3733 
3734 
3735 void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
3736  class DeferredStringCharCodeAt: public LDeferredCode {
3737  public:
3738  DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
3739  : LDeferredCode(codegen), instr_(instr) { }
3740  virtual void Generate() { codegen()->DoDeferredStringCharCodeAt(instr_); }
3741  virtual LInstruction* instr() { return instr_; }
3742  private:
3743  LStringCharCodeAt* instr_;
3744  };
3745 
3746  DeferredStringCharCodeAt* deferred =
3747  new(zone()) DeferredStringCharCodeAt(this, instr);
3748 
3749  StringCharLoadGenerator::Generate(masm(),
3750  factory(),
3751  ToRegister(instr->string()),
3752  ToRegister(instr->index()),
3753  ToRegister(instr->result()),
3754  deferred->entry());
3755  __ bind(deferred->exit());
3756 }
3757 
3758 
3759 void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
3760  Register string = ToRegister(instr->string());
3761  Register result = ToRegister(instr->result());
3762 
3763  // TODO(3095996): Get rid of this. For now, we need to make the
3764  // result register contain a valid pointer because it is already
3765  // contained in the register pointer map.
3766  __ Set(result, Immediate(0));
3767 
3768  PushSafepointRegistersScope scope(this);
3769  __ push(string);
3770  // Push the index as a smi. This is safe because of the checks in
3771  // DoStringCharCodeAt above.
3772  STATIC_ASSERT(String::kMaxLength <= Smi::kMaxValue);
3773  if (instr->index()->IsConstantOperand()) {
3774  int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
3775  __ push(Immediate(Smi::FromInt(const_index)));
3776  } else {
3777  Register index = ToRegister(instr->index());
3778  __ SmiTag(index);
3779  __ push(index);
3780  }
3781  CallRuntimeFromDeferred(Runtime::kStringCharCodeAt, 2,
3782  instr, instr->context());
3783  if (FLAG_debug_code) {
3784  __ AbortIfNotSmi(eax);
3785  }
3786  __ SmiUntag(eax);
3787  __ StoreToSafepointRegisterSlot(result, eax);
3788 }
3789 
3790 
3791 void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
3792  class DeferredStringCharFromCode: public LDeferredCode {
3793  public:
3794  DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
3795  : LDeferredCode(codegen), instr_(instr) { }
3796  virtual void Generate() { codegen()->DoDeferredStringCharFromCode(instr_); }
3797  virtual LInstruction* instr() { return instr_; }
3798  private:
3799  LStringCharFromCode* instr_;
3800  };
3801 
3802  DeferredStringCharFromCode* deferred =
3803  new(zone()) DeferredStringCharFromCode(this, instr);
3804 
3805  ASSERT(instr->hydrogen()->value()->representation().IsInteger32());
3806  Register char_code = ToRegister(instr->char_code());
3807  Register result = ToRegister(instr->result());
3808  ASSERT(!char_code.is(result));
3809 
3810  __ cmp(char_code, String::kMaxAsciiCharCode);
3811  __ j(above, deferred->entry());
3812  __ Set(result, Immediate(factory()->single_character_string_cache()));
3813  __ mov(result, FieldOperand(result,
3814  char_code, times_pointer_size,
3815  FixedArray::kHeaderSize));
3816  __ cmp(result, factory()->undefined_value());
3817  __ j(equal, deferred->entry());
3818  __ bind(deferred->exit());
3819 }
3820 
3821 
3822 void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
3823  Register char_code = ToRegister(instr->char_code());
3824  Register result = ToRegister(instr->result());
3825 
3826  // TODO(3095996): Get rid of this. For now, we need to make the
3827  // result register contain a valid pointer because it is already
3828  // contained in the register pointer map.
3829  __ Set(result, Immediate(0));
3830 
3831  PushSafepointRegistersScope scope(this);
3832  __ SmiTag(char_code);
3833  __ push(char_code);
3834  CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr, instr->context());
3835  __ StoreToSafepointRegisterSlot(result, eax);
3836 }
3837 
3838 
3839 void LCodeGen::DoStringLength(LStringLength* instr) {
3840  Register string = ToRegister(instr->string());
3841  Register result = ToRegister(instr->result());
3842  __ mov(result, FieldOperand(string, String::kLengthOffset));
3843 }
3844 
3845 
3846 void LCodeGen::DoStringAdd(LStringAdd* instr) {
3847  EmitPushTaggedOperand(instr->left());
3848  EmitPushTaggedOperand(instr->right());
3849  StringAddStub stub(NO_STRING_CHECK_IN_STUB);
3850  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
3851 }
3852 
3853 
3854 void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
3855  LOperand* input = instr->InputAt(0);
3856  ASSERT(input->IsRegister() || input->IsStackSlot());
3857  LOperand* output = instr->result();
3858  ASSERT(output->IsDoubleRegister());
3859  __ cvtsi2sd(ToDoubleRegister(output), ToOperand(input));
3860 }
3861 
3862 
3863 void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
3864  class DeferredNumberTagI: public LDeferredCode {
3865  public:
3866  DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr)
3867  : LDeferredCode(codegen), instr_(instr) { }
3868  virtual void Generate() { codegen()->DoDeferredNumberTagI(instr_); }
3869  virtual LInstruction* instr() { return instr_; }
3870  private:
3871  LNumberTagI* instr_;
3872  };
3873 
3874  LOperand* input = instr->InputAt(0);
3875  ASSERT(input->IsRegister() && input->Equals(instr->result()));
3876  Register reg = ToRegister(input);
3877 
3878  DeferredNumberTagI* deferred = new(zone()) DeferredNumberTagI(this, instr);
3879  __ SmiTag(reg);
3880  __ j(overflow, deferred->entry());
3881  __ bind(deferred->exit());
3882 }
3883 
3884 
3885 void LCodeGen::DoDeferredNumberTagI(LNumberTagI* instr) {
3886  Label slow;
3887  Register reg = ToRegister(instr->InputAt(0));
3888  Register tmp = reg.is(eax) ? ecx : eax;
3889 
3890  // Preserve the value of all registers.
3891  PushSafepointRegistersScope scope(this);
3892 
3893  // There was overflow, so bits 30 and 31 of the original integer
3894  // disagree. Try to allocate a heap number in new space and store
3895  // the value in there. If that fails, call the runtime system.
3896  Label done;
3897  __ SmiUntag(reg);
3898  __ xor_(reg, 0x80000000);
3899  __ cvtsi2sd(xmm0, Operand(reg));
3900  if (FLAG_inline_new) {
3901  __ AllocateHeapNumber(reg, tmp, no_reg, &slow);
3902  __ jmp(&done, Label::kNear);
3903  }
3904 
3905  // Slow case: Call the runtime system to do the number allocation.
3906  __ bind(&slow);
3907 
3908  // TODO(3095996): Put a valid pointer value in the stack slot where the result
3909  // register is stored, as this register is in the pointer map, but contains an
3910  // integer value.
3911  __ StoreToSafepointRegisterSlot(reg, Immediate(0));
3912  // NumberTagI and NumberTagD use the context from the frame, rather than
3913  // the environment's HContext or HInlinedContext value.
3914  // They only call Runtime::kAllocateHeapNumber.
3915  // The corresponding HChange instructions are added in a phase that does
3916  // not have easy access to the local context.
3917  __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
3918  __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
3919  RecordSafepointWithRegisters(
3920  instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
3921  if (!reg.is(eax)) __ mov(reg, eax);
3922 
3923  // Done. Put the value in xmm0 into the value of the allocated heap
3924  // number.
3925  __ bind(&done);
3926  __ movdbl(FieldOperand(reg, HeapNumber::kValueOffset), xmm0);
3927  __ StoreToSafepointRegisterSlot(reg, reg);
3928 }
3929 
3930 
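// Illustrative sketch, not part of lithium-codegen-ia32.cc: on ia32 a smi is
// the 31-bit integer shifted left by one with tag bit 0 cleared. Tagging is
// therefore a doubling, and its signed overflow (bits 30 and 31 of the input
// disagreeing) is exactly the "value does not fit in a smi" case that the
// deferred path above boxes as a heap number. Helper names are illustrative:
#include <cstdint>
static inline bool FitsInSmiSketch(int32_t value) {
  return value >= -(1 << 30) && value <= (1 << 30) - 1;  // bits 30, 31 agree
}
static inline int32_t SmiTagSketch(int32_t value) {
  return static_cast<int32_t>(static_cast<uint32_t>(value) << 1);
}
static inline int32_t SmiUntagSketch(int32_t smi) { return smi >> 1; }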
3931 void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
3932  class DeferredNumberTagD: public LDeferredCode {
3933  public:
3934  DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
3935  : LDeferredCode(codegen), instr_(instr) { }
3936  virtual void Generate() { codegen()->DoDeferredNumberTagD(instr_); }
3937  virtual LInstruction* instr() { return instr_; }
3938  private:
3939  LNumberTagD* instr_;
3940  };
3941 
3942  XMMRegister input_reg = ToDoubleRegister(instr->InputAt(0));
3943  Register reg = ToRegister(instr->result());
3944  Register tmp = ToRegister(instr->TempAt(0));
3945 
3946  DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr);
3947  if (FLAG_inline_new) {
3948  __ AllocateHeapNumber(reg, tmp, no_reg, deferred->entry());
3949  } else {
3950  __ jmp(deferred->entry());
3951  }
3952  __ bind(deferred->exit());
3953  __ movdbl(FieldOperand(reg, HeapNumber::kValueOffset), input_reg);
3954 }
3955 
3956 
3957 void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
3958  // TODO(3095996): Get rid of this. For now, we need to make the
3959  // result register contain a valid pointer because it is already
3960  // contained in the register pointer map.
3961  Register reg = ToRegister(instr->result());
3962  __ Set(reg, Immediate(0));
3963 
3964  PushSafepointRegistersScope scope(this);
3965  // NumberTagI and NumberTagD use the context from the frame, rather than
3966  // the environment's HContext or HInlinedContext value.
3967  // They only call Runtime::kAllocateHeapNumber.
3968  // The corresponding HChange instructions are added in a phase that does
3969  // not have easy access to the local context.
3970  __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
3971  __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
3972  RecordSafepointWithRegisters(
3973  instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
3974  __ StoreToSafepointRegisterSlot(reg, eax);
3975 }
3976 
3977 
3978 void LCodeGen::DoSmiTag(LSmiTag* instr) {
3979  LOperand* input = instr->InputAt(0);
3980  ASSERT(input->IsRegister() && input->Equals(instr->result()));
3981  ASSERT(!instr->hydrogen_value()->CheckFlag(HValue::kCanOverflow));
3982  __ SmiTag(ToRegister(input));
3983 }
3984 
3985 
3986 void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
3987  LOperand* input = instr->InputAt(0);
3988  ASSERT(input->IsRegister() && input->Equals(instr->result()));
3989  if (instr->needs_check()) {
3990  __ test(ToRegister(input), Immediate(kSmiTagMask));
3991  DeoptimizeIf(not_zero, instr->environment());
3992  } else {
3993  if (FLAG_debug_code) {
3994  __ AbortIfNotSmi(ToRegister(input));
3995  }
3996  }
3997  __ SmiUntag(ToRegister(input));
3998 }
3999 
4000 
4001 void LCodeGen::EmitNumberUntagD(Register input_reg,
4002  Register temp_reg,
4003  XMMRegister result_reg,
4004  bool deoptimize_on_undefined,
4005  bool deoptimize_on_minus_zero,
4006  LEnvironment* env) {
4007  Label load_smi, done;
4008 
4009  // Smi check.
4010  __ JumpIfSmi(input_reg, &load_smi, Label::kNear);
4011 
4012  // Heap number map check.
4013  __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
4014  factory()->heap_number_map());
4015  if (deoptimize_on_undefined) {
4016  DeoptimizeIf(not_equal, env);
4017  } else {
4018  Label heap_number;
4019  __ j(equal, &heap_number, Label::kNear);
4020 
4021  __ cmp(input_reg, factory()->undefined_value());
4022  DeoptimizeIf(not_equal, env);
4023 
4024  // Convert undefined to NaN.
4025  ExternalReference nan =
4026  ExternalReference::address_of_canonical_non_hole_nan();
4027  __ movdbl(result_reg, Operand::StaticVariable(nan));
4028  __ jmp(&done, Label::kNear);
4029 
4030  __ bind(&heap_number);
4031  }
4032  // Heap number to XMM conversion.
4033  __ movdbl(result_reg, FieldOperand(input_reg, HeapNumber::kValueOffset));
4034  if (deoptimize_on_minus_zero) {
4035  XMMRegister xmm_scratch = xmm0;
4036  __ xorps(xmm_scratch, xmm_scratch);
4037  __ ucomisd(result_reg, xmm_scratch);
4038  __ j(not_zero, &done, Label::kNear);
4039  __ movmskpd(temp_reg, result_reg);
4040  __ test_b(temp_reg, 1);
4041  DeoptimizeIf(not_zero, env);
4042  }
4043  __ jmp(&done, Label::kNear);
4044 
4045  // Smi to XMM conversion
4046  __ bind(&load_smi);
4047  __ SmiUntag(input_reg); // Untag smi before converting to float.
4048  __ cvtsi2sd(result_reg, Operand(input_reg));
4049  __ SmiTag(input_reg); // Retag smi.
4050  __ bind(&done);
4051 }
4052 
4053 
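// Illustrative sketch, not part of lithium-codegen-ia32.cc: the
// deoptimize_on_minus_zero path above cannot use an ordinary compare because
// -0.0 == 0.0; movmskpd copies the sign bit into a general-purpose register
// instead. The equivalent portable test:
#include <cmath>
static inline bool IsMinusZeroSketch(double value) {
  return value == 0.0 && std::signbit(value);
}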
4054 void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
4055  Label done, heap_number;
4056  Register input_reg = ToRegister(instr->InputAt(0));
4057 
4058  // Heap number map check.
4059  __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
4060  factory()->heap_number_map());
4061 
4062  if (instr->truncating()) {
4063  __ j(equal, &heap_number, Label::kNear);
4064  // Check for undefined. Undefined is converted to zero for truncating
4065  // conversions.
4066  __ cmp(input_reg, factory()->undefined_value());
4067  DeoptimizeIf(not_equal, instr->environment());
4068  __ mov(input_reg, 0);
4069  __ jmp(&done, Label::kNear);
4070 
4071  __ bind(&heap_number);
4072  if (CpuFeatures::IsSupported(SSE3)) {
4073  CpuFeatures::Scope scope(SSE3);
4074  Label convert;
4075  // Use more powerful conversion when sse3 is available.
4076  // Load x87 register with heap number.
4077  __ fld_d(FieldOperand(input_reg, HeapNumber::kValueOffset));
4078  // Get exponent alone and check for too-big exponent.
4079  __ mov(input_reg, FieldOperand(input_reg, HeapNumber::kExponentOffset));
4080  __ and_(input_reg, HeapNumber::kExponentMask);
4081  const uint32_t kTooBigExponent =
4082  (HeapNumber::kExponentBias + 63) << HeapNumber::kExponentShift;
4083  __ cmp(Operand(input_reg), Immediate(kTooBigExponent));
4084  __ j(less, &convert, Label::kNear);
4085  // Pop FPU stack before deoptimizing.
4086  __ fstp(0);
4087  DeoptimizeIf(no_condition, instr->environment());
4088 
4089  // Reserve space for 64 bit answer.
4090  __ bind(&convert);
4091  __ sub(Operand(esp), Immediate(kDoubleSize));
4092  // Do conversion, which cannot fail because we checked the exponent.
4093  __ fisttp_d(Operand(esp, 0));
4094  __ mov(input_reg, Operand(esp, 0)); // Low word of answer is the result.
4095  __ add(Operand(esp), Immediate(kDoubleSize));
4096  } else {
4097  XMMRegister xmm_temp = ToDoubleRegister(instr->TempAt(0));
4098  __ movdbl(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
4099  __ cvttsd2si(input_reg, Operand(xmm0));
4100  __ cmp(input_reg, 0x80000000u);
4101  __ j(not_equal, &done);
4102  // Check if the input was 0x80000000 (kMinInt).
4103  // If not, the conversion overflowed and we deoptimize.
4104  ExternalReference min_int = ExternalReference::address_of_min_int();
4105  __ movdbl(xmm_temp, Operand::StaticVariable(min_int));
4106  __ ucomisd(xmm_temp, xmm0);
4107  DeoptimizeIf(not_equal, instr->environment());
4108  DeoptimizeIf(parity_even, instr->environment()); // NaN.
4109  }
4110  } else {
4111  // Deoptimize if we don't have a heap number.
4112  DeoptimizeIf(not_equal, instr->environment());
4113 
4114  XMMRegister xmm_temp = ToDoubleRegister(instr->TempAt(0));
4115  __ movdbl(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
4116  __ cvttsd2si(input_reg, Operand(xmm0));
4117  __ cvtsi2sd(xmm_temp, Operand(input_reg));
4118  __ ucomisd(xmm0, xmm_temp);
4119  DeoptimizeIf(not_equal, instr->environment());
4120  DeoptimizeIf(parity_even, instr->environment()); // NaN.
4121  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
4122  __ test(input_reg, Operand(input_reg));
4123  __ j(not_zero, &done);
4124  __ movmskpd(input_reg, xmm0);
4125  __ and_(input_reg, 1);
4126  DeoptimizeIf(not_zero, instr->environment());
4127  }
4128  }
4129  __ bind(&done);
4130 }
4131 
4132 
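// Illustrative sketch, not part of lithium-codegen-ia32.cc: cvttsd2si signals
// failure by producing 0x80000000 (kMinInt), which is also the correct result
// for an input of exactly -2^31. The SSE2 path above therefore treats kMinInt
// as success only when the original double really was -2^31; the helper name
// is illustrative:
#include <cstdint>
static inline bool TruncationSucceededSketch(double input, int32_t converted) {
  // NaN compares false against every value, so it is classified as a failure.
  return converted != INT32_MIN || input == -2147483648.0;
}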
4133 void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
4134  class DeferredTaggedToI: public LDeferredCode {
4135  public:
4136  DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
4137  : LDeferredCode(codegen), instr_(instr) { }
4138  virtual void Generate() { codegen()->DoDeferredTaggedToI(instr_); }
4139  virtual LInstruction* instr() { return instr_; }
4140  private:
4141  LTaggedToI* instr_;
4142  };
4143 
4144  LOperand* input = instr->InputAt(0);
4145  ASSERT(input->IsRegister());
4146  ASSERT(input->Equals(instr->result()));
4147 
4148  Register input_reg = ToRegister(input);
4149 
4150  DeferredTaggedToI* deferred = new(zone()) DeferredTaggedToI(this, instr);
4151 
4152  // Smi check.
4153  __ JumpIfNotSmi(input_reg, deferred->entry());
4154 
4155  // Smi to int32 conversion
4156  __ SmiUntag(input_reg); // Untag smi.
4157 
4158  __ bind(deferred->exit());
4159 }
4160 
4161 
4162 void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
4163  LOperand* input = instr->InputAt(0);
4164  ASSERT(input->IsRegister());
4165  LOperand* temp = instr->TempAt(0);
4166  ASSERT(temp == NULL || temp->IsRegister());
4167  LOperand* result = instr->result();
4168  ASSERT(result->IsDoubleRegister());
4169 
4170  Register input_reg = ToRegister(input);
4171  XMMRegister result_reg = ToDoubleRegister(result);
4172 
4173  bool deoptimize_on_minus_zero =
4174  instr->hydrogen()->deoptimize_on_minus_zero();
4175  Register temp_reg = deoptimize_on_minus_zero ? ToRegister(temp) : no_reg;
4176 
4177  EmitNumberUntagD(input_reg,
4178  temp_reg,
4179  result_reg,
4180  instr->hydrogen()->deoptimize_on_undefined(),
4181  deoptimize_on_minus_zero,
4182  instr->environment());
4183 }
4184 
4185 
4186 void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
4187  LOperand* input = instr->InputAt(0);
4188  ASSERT(input->IsDoubleRegister());
4189  LOperand* result = instr->result();
4190  ASSERT(result->IsRegister());
4191 
4192  XMMRegister input_reg = ToDoubleRegister(input);
4193  Register result_reg = ToRegister(result);
4194 
4195  if (instr->truncating()) {
4196  // Performs a truncating conversion of a floating point number as used by
4197  // the JS bitwise operations.
4198  __ cvttsd2si(result_reg, Operand(input_reg));
4199  __ cmp(result_reg, 0x80000000u);
4200  if (CpuFeatures::IsSupported(SSE3)) {
4201  // This will deoptimize if the exponent of the input is out of range.
4202  CpuFeatures::Scope scope(SSE3);
4203  Label convert, done;
4204  __ j(not_equal, &done, Label::kNear);
4205  __ sub(Operand(esp), Immediate(kDoubleSize));
4206  __ movdbl(Operand(esp, 0), input_reg);
4207  // Get exponent alone and check for too-big exponent.
4208  __ mov(result_reg, Operand(esp, sizeof(int32_t)));
4209  __ and_(result_reg, HeapNumber::kExponentMask);
4210  const uint32_t kTooBigExponent =
4211  (HeapNumber::kExponentBias + 63) << HeapNumber::kExponentShift;
4212  __ cmp(Operand(result_reg), Immediate(kTooBigExponent));
4213  __ j(less, &convert, Label::kNear);
4214  __ add(Operand(esp), Immediate(kDoubleSize));
4215  DeoptimizeIf(no_condition, instr->environment());
4216  __ bind(&convert);
4217  // Do conversion, which cannot fail because we checked the exponent.
4218  __ fld_d(Operand(esp, 0));
4219  __ fisttp_d(Operand(esp, 0));
4220  __ mov(result_reg, Operand(esp, 0)); // Low word of answer is the result.
4221  __ add(Operand(esp), Immediate(kDoubleSize));
4222  __ bind(&done);
4223  } else {
4224  Label done;
4225  Register temp_reg = ToRegister(instr->TempAt(0));
4226  XMMRegister xmm_scratch = xmm0;
4227 
4228  // If cvttsd2si succeeded, we're done. Otherwise, we attempt
4229  // manual conversion.
4230  __ j(not_equal, &done, Label::kNear);
4231 
4232  // Get high 32 bits of the input in result_reg and temp_reg.
4233  __ pshufd(xmm_scratch, input_reg, 1);
4234  __ movd(Operand(temp_reg), xmm_scratch);
4235  __ mov(result_reg, temp_reg);
4236 
4237  // Prepare negation mask in temp_reg.
4238  __ sar(temp_reg, kBitsPerInt - 1);
4239 
4240  // Extract the exponent from result_reg and subtract adjusted
4241  // bias from it. The adjustment is selected in a way such that
4242  // when the difference is zero, the answer is in the low 32 bits
4243  // of the input, otherwise a shift has to be performed.
4244  __ shr(result_reg, HeapNumber::kExponentShift);
4245  __ and_(result_reg,
4246  HeapNumber::kExponentMask >> HeapNumber::kExponentShift);
4247  __ sub(Operand(result_reg),
4248  Immediate(HeapNumber::kExponentBias +
4249  HeapNumber::kMantissaBits +
4250  HeapNumber::kExponentBits));
4251  // Don't handle big (> kMantissaBits + kExponentBits == 63) or
4252  // special exponents.
4253  DeoptimizeIf(greater, instr->environment());
4254 
4255  // Zero out the sign and the exponent in the input (by shifting
4256  // it to the left) and restore the implicit mantissa bit,
4257  // i.e. convert the input to unsigned int64 shifted left by
4258  // kExponentBits.
4259  ExternalReference minus_zero = ExternalReference::address_of_minus_zero();
4260  // Minus zero has the most significant bit set and the other
4261  // bits cleared.
4262  __ movdbl(xmm_scratch, Operand::StaticVariable(minus_zero));
4263  __ psllq(input_reg, HeapNumber::kExponentBits);
4264  __ por(input_reg, xmm_scratch);
4265 
4266  // Get the amount to shift the input right in xmm_scratch.
4267  __ neg(result_reg);
4268  __ movd(xmm_scratch, Operand(result_reg));
4269 
4270  // Shift the input right and extract low 32 bits.
4271  __ psrlq(input_reg, xmm_scratch);
4272  __ movd(Operand(result_reg), input_reg);
4273 
4274  // Use the prepared mask in temp_reg to negate the result if necessary.
4275  __ xor_(result_reg, Operand(temp_reg));
4276  __ sub(result_reg, Operand(temp_reg));
4277  __ bind(&done);
4278  }
4279  } else {
4280  Label done;
4281  __ cvttsd2si(result_reg, Operand(input_reg));
4282  __ cvtsi2sd(xmm0, Operand(result_reg));
4283  __ ucomisd(xmm0, input_reg);
4284  DeoptimizeIf(not_equal, instr->environment());
4285  DeoptimizeIf(parity_even, instr->environment()); // NaN.
4286  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
4287  // The integer converted back is equal to the original. We
4288  // only have to test if we got -0 as an input.
4289  __ test(result_reg, Operand(result_reg));
4290  __ j(not_zero, &done, Label::kNear);
4291  __ movmskpd(result_reg, input_reg);
4292  // Bit 0 contains the sign of the double in input_reg.
4293  // If input was positive, we are ok and return 0, otherwise
4294  // deoptimize.
4295  __ and_(result_reg, 1);
4296  DeoptimizeIf(not_zero, instr->environment());
4297  }
4298  __ bind(&done);
4299  }
4300 }
4301 
4302 
4303 void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
4304  LOperand* input = instr->InputAt(0);
4305  __ test(ToOperand(input), Immediate(kSmiTagMask));
4306  DeoptimizeIf(not_zero, instr->environment());
4307 }
4308 
4309 
4310 void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
4311  LOperand* input = instr->InputAt(0);
4312  __ test(ToOperand(input), Immediate(kSmiTagMask));
4313  DeoptimizeIf(zero, instr->environment());
4314 }
4315 
4316 
4317 void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
4318  Register input = ToRegister(instr->InputAt(0));
4319  Register temp = ToRegister(instr->TempAt(0));
4320 
4321  __ mov(temp, FieldOperand(input, HeapObject::kMapOffset));
4322 
4323  if (instr->hydrogen()->is_interval_check()) {
4324  InstanceType first;
4325  InstanceType last;
4326  instr->hydrogen()->GetCheckInterval(&first, &last);
4327 
4328  __ cmpb(FieldOperand(temp, Map::kInstanceTypeOffset),
4329  static_cast<int8_t>(first));
4330 
4331  // If there is only one type in the interval check for equality.
4332  if (first == last) {
4333  DeoptimizeIf(not_equal, instr->environment());
4334  } else {
4335  DeoptimizeIf(below, instr->environment());
4336  // Omit check for the last type.
4337  if (last != LAST_TYPE) {
4338  __ cmpb(FieldOperand(temp, Map::kInstanceTypeOffset),
4339  static_cast<int8_t>(last));
4340  DeoptimizeIf(above, instr->environment());
4341  }
4342  }
4343  } else {
4344  uint8_t mask;
4345  uint8_t tag;
4346  instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);
4347 
4348  if (IsPowerOf2(mask)) {
4349  ASSERT(tag == 0 || IsPowerOf2(tag));
4350  __ test_b(FieldOperand(temp, Map::kInstanceTypeOffset), mask);
4351  DeoptimizeIf(tag == 0 ? not_zero : zero, instr->environment());
4352  } else {
4353  __ movzx_b(temp, FieldOperand(temp, Map::kInstanceTypeOffset));
4354  __ and_(temp, mask);
4355  __ cmp(temp, tag);
4356  DeoptimizeIf(not_equal, instr->environment());
4357  }
4358  }
4359 }
4360 
4361 
4362 void LCodeGen::DoCheckFunction(LCheckFunction* instr) {
4363  Handle<JSFunction> target = instr->hydrogen()->target();
4364  if (isolate()->heap()->InNewSpace(*target)) {
4365  Register reg = ToRegister(instr->value());
4366  Handle<JSGlobalPropertyCell> cell =
4367  isolate()->factory()->NewJSGlobalPropertyCell(target);
4368  __ cmp(reg, Operand::Cell(cell));
4369  } else {
4370  Operand operand = ToOperand(instr->value());
4371  __ cmp(operand, target);
4372  }
4373  DeoptimizeIf(not_equal, instr->environment());
4374 }
4375 
4376 
4377 void LCodeGen::DoCheckMapCommon(Register reg,
4378  Handle<Map> map,
4379  CompareMapMode mode,
4380  LEnvironment* env) {
4381  Label success;
4382  __ CompareMap(reg, map, &success, mode);
4383  DeoptimizeIf(not_equal, env);
4384  __ bind(&success);
4385 }
4386 
4387 
4388 void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
4389  LOperand* input = instr->InputAt(0);
4390  ASSERT(input->IsRegister());
4391  Register reg = ToRegister(input);
4392 
4393  Label success;
4394  SmallMapList* map_set = instr->hydrogen()->map_set();
4395  for (int i = 0; i < map_set->length() - 1; i++) {
4396  Handle<Map> map = map_set->at(i);
4397  __ CompareMap(reg, map, &success, REQUIRE_EXACT_MAP);
4398  __ j(equal, &success);
4399  }
4400  Handle<Map> map = map_set->last();
4401  DoCheckMapCommon(reg, map, REQUIRE_EXACT_MAP, instr->environment());
4402  __ bind(&success);
4403 }
4404 
4405 
4406 void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
4407  XMMRegister value_reg = ToDoubleRegister(instr->unclamped());
4408  Register result_reg = ToRegister(instr->result());
4409  __ ClampDoubleToUint8(value_reg, xmm0, result_reg);
4410 }
4411 
4412 
4413 void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
4414  ASSERT(instr->unclamped()->Equals(instr->result()));
4415  Register value_reg = ToRegister(instr->result());
4416  __ ClampUint8(value_reg);
4417 }
4418 
4419 
4420 void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
4421  ASSERT(instr->unclamped()->Equals(instr->result()));
4422  Register input_reg = ToRegister(instr->unclamped());
4423  Label is_smi, done, heap_number;
4424 
4425  __ JumpIfSmi(input_reg, &is_smi);
4426 
4427  // Check for heap number
4428  __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
4429  factory()->heap_number_map());
4430  __ j(equal, &heap_number, Label::kNear);
4431 
4432  // Check for undefined. Undefined is converted to zero for clamping
4433  // conversions.
4434  __ cmp(input_reg, factory()->undefined_value());
4435  DeoptimizeIf(not_equal, instr->environment());
4436  __ mov(input_reg, 0);
4437  __ jmp(&done, Label::kNear);
4438 
4439  // Heap number
4440  __ bind(&heap_number);
4441  __ movdbl(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
4442  __ ClampDoubleToUint8(xmm0, xmm1, input_reg);
4443  __ jmp(&done, Label::kNear);
4444 
4445  // smi
4446  __ bind(&is_smi);
4447  __ SmiUntag(input_reg);
4448  __ ClampUint8(input_reg);
4449 
4450  __ bind(&done);
4451 }
4452 
4453 
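// Illustrative sketch, not part of lithium-codegen-ia32.cc: DoClampTToUint8
// above clamps a smi directly, routes heap numbers through
// ClampDoubleToUint8, and maps undefined to 0. The double case behaves like
// this C++ sketch (round to nearest with ties to even under the default
// floating-point rounding mode, NaN mapped to 0):
#include <cmath>
#include <cstdint>
static inline uint8_t ClampDoubleToUint8Sketch(double value) {
  if (!(value > 0.0)) return 0;        // NaN, -0.0 and negative values
  if (value >= 255.0) return 255;
  return static_cast<uint8_t>(std::nearbyint(value));
}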
4454 void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
4455  Register reg = ToRegister(instr->TempAt(0));
4456 
4457  Handle<JSObject> holder = instr->holder();
4458  Handle<JSObject> current_prototype = instr->prototype();
4459 
4460  // Load prototype object.
4461  __ LoadHeapObject(reg, current_prototype);
4462 
4463  // Check prototype maps up to the holder.
4464  while (!current_prototype.is_identical_to(holder)) {
4465  DoCheckMapCommon(reg, Handle<Map>(current_prototype->map()),
4466  ALLOW_ELEMENT_TRANSITION_MAPS, instr->environment());
4467 
4468  current_prototype =
4469  Handle<JSObject>(JSObject::cast(current_prototype->GetPrototype()));
4470  // Load next prototype object.
4471  __ LoadHeapObject(reg, current_prototype);
4472  }
4473 
4474  // Check the holder map.
4475  DoCheckMapCommon(reg, Handle<Map>(current_prototype->map()),
4476  ALLOW_ELEMENT_TRANSITION_MAPS, instr->environment());
4477 }
4478 
4479 
4480 void LCodeGen::DoAllocateObject(LAllocateObject* instr) {
4481  class DeferredAllocateObject: public LDeferredCode {
4482  public:
4483  DeferredAllocateObject(LCodeGen* codegen, LAllocateObject* instr)
4484  : LDeferredCode(codegen), instr_(instr) { }
4485  virtual void Generate() { codegen()->DoDeferredAllocateObject(instr_); }
4486  virtual LInstruction* instr() { return instr_; }
4487  private:
4488  LAllocateObject* instr_;
4489  };
4490 
4491  DeferredAllocateObject* deferred =
4492  new(zone()) DeferredAllocateObject(this, instr);
4493 
4494  Register result = ToRegister(instr->result());
4495  Register scratch = ToRegister(instr->TempAt(0));
4496  Handle<JSFunction> constructor = instr->hydrogen()->constructor();
4497  Handle<Map> initial_map(constructor->initial_map());
4498  int instance_size = initial_map->instance_size();
4499  ASSERT(initial_map->pre_allocated_property_fields() +
4500  initial_map->unused_property_fields() -
4501  initial_map->inobject_properties() == 0);
4502 
4503  // Allocate memory for the object. The initial map might change when
4504  // the constructor's prototype changes, but instance size and property
4505  // counts remain unchanged (if slack tracking finished).
4506  ASSERT(!constructor->shared()->IsInobjectSlackTrackingInProgress());
4507  __ AllocateInNewSpace(instance_size,
4508  result,
4509  no_reg,
4510  scratch,
4511  deferred->entry(),
4512  TAG_OBJECT);
4513 
4514  __ bind(deferred->exit());
4515  if (FLAG_debug_code) {
4516  Label is_in_new_space;
4517  __ JumpIfInNewSpace(result, scratch, &is_in_new_space);
4518  __ Abort("Allocated object is not in new-space");
4519  __ bind(&is_in_new_space);
4520  }
4521 
4522  // Load the initial map.
4523  Register map = scratch;
4524  __ LoadHeapObject(scratch, constructor);
4525  __ mov(map, FieldOperand(scratch, JSFunction::kPrototypeOrInitialMapOffset));
4526 
4527  if (FLAG_debug_code) {
4528  __ AbortIfSmi(map);
4529  __ cmpb(FieldOperand(map, Map::kInstanceSizeOffset),
4530  instance_size >> kPointerSizeLog2);
4531  __ Assert(equal, "Unexpected instance size");
4532  __ cmpb(FieldOperand(map, Map::kPreAllocatedPropertyFieldsOffset),
4533  initial_map->pre_allocated_property_fields());
4534  __ Assert(equal, "Unexpected pre-allocated property fields count");
4535  __ cmpb(FieldOperand(map, Map::kUnusedPropertyFieldsOffset),
4536  initial_map->unused_property_fields());
4537  __ Assert(equal, "Unexpected unused property fields count");
4538  __ cmpb(FieldOperand(map, Map::kInObjectPropertiesOffset),
4539  initial_map->inobject_properties());
4540  __ Assert(equal, "Unexpected in-object property fields count");
4541  }
4542 
4543  // Initialize map and fields of the newly allocated object.
4544  ASSERT(initial_map->instance_type() == JS_OBJECT_TYPE);
4545  __ mov(FieldOperand(result, JSObject::kMapOffset), map);
4546  __ mov(scratch, factory()->empty_fixed_array());
4547  __ mov(FieldOperand(result, JSObject::kElementsOffset), scratch);
4548  __ mov(FieldOperand(result, JSObject::kPropertiesOffset), scratch);
4549  if (initial_map->inobject_properties() != 0) {
4550  __ mov(scratch, factory()->undefined_value());
4551  for (int i = 0; i < initial_map->inobject_properties(); i++) {
4552  int property_offset = JSObject::kHeaderSize + i * kPointerSize;
4553  __ mov(FieldOperand(result, property_offset), scratch);
4554  }
4555  }
4556 }
4557 
4558 
4559 void LCodeGen::DoDeferredAllocateObject(LAllocateObject* instr) {
4560  Register result = ToRegister(instr->result());
4561  Handle<JSFunction> constructor = instr->hydrogen()->constructor();
4562  Handle<Map> initial_map(constructor->initial_map());
4563  int instance_size = initial_map->instance_size();
4564 
4565  // TODO(3095996): Get rid of this. For now, we need to make the
4566  // result register contain a valid pointer because it is already
4567  // contained in the register pointer map.
4568  __ Set(result, Immediate(0));
4569 
4570  PushSafepointRegistersScope scope(this);
4571  __ push(Immediate(Smi::FromInt(instance_size)));
4572  CallRuntimeFromDeferred(
4573  Runtime::kAllocateInNewSpace, 1, instr, instr->context());
4574  __ StoreToSafepointRegisterSlot(result, eax);
4575 }
4576 
4577 
4578 void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
4579  ASSERT(ToRegister(instr->context()).is(esi));
4580  Heap* heap = isolate()->heap();
4581  ElementsKind boilerplate_elements_kind =
4582  instr->hydrogen()->boilerplate_elements_kind();
4583 
4584  // Deopt if the array literal boilerplate ElementsKind is of a type different
4585  // than the expected one. The check isn't necessary if the boilerplate has
4586  // already been converted to TERMINAL_FAST_ELEMENTS_KIND.
4587  if (CanTransitionToMoreGeneralFastElementsKind(
4588  boilerplate_elements_kind, true)) {
4589  __ LoadHeapObject(eax, instr->hydrogen()->boilerplate_object());
4590  __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
4591  // Load the map's "bit field 2". We only need the first byte,
4592  // but the following masking takes care of that anyway.
4593  __ mov(ebx, FieldOperand(ebx, Map::kBitField2Offset));
4594  // Retrieve elements_kind from bit field 2.
4595  __ and_(ebx, Map::kElementsKindMask);
4596  __ cmp(ebx, boilerplate_elements_kind << Map::kElementsKindShift);
4597  DeoptimizeIf(not_equal, instr->environment());
4598  }
4599 
4600  // Set up the parameters to the stub/runtime call.
4603  __ push(Immediate(Smi::FromInt(instr->hydrogen()->literal_index())));
4604  // Boilerplate already exists, constant elements are never accessed.
4605  // Pass an empty fixed array.
4606  __ push(Immediate(Handle<FixedArray>(heap->empty_fixed_array())));
4607 
4608  // Pick the right runtime function or stub to call.
4609  int length = instr->hydrogen()->length();
4610  if (instr->hydrogen()->IsCopyOnWrite()) {
4611  ASSERT(instr->hydrogen()->depth() == 1);
4612  FastCloneShallowArrayStub::Mode mode =
4613  FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS;
4614  FastCloneShallowArrayStub stub(mode, length);
4615  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
4616  } else if (instr->hydrogen()->depth() > 1) {
4617  CallRuntime(Runtime::kCreateArrayLiteral, 3, instr);
4618  } else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) {
4619  CallRuntime(Runtime::kCreateArrayLiteralShallow, 3, instr);
4620  } else {
4621  FastCloneShallowArrayStub::Mode mode =
4622  boilerplate_elements_kind == FAST_DOUBLE_ELEMENTS
4623  ? FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS
4624  : FastCloneShallowArrayStub::CLONE_ELEMENTS;
4625  FastCloneShallowArrayStub stub(mode, length);
4626  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
4627  }
4628 }
4629 
4630 
4631 void LCodeGen::EmitDeepCopy(Handle<JSObject> object,
4632  Register result,
4633  Register source,
4634  int* offset) {
4635  ASSERT(!source.is(ecx));
4636  ASSERT(!result.is(ecx));
4637 
4638  if (FLAG_debug_code) {
4639  __ LoadHeapObject(ecx, object);
4640  __ cmp(source, ecx);
4641  __ Assert(equal, "Unexpected object literal boilerplate");
4642  __ mov(ecx, FieldOperand(source, HeapObject::kMapOffset));
4643  __ cmp(ecx, Handle<Map>(object->map()));
4644  __ Assert(equal, "Unexpected boilerplate map");
4645  __ mov(ecx, FieldOperand(ecx, Map::kBitField2Offset));
4646  __ and_(ecx, Map::kElementsKindMask);
4647  __ cmp(ecx, object->GetElementsKind() << Map::kElementsKindShift);
4648  __ Assert(equal, "Unexpected boilerplate elements kind");
4649  }
4650 
4651  // Only elements backing stores for non-COW arrays need to be copied.
4652  Handle<FixedArrayBase> elements(object->elements());
4653  bool has_elements = elements->length() > 0 &&
4654  elements->map() != isolate()->heap()->fixed_cow_array_map();
4655 
4656  // Increase the offset so that subsequent objects end up right after
4657  // this object and its backing store.
4658  int object_offset = *offset;
4659  int object_size = object->map()->instance_size();
4660  int elements_offset = *offset + object_size;
4661  int elements_size = has_elements ? elements->Size() : 0;
4662  *offset += object_size + elements_size;
4663 
4664  // Copy object header.
4665  ASSERT(object->properties()->length() == 0);
4666  int inobject_properties = object->map()->inobject_properties();
4667  int header_size = object_size - inobject_properties * kPointerSize;
4668  for (int i = 0; i < header_size; i += kPointerSize) {
4669  if (has_elements && i == JSObject::kElementsOffset) {
4670  __ lea(ecx, Operand(result, elements_offset));
4671  } else {
4672  __ mov(ecx, FieldOperand(source, i));
4673  }
4674  __ mov(FieldOperand(result, object_offset + i), ecx);
4675  }
4676 
4677  // Copy in-object properties.
4678  for (int i = 0; i < inobject_properties; i++) {
4679  int total_offset = object_offset + object->GetInObjectPropertyOffset(i);
4680  Handle<Object> value = Handle<Object>(object->InObjectPropertyAt(i));
4681  if (value->IsJSObject()) {
4682  Handle<JSObject> value_object = Handle<JSObject>::cast(value);
4683  __ lea(ecx, Operand(result, *offset));
4684  __ mov(FieldOperand(result, total_offset), ecx);
4685  __ LoadHeapObject(source, value_object);
4686  EmitDeepCopy(value_object, result, source, offset);
4687  } else if (value->IsHeapObject()) {
4688  __ LoadHeapObject(ecx, Handle<HeapObject>::cast(value));
4689  __ mov(FieldOperand(result, total_offset), ecx);
4690  } else {
4691  __ mov(FieldOperand(result, total_offset), Immediate(value));
4692  }
4693  }
4694 
4695  if (has_elements) {
4696  // Copy elements backing store header.
4697  __ LoadHeapObject(source, elements);
4698  for (int i = 0; i < FixedArray::kHeaderSize; i += kPointerSize) {
4699  __ mov(ecx, FieldOperand(source, i));
4700  __ mov(FieldOperand(result, elements_offset + i), ecx);
4701  }
4702 
4703  // Copy elements backing store content.
4704  int elements_length = elements->length();
4705  if (elements->IsFixedDoubleArray()) {
4706  Handle<FixedDoubleArray> double_array =
4707  Handle<FixedDoubleArray>::cast(elements);
4708  for (int i = 0; i < elements_length; i++) {
4709  int64_t value = double_array->get_representation(i);
4710  int32_t value_low = value & 0xFFFFFFFF;
4711  int32_t value_high = value >> 32;
4712  int total_offset =
4713  elements_offset + FixedDoubleArray::OffsetOfElementAt(i);
4714  __ mov(FieldOperand(result, total_offset), Immediate(value_low));
4715  __ mov(FieldOperand(result, total_offset + 4), Immediate(value_high));
4716  }
4717  } else if (elements->IsFixedArray()) {
4718  Handle<FixedArray> fast_elements = Handle<FixedArray>::cast(elements);
4719  for (int i = 0; i < elements_length; i++) {
4720  int total_offset = elements_offset + FixedArray::OffsetOfElementAt(i);
4721  Handle<Object> value(fast_elements->get(i));
4722  if (value->IsJSObject()) {
4723  Handle<JSObject> value_object = Handle<JSObject>::cast(value);
4724  __ lea(ecx, Operand(result, *offset));
4725  __ mov(FieldOperand(result, total_offset), ecx);
4726  __ LoadHeapObject(source, value_object);
4727  EmitDeepCopy(value_object, result, source, offset);
4728  } else if (value->IsHeapObject()) {
4729  __ LoadHeapObject(ecx, Handle<HeapObject>::cast(value));
4730  __ mov(FieldOperand(result, total_offset), ecx);
4731  } else {
4732  __ mov(FieldOperand(result, total_offset), Immediate(value));
4733  }
4734  }
4735  } else {
4736  UNREACHABLE();
4737  }
4738  }
4739 }
4740 
4741 
4742 void LCodeGen::DoFastLiteral(LFastLiteral* instr) {
4743  ASSERT(ToRegister(instr->context()).is(esi));
4744  int size = instr->hydrogen()->total_size();
4745  ElementsKind boilerplate_elements_kind =
4746  instr->hydrogen()->boilerplate()->GetElementsKind();
4747 
4748  // Deopt if the literal boilerplate ElementsKind is of a type different than
4749  // the expected one. The check isn't necessary if the boilerplate has
4750  // already been converted to TERMINAL_FAST_ELEMENTS_KIND.
4751  if (CanTransitionToMoreGeneralFastElementsKind(
4752  boilerplate_elements_kind, true)) {
4753  __ LoadHeapObject(ebx, instr->hydrogen()->boilerplate());
4754  __ mov(ecx, FieldOperand(ebx, HeapObject::kMapOffset));
4755  // Load the map's "bit field 2". We only need the first byte,
4756  // but the following masking takes care of that anyway.
4757  __ mov(ecx, FieldOperand(ecx, Map::kBitField2Offset));
4758  // Retrieve elements_kind from bit field 2.
4759  __ and_(ecx, Map::kElementsKindMask);
4760  __ cmp(ecx, boilerplate_elements_kind << Map::kElementsKindShift);
4761  DeoptimizeIf(not_equal, instr->environment());
4762  }
4763 
4764  // Allocate all objects that are part of the literal in one big
4765  // allocation. This avoids multiple limit checks.
4766  Label allocated, runtime_allocate;
4767  __ AllocateInNewSpace(size, eax, ecx, edx, &runtime_allocate, TAG_OBJECT);
4768  __ jmp(&allocated);
4769 
4770  __ bind(&runtime_allocate);
4771  __ push(Immediate(Smi::FromInt(size)));
4772  CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
4773 
4774  __ bind(&allocated);
4775  int offset = 0;
4776  __ LoadHeapObject(ebx, instr->hydrogen()->boilerplate());
4777  EmitDeepCopy(instr->hydrogen()->boilerplate(), eax, ebx, &offset);
4778  ASSERT_EQ(size, offset);
4779 }
4780 
4781 
4782 void LCodeGen::DoObjectLiteral(LObjectLiteral* instr) {
4783  ASSERT(ToRegister(instr->context()).is(esi));
4784  Handle<FixedArray> literals(instr->environment()->closure()->literals());
4785  Handle<FixedArray> constant_properties =
4786  instr->hydrogen()->constant_properties();
4787 
4788  // Set up the parameters to the stub/runtime call.
4789  __ PushHeapObject(literals);
4790  __ push(Immediate(Smi::FromInt(instr->hydrogen()->literal_index())));
4791  __ push(Immediate(constant_properties));
4792  int flags = instr->hydrogen()->fast_elements()
4793  ? ObjectLiteral::kFastElements
4794  : ObjectLiteral::kNoFlags;
4795  flags |= instr->hydrogen()->has_function()
4796  ? ObjectLiteral::kHasFunction
4797  : ObjectLiteral::kNoFlags;
4798  __ push(Immediate(Smi::FromInt(flags)));
4799 
4800  // Pick the right runtime function or stub to call.
4801  int properties_count = constant_properties->length() / 2;
4802  if (instr->hydrogen()->depth() > 1) {
4803  CallRuntime(Runtime::kCreateObjectLiteral, 4, instr);
4804  } else if (flags != ObjectLiteral::kFastElements ||
4805  properties_count > FastCloneShallowObjectStub::kMaximumClonedProperties) {
4806  CallRuntime(Runtime::kCreateObjectLiteralShallow, 4, instr);
4807  } else {
4808  FastCloneShallowObjectStub stub(properties_count);
4809  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
4810  }
4811 }
4812 
4813 
4814 void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
4815  ASSERT(ToRegister(instr->InputAt(0)).is(eax));
4816  __ push(eax);
4817  CallRuntime(Runtime::kToFastProperties, 1, instr);
4818 }
4819 
4820 
4821 void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
4822  ASSERT(ToRegister(instr->context()).is(esi));
4823  Label materialized;
4824  // Registers will be used as follows:
4825  // edi = JS function.
4826  // ecx = literals array.
4827  // ebx = regexp literal.
4828  // eax = regexp literal clone.
4829  // esi = context.
4830  __ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
4831  __ mov(ecx, FieldOperand(edi, JSFunction::kLiteralsOffset));
4832  int literal_offset = FixedArray::kHeaderSize +
4833  instr->hydrogen()->literal_index() * kPointerSize;
4834  __ mov(ebx, FieldOperand(ecx, literal_offset));
4835  __ cmp(ebx, factory()->undefined_value());
4836  __ j(not_equal, &materialized, Label::kNear);
4837 
4838  // Create regexp literal using runtime function
4839  // Result will be in eax.
4840  __ push(ecx);
4841  __ push(Immediate(Smi::FromInt(instr->hydrogen()->literal_index())));
4842  __ push(Immediate(instr->hydrogen()->pattern()));
4843  __ push(Immediate(instr->hydrogen()->flags()));
4844  CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr);
4845  __ mov(ebx, eax);
4846 
4847  __ bind(&materialized);
4848  int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
4849  Label allocated, runtime_allocate;
4850  __ AllocateInNewSpace(size, eax, ecx, edx, &runtime_allocate, TAG_OBJECT);
4851  __ jmp(&allocated);
4852 
4853  __ bind(&runtime_allocate);
4854  __ push(ebx);
4855  __ push(Immediate(Smi::FromInt(size)));
4856  CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
4857  __ pop(ebx);
4858 
4859  __ bind(&allocated);
4860  // Copy the content into the newly allocated memory.
4861  // (Unroll copy loop once for better throughput).
4862  for (int i = 0; i < size - kPointerSize; i += 2 * kPointerSize) {
4863  __ mov(edx, FieldOperand(ebx, i));
4864  __ mov(ecx, FieldOperand(ebx, i + kPointerSize));
4865  __ mov(FieldOperand(eax, i), edx);
4866  __ mov(FieldOperand(eax, i + kPointerSize), ecx);
4867  }
4868  if ((size % (2 * kPointerSize)) != 0) {
4869  __ mov(edx, FieldOperand(ebx, size - kPointerSize));
4870  __ mov(FieldOperand(eax, size - kPointerSize), edx);
4871  }
4872 }
4873 
4874 
4875 void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
4876  ASSERT(ToRegister(instr->context()).is(esi));
4877  // Use the fast case closure allocation code that allocates in new
4878  // space for nested functions that don't need literals cloning.
4879  Handle<SharedFunctionInfo> shared_info = instr->shared_info();
4880  bool pretenure = instr->hydrogen()->pretenure();
4881  if (!pretenure && shared_info->num_literals() == 0) {
4882  FastNewClosureStub stub(shared_info->language_mode());
4883  __ push(Immediate(shared_info));
4884  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
4885  } else {
4886  __ push(esi);
4887  __ push(Immediate(shared_info));
4888  __ push(Immediate(pretenure
4889  ? factory()->true_value()
4890  : factory()->false_value()));
4891  CallRuntime(Runtime::kNewClosure, 3, instr);
4892  }
4893 }
4894 
4895 
4896 void LCodeGen::DoTypeof(LTypeof* instr) {
4897  LOperand* input = instr->InputAt(1);
4898  EmitPushTaggedOperand(input);
4899  CallRuntime(Runtime::kTypeof, 1, instr);
4900 }
4901 
4902 
4903 void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
4904  Register input = ToRegister(instr->InputAt(0));
4905  int true_block = chunk_->LookupDestination(instr->true_block_id());
4906  int false_block = chunk_->LookupDestination(instr->false_block_id());
4907  Label* true_label = chunk_->GetAssemblyLabel(true_block);
4908  Label* false_label = chunk_->GetAssemblyLabel(false_block);
4909 
4910  Condition final_branch_condition =
4911  EmitTypeofIs(true_label, false_label, input, instr->type_literal());
4912  if (final_branch_condition != no_condition) {
4913  EmitBranch(true_block, false_block, final_branch_condition);
4914  }
4915 }
4916 
4917 
4918 Condition LCodeGen::EmitTypeofIs(Label* true_label,
4919  Label* false_label,
4920  Register input,
4921  Handle<String> type_name) {
4922  Condition final_branch_condition = no_condition;
4923  if (type_name->Equals(heap()->number_symbol())) {
4924  __ JumpIfSmi(input, true_label);
4925  __ cmp(FieldOperand(input, HeapObject::kMapOffset),
4926  factory()->heap_number_map());
4927  final_branch_condition = equal;
4928 
4929  } else if (type_name->Equals(heap()->string_symbol())) {
4930  __ JumpIfSmi(input, false_label);
4931  __ CmpObjectType(input, FIRST_NONSTRING_TYPE, input);
4932  __ j(above_equal, false_label);
4933  __ test_b(FieldOperand(input, Map::kBitFieldOffset),
4934  1 << Map::kIsUndetectable);
4935  final_branch_condition = zero;
4936 
4937  } else if (type_name->Equals(heap()->boolean_symbol())) {
4938  __ cmp(input, factory()->true_value());
4939  __ j(equal, true_label);
4940  __ cmp(input, factory()->false_value());
4941  final_branch_condition = equal;
4942 
4943  } else if (FLAG_harmony_typeof && type_name->Equals(heap()->null_symbol())) {
4944  __ cmp(input, factory()->null_value());
4945  final_branch_condition = equal;
4946 
4947  } else if (type_name->Equals(heap()->undefined_symbol())) {
4948  __ cmp(input, factory()->undefined_value());
4949  __ j(equal, true_label);
4950  __ JumpIfSmi(input, false_label);
4951  // Check for undetectable objects => true.
4952  __ mov(input, FieldOperand(input, HeapObject::kMapOffset));
4953  __ test_b(FieldOperand(input, Map::kBitFieldOffset),
4954  1 << Map::kIsUndetectable);
4955  final_branch_condition = not_zero;
4956 
4957  } else if (type_name->Equals(heap()->function_symbol())) {
4958  STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
4959  __ JumpIfSmi(input, false_label);
4960  __ CmpObjectType(input, JS_FUNCTION_TYPE, input);
4961  __ j(equal, true_label);
4962  __ CmpInstanceType(input, JS_FUNCTION_PROXY_TYPE);
4963  final_branch_condition = equal;
4964 
4965  } else if (type_name->Equals(heap()->object_symbol())) {
4966  __ JumpIfSmi(input, false_label);
4967  if (!FLAG_harmony_typeof) {
4968  __ cmp(input, factory()->null_value());
4969  __ j(equal, true_label);
4970  }
4971  __ CmpObjectType(input, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE, input);
4972  __ j(below, false_label);
4973  __ CmpInstanceType(input, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
4974  __ j(above, false_label);
4975  // Check for undetectable objects => false.
4976  __ test_b(FieldOperand(input, Map::kBitFieldOffset),
4977  1 << Map::kIsUndetectable);
4978  final_branch_condition = zero;
4979 
4980  } else {
4981  __ jmp(false_label);
4982  }
4983  return final_branch_condition;
4984 }
4985 
4986 
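EmitTypeofIs lowers each typeof comparison to direct tag and map checks. As a rough reference for the branch structure above, this is the classification being implemented (illustrative C++ only; undetectable objects and the --harmony_typeof null case are omitted):

#include <string>

enum class Tag { kSmi, kHeapNumber, kString, kTrue, kFalse,
                 kNull, kUndefined, kFunction, kSpecObject };

// Illustrative only: the typeof classification that the assembly above
// branches on, expressed as ordinary control flow.
std::string TypeofName(Tag tag) {
  switch (tag) {
    case Tag::kSmi:
    case Tag::kHeapNumber: return "number";
    case Tag::kString:     return "string";
    case Tag::kTrue:
    case Tag::kFalse:      return "boolean";
    case Tag::kUndefined:  return "undefined";
    case Tag::kFunction:   return "function";  // also function proxies
    case Tag::kNull:
    case Tag::kSpecObject: return "object";
  }
  return "object";  // unreachable; keeps the compiler happy
}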
4987 void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
4988  Register temp = ToRegister(instr->TempAt(0));
4989  int true_block = chunk_->LookupDestination(instr->true_block_id());
4990  int false_block = chunk_->LookupDestination(instr->false_block_id());
4991 
4992  EmitIsConstructCall(temp);
4993  EmitBranch(true_block, false_block, equal);
4994 }
4995 
4996 
4997 void LCodeGen::EmitIsConstructCall(Register temp) {
4998  // Get the frame pointer for the calling frame.
4999  __ mov(temp, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
5000 
5001  // Skip the arguments adaptor frame if it exists.
5002  Label check_frame_marker;
5003  __ cmp(Operand(temp, StandardFrameConstants::kContextOffset),
5004  Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
5005  __ j(not_equal, &check_frame_marker, Label::kNear);
5006  __ mov(temp, Operand(temp, StandardFrameConstants::kCallerFPOffset));
5007 
5008  // Check the marker in the calling frame.
5009  __ bind(&check_frame_marker);
5010  __ cmp(Operand(temp, StandardFrameConstants::kMarkerOffset),
5011  Immediate(Smi::FromInt(StackFrame::CONSTRUCT)));
5012 }
5013 
5014 
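EmitIsConstructCall walks one frame up from the current frame, skips an arguments-adaptor frame if one is present, and then tests the frame marker. The same walk over a hypothetical frame description (illustrative C++ only, not V8's frame layout):

// Illustrative only: the frame walk performed by EmitIsConstructCall.
struct Frame {
  const Frame* caller;        // what kCallerFPOffset dereferences to
  bool is_arguments_adaptor;  // what the kContextOffset check detects
  int marker;                 // what kMarkerOffset holds
};

bool IsConstructCall(const Frame* current, int construct_marker) {
  const Frame* f = current->caller;
  if (f->is_arguments_adaptor) f = f->caller;  // skip the adaptor frame
  return f->marker == construct_marker;        // StackFrame::CONSTRUCT above
}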
5015 void LCodeGen::EnsureSpaceForLazyDeopt() {
5016  // Ensure that we have enough space after the previous lazy-bailout
5017  // instruction for patching the code here.
5018  int current_pc = masm()->pc_offset();
5019  int patch_size = Deoptimizer::patch_size();
5020  if (current_pc < last_lazy_deopt_pc_ + patch_size) {
5021  int padding_size = last_lazy_deopt_pc_ + patch_size - current_pc;
5022  __ Nop(padding_size);
5023  }
5024  last_lazy_deopt_pc_ = masm()->pc_offset();
5025 }
5026 
5027 
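EnsureSpaceForLazyDeopt pads with nops so that the deoptimizer can later overwrite a full patch_size() worth of bytes at the previous lazy-bailout site without running into the code emitted here. The amount of padding reduces to (illustrative C++ only):

// Illustrative only: the nop padding emitted by EnsureSpaceForLazyDeopt.
int LazyDeoptPadding(int current_pc, int last_lazy_deopt_pc, int patch_size) {
  int needed_end = last_lazy_deopt_pc + patch_size;
  return current_pc < needed_end ? needed_end - current_pc : 0;
}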
5028 void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
5029  EnsureSpaceForLazyDeopt();
5030  ASSERT(instr->HasEnvironment());
5031  LEnvironment* env = instr->environment();
5032  RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
5033  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
5034 }
5035 
5036 
5037 void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
5038  DeoptimizeIf(no_condition, instr->environment());
5039 }
5040 
5041 
5042 void LCodeGen::DoDeleteProperty(LDeleteProperty* instr) {
5043  LOperand* obj = instr->object();
5044  LOperand* key = instr->key();
5045  __ push(ToOperand(obj));
5046  EmitPushTaggedOperand(key);
5047  ASSERT(instr->HasPointerMap());
5048  LPointerMap* pointers = instr->pointer_map();
5049  RecordPosition(pointers->position());
5050  // Create safepoint generator that will also ensure enough space in the
5051  // reloc info for patching in deoptimization (since this is invoking a
5052  // builtin)
5053  SafepointGenerator safepoint_generator(
5054  this, pointers, Safepoint::kLazyDeopt);
5055  __ push(Immediate(Smi::FromInt(strict_mode_flag())));
5056  __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION, safepoint_generator);
5057 }
5058 
5059 
5060 void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
5061  PushSafepointRegistersScope scope(this);
5062  __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
5063  __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
5064  RecordSafepointWithLazyDeopt(
5065  instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
5066  ASSERT(instr->HasEnvironment());
5067  LEnvironment* env = instr->environment();
5068  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
5069 }
5070 
5071 
5072 void LCodeGen::DoStackCheck(LStackCheck* instr) {
5073  class DeferredStackCheck: public LDeferredCode {
5074  public:
5075  DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
5076  : LDeferredCode(codegen), instr_(instr) { }
5077  virtual void Generate() { codegen()->DoDeferredStackCheck(instr_); }
5078  virtual LInstruction* instr() { return instr_; }
5079  private:
5080  LStackCheck* instr_;
5081  };
5082 
5083  ASSERT(instr->HasEnvironment());
5084  LEnvironment* env = instr->environment();
5085  // There is no LLazyBailout instruction for stack-checks. We have to
5086  // prepare for lazy deoptimization explicitly here.
5087  if (instr->hydrogen()->is_function_entry()) {
5088  // Perform stack overflow check.
5089  Label done;
5090  ExternalReference stack_limit =
5091  ExternalReference::address_of_stack_limit(isolate());
5092  __ cmp(esp, Operand::StaticVariable(stack_limit));
5093  __ j(above_equal, &done, Label::kNear);
5094 
5095  ASSERT(instr->context()->IsRegister());
5096  ASSERT(ToRegister(instr->context()).is(esi));
5097  StackCheckStub stub;
5098  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
5099  EnsureSpaceForLazyDeopt();
5100  __ bind(&done);
5101  RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
5102  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
5103  } else {
5104  ASSERT(instr->hydrogen()->is_backwards_branch());
5105  // Perform stack overflow check if this goto needs it before jumping.
5106  DeferredStackCheck* deferred_stack_check =
5107  new(zone()) DeferredStackCheck(this, instr);
5108  ExternalReference stack_limit =
5109  ExternalReference::address_of_stack_limit(isolate());
5110  __ cmp(esp, Operand::StaticVariable(stack_limit));
5111  __ j(below, deferred_stack_check->entry());
5112  EnsureSpaceForLazyDeopt();
5113  __ bind(instr->done_label());
5114  deferred_stack_check->SetExit(instr->done_label());
5115  RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
5116  // Don't record a deoptimization index for the safepoint here.
5117  // This will be done explicitly when emitting call and the safepoint in
5118  // the deferred code.
5119  }
5120 }
5121 
5122 
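Both variants of DoStackCheck reduce to comparing esp against the isolate's stack limit and calling the stack guard only when the limit has been crossed; at function entry the call is inline, on backward branches it lives in deferred code. The core test, host-side (illustrative C++ only; CallStackGuard is a hypothetical stand-in for the runtime call):

#include <cstdint>

// Illustrative only: the comparison generated for both stack-check variants.
void StackCheck(std::uintptr_t stack_pointer, std::uintptr_t stack_limit,
                void (*CallStackGuard)()) {
  if (stack_pointer < stack_limit) {  // the j(below / above_equal) test above
    CallStackGuard();                 // Runtime::kStackGuard in the listing
  }
}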
5123 void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
5124  // This is a pseudo-instruction that ensures that the environment here is
5125  // properly registered for deoptimization and records the assembler's PC
5126  // offset.
5127  LEnvironment* environment = instr->environment();
5128  environment->SetSpilledRegisters(instr->SpilledRegisterArray(),
5129  instr->SpilledDoubleRegisterArray());
5130 
5131  // If the environment were already registered, we would have no way of
5132  // backpatching it with the spill slot operands.
5133  ASSERT(!environment->HasBeenRegistered());
5134  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
5135  ASSERT(osr_pc_offset_ == -1);
5136  osr_pc_offset_ = masm()->pc_offset();
5137 }
5138 
5139 
5140 void LCodeGen::DoIn(LIn* instr) {
5141  LOperand* obj = instr->object();
5142  LOperand* key = instr->key();
5143  EmitPushTaggedOperand(key);
5144  EmitPushTaggedOperand(obj);
5145  ASSERT(instr->HasPointerMap());
5146  LPointerMap* pointers = instr->pointer_map();
5147  RecordPosition(pointers->position());
5148  SafepointGenerator safepoint_generator(
5149  this, pointers, Safepoint::kLazyDeopt);
5150  __ InvokeBuiltin(Builtins::IN, CALL_FUNCTION, safepoint_generator);
5151 }
5152 
5153 
5154 void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
5155  __ cmp(eax, isolate()->factory()->undefined_value());
5156  DeoptimizeIf(equal, instr->environment());
5157 
5158  __ cmp(eax, isolate()->factory()->null_value());
5159  DeoptimizeIf(equal, instr->environment());
5160 
5161  __ test(eax, Immediate(kSmiTagMask));
5162  DeoptimizeIf(zero, instr->environment());
5163 
5164  STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
5165  __ CmpObjectType(eax, LAST_JS_PROXY_TYPE, ecx);
5166  DeoptimizeIf(below_equal, instr->environment());
5167 
5168  Label use_cache, call_runtime;
5169  __ CheckEnumCache(&call_runtime);
5170 
5172  __ jmp(&use_cache, Label::kNear);
5173 
5174  // Get the set of properties to enumerate.
5175  __ bind(&call_runtime);
5176  __ push(eax);
5177  CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr);
5178 
5179  __ cmp(FieldOperand(eax, HeapObject::kMapOffset),
5180  isolate()->factory()->meta_map());
5181  DeoptimizeIf(not_equal, instr->environment());
5182  __ bind(&use_cache);
5183 }
5184 
5185 
5186 void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
5187  Register map = ToRegister(instr->map());
5188  Register result = ToRegister(instr->result());
5189  __ LoadInstanceDescriptors(map, result);
5190  __ mov(result,
5191  FieldOperand(result, DescriptorArray::kEnumerationIndexOffset));
5192  __ mov(result,
5193  FieldOperand(result, FixedArray::SizeFor(instr->idx())));
5194  __ test(result, result);
5195  DeoptimizeIf(equal, instr->environment());
5196 }
5197 
5198 
5199 void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
5200  Register object = ToRegister(instr->value());
5201  __ cmp(ToRegister(instr->map()),
5202  FieldOperand(object, HeapObject::kMapOffset));
5203  DeoptimizeIf(not_equal, instr->environment());
5204 }
5205 
5206 
5207 void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
5208  Register object = ToRegister(instr->object());
5209  Register index = ToRegister(instr->index());
5210 
5211  Label out_of_object, done;
5212  __ cmp(index, Immediate(0));
5213  __ j(less, &out_of_object);
5214  __ mov(object, FieldOperand(object,
5215  index,
5216  times_half_pointer_size,
5217  JSObject::kHeaderSize));
5218  __ jmp(&done, Label::kNear);
5219 
5220  __ bind(&out_of_object);
5221  __ mov(object, FieldOperand(object, JSObject::kPropertiesOffset));
5222  __ neg(index);
5223  // Index is now equal to out of object property index plus 1.
5224  __ mov(object, FieldOperand(object,
5225  index,
5226  times_half_pointer_size,
5227  FixedArray::kHeaderSize - kPointerSize));
5228  __ bind(&done);
5229 }
5230 
5231 
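DoLoadFieldByIndex decodes a signed field index: a non-negative index addresses an in-object field behind the object header, while a negative index addresses the out-of-object properties backing store, where the comment above notes that the negated index is the property index plus one. The addressing, stripped of smi tagging (illustrative C++ only):

#include <cstdint>

// Illustrative only: how the signed field index above selects between
// in-object storage and the out-of-object properties array.
std::intptr_t FieldByteOffset(std::intptr_t index,
                              std::intptr_t header_size,
                              std::intptr_t pointer_size) {
  if (index >= 0) {
    return header_size + index * pointer_size;        // in-object field
  }
  std::intptr_t out_of_object = -index - 1;           // the "plus 1" adjustment
  return header_size + out_of_object * pointer_size;  // into the properties array
}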
5232 #undef __
5233 
5234 } } // namespace v8::internal
5235 
5236 #endif // V8_TARGET_ARCH_IA32