v8 3.11.10 (node 0.8.26)
V8 is Google's open source JavaScript engine.
lithium-codegen-arm.cc
1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
4 // met:
5 //
6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided
11 // with the distribution.
12 // * Neither the name of Google Inc. nor the names of its
13 // contributors may be used to endorse or promote products derived
14 // from this software without specific prior written permission.
15 //
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 
28 #include "v8.h"
29 
30 #include "arm/lithium-codegen-arm.h"
31 #include "arm/lithium-gap-resolver-arm.h"
32 #include "code-stubs.h"
33 #include "stub-cache.h"
34 
35 namespace v8 {
36 namespace internal {
37 
38 
39 class SafepointGenerator : public CallWrapper {
40  public:
41  SafepointGenerator(LCodeGen* codegen,
42  LPointerMap* pointers,
43  Safepoint::DeoptMode mode)
44  : codegen_(codegen),
45  pointers_(pointers),
46  deopt_mode_(mode) { }
47  virtual ~SafepointGenerator() { }
48 
49  virtual void BeforeCall(int call_size) const { }
50 
51  virtual void AfterCall() const {
52  codegen_->RecordSafepoint(pointers_, deopt_mode_);
53  }
54 
55  private:
56  LCodeGen* codegen_;
57  LPointerMap* pointers_;
58  Safepoint::DeoptMode deopt_mode_;
59 };
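// A SafepointGenerator is handed to the macro assembler's call helpers
// (e.g. InvokeFunction) as their CallWrapper, so that AfterCall() records a
// safepoint for the instruction's pointer map right after the emitted call.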
60 
61 
62 #define __ masm()->
63 
64 bool LCodeGen::GenerateCode() {
65  HPhase phase("Z_Code generation", chunk());
66  ASSERT(is_unused());
67  status_ = GENERATING;
68  CpuFeatures::Scope scope1(VFP3);
69  CpuFeatures::Scope scope2(ARMv7);
70 
71  CodeStub::GenerateFPStubs();
72 
73  // Open a frame scope to indicate that there is a frame on the stack. The
74  // NONE indicates that the scope shouldn't actually generate code to set up
75  // the frame (that is done in GeneratePrologue).
76  FrameScope frame_scope(masm_, StackFrame::NONE);
77 
78  return GeneratePrologue() &&
79  GenerateBody() &&
80  GenerateDeferredCode() &&
81  GenerateDeoptJumpTable() &&
82  GenerateSafepointTable();
83 }
84 
85 
86 void LCodeGen::FinishCode(Handle<Code> code) {
87  ASSERT(is_done());
88  code->set_stack_slots(GetStackSlotCount());
89  code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
90  PopulateDeoptimizationData(code);
91 }
92 
93 
94 void LCodeGen::Abort(const char* format, ...) {
95  if (FLAG_trace_bailout) {
96  SmartArrayPointer<char> name(
97  info()->shared_info()->DebugName()->ToCString());
98  PrintF("Aborting LCodeGen in @\"%s\": ", *name);
99  va_list arguments;
100  va_start(arguments, format);
101  OS::VPrint(format, arguments);
102  va_end(arguments);
103  PrintF("\n");
104  }
105  status_ = ABORTED;
106 }
107 
108 
109 void LCodeGen::Comment(const char* format, ...) {
110  if (!FLAG_code_comments) return;
111  char buffer[4 * KB];
112  StringBuilder builder(buffer, ARRAY_SIZE(buffer));
113  va_list arguments;
114  va_start(arguments, format);
115  builder.AddFormattedList(format, arguments);
116  va_end(arguments);
117 
118  // Copy the string before recording it in the assembler to avoid
119  // issues when the stack allocated buffer goes out of scope.
120  size_t length = builder.position();
121  Vector<char> copy = Vector<char>::New(length + 1);
122  memcpy(copy.start(), builder.Finalize(), copy.length());
123  masm()->RecordComment(copy.start());
124 }
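// For example, DoLabel() below emits Comment(";;; B%d", label->block_id()).
// These strings are only recorded when the --code-comments flag is on, and
// then appear in the disassembly printed with --print-code.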
125 
126 
127 bool LCodeGen::GeneratePrologue() {
128  ASSERT(is_generating());
129 
130 #ifdef DEBUG
131  if (strlen(FLAG_stop_at) > 0 &&
132  info_->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
133  __ stop("stop_at");
134  }
135 #endif
136 
137  // r1: Callee's JS function.
138  // cp: Callee's context.
139  // fp: Caller's frame pointer.
140  // lr: Caller's pc.
141 
142  // Strict mode functions and builtins need to replace the receiver
143  // with undefined when called as functions (without an explicit
144  // receiver object). r5 is zero for method calls and non-zero for
145  // function calls.
146  if (!info_->is_classic_mode() || info_->is_native()) {
147  Label ok;
148  __ cmp(r5, Operand(0));
149  __ b(eq, &ok);
150  int receiver_offset = scope()->num_parameters() * kPointerSize;
151  __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
152  __ str(r2, MemOperand(sp, receiver_offset));
153  __ bind(&ok);
154  }
155 
156  __ stm(db_w, sp, r1.bit() | cp.bit() | fp.bit() | lr.bit());
157  __ add(fp, sp, Operand(2 * kPointerSize)); // Adjust FP to point to saved FP.
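  // The two instructions above set up a standard JS frame; with 4-byte
  // pointers the layout is roughly:
  //   fp + 4 : caller's pc (saved lr)
  //   fp + 0 : caller's fp
  //   fp - 4 : context (cp)
  //   fp - 8 : JSFunction (r1)
  // followed by the spill slots reserved below.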
158 
159  // Reserve space for the stack slots needed by the code.
160  int slots = GetStackSlotCount();
161  if (slots > 0) {
162  if (FLAG_debug_code) {
163  __ mov(r0, Operand(slots));
164  __ mov(r2, Operand(kSlotsZapValue));
165  Label loop;
166  __ bind(&loop);
167  __ push(r2);
168  __ sub(r0, r0, Operand(1), SetCC);
169  __ b(ne, &loop);
170  } else {
171  __ sub(sp, sp, Operand(slots * kPointerSize));
172  }
173  }
174 
175  // Possibly allocate a local context.
176  int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
177  if (heap_slots > 0) {
178  Comment(";;; Allocate local context");
179  // Argument to NewContext is the function, which is in r1.
180  __ push(r1);
181  if (heap_slots <= FastNewContextStub::kMaximumSlots) {
182  FastNewContextStub stub(heap_slots);
183  __ CallStub(&stub);
184  } else {
185  __ CallRuntime(Runtime::kNewFunctionContext, 1);
186  }
187  RecordSafepoint(Safepoint::kNoLazyDeopt);
188  // Context is returned in both r0 and cp. It replaces the context
189  // passed to us. It's saved in the stack and kept live in cp.
190  __ str(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
191  // Copy any necessary parameters into the context.
192  int num_parameters = scope()->num_parameters();
193  for (int i = 0; i < num_parameters; i++) {
194  Variable* var = scope()->parameter(i);
195  if (var->IsContextSlot()) {
196  int parameter_offset = StandardFrameConstants::kCallerSPOffset +
197  (num_parameters - 1 - i) * kPointerSize;
198  // Load parameter from stack.
199  __ ldr(r0, MemOperand(fp, parameter_offset));
200  // Store it in the context.
201  MemOperand target = ContextOperand(cp, var->index());
202  __ str(r0, target);
203  // Update the write barrier. This clobbers r3 and r0.
204  __ RecordWriteContextSlot(
205  cp, target.offset(), r0, r3, kLRHasBeenSaved, kSaveFPRegs);
206  }
207  }
208  Comment(";;; End allocate local context");
209  }
210 
211  // Trace the call.
212  if (FLAG_trace) {
213  __ CallRuntime(Runtime::kTraceEnter, 0);
214  }
215  return !is_aborted();
216 }
217 
218 
219 bool LCodeGen::GenerateBody() {
220  ASSERT(is_generating());
221  bool emit_instructions = true;
222  for (current_instruction_ = 0;
223  !is_aborted() && current_instruction_ < instructions_->length();
224  current_instruction_++) {
225  LInstruction* instr = instructions_->at(current_instruction_);
226  if (instr->IsLabel()) {
227  LLabel* label = LLabel::cast(instr);
228  emit_instructions = !label->HasReplacement();
229  }
230 
231  if (emit_instructions) {
232  Comment(";;; @%d: %s.", current_instruction_, instr->Mnemonic());
233  instr->CompileToNative(this);
234  }
235  }
236  EnsureSpaceForLazyDeopt();
237  return !is_aborted();
238 }
239 
240 
241 bool LCodeGen::GenerateDeferredCode() {
242  ASSERT(is_generating());
243  if (deferred_.length() > 0) {
244  for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
245  LDeferredCode* code = deferred_[i];
246  __ bind(code->entry());
247  Comment(";;; Deferred code @%d: %s.",
248  code->instruction_index(),
249  code->instr()->Mnemonic());
250  code->Generate();
251  __ jmp(code->exit());
252  }
253  }
254 
255  // Force constant pool emission at the end of the deferred code to make
256  // sure that no constant pools are emitted after.
257  masm()->CheckConstPool(true, false);
258 
259  return !is_aborted();
260 }
261 
262 
263 bool LCodeGen::GenerateDeoptJumpTable() {
264  // Check that the jump table is accessible from everywhere in the function
265  // code, i.e. that offsets to the table can be encoded in the 24bit signed
266  // immediate of a branch instruction.
267  // To simplify we consider the code size from the first instruction to the
268  // end of the jump table. We also don't consider the pc load delta.
269  // Each entry in the jump table generates one instruction and inlines one
270  // 32bit data after it.
271  if (!is_int24((masm()->pc_offset() / Assembler::kInstrSize) +
272  deopt_jump_table_.length() * 2)) {
273  Abort("Generated code is too large");
274  }
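  // For scale: a conditional branch encodes a signed 24-bit word offset,
  // i.e. roughly +/-8M instructions or +/-32MB of code, so the sum above
  // (code size in instructions plus two words per table entry) must fit
  // in an int24.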
275 
276  // Block the constant pool emission during the jump table emission.
277  __ BlockConstPoolFor(deopt_jump_table_.length());
278  __ RecordComment("[ Deoptimisation jump table");
279  Label table_start;
280  __ bind(&table_start);
281  for (int i = 0; i < deopt_jump_table_.length(); i++) {
282  __ bind(&deopt_jump_table_[i].label);
283  __ ldr(pc, MemOperand(pc, Assembler::kInstrSize - Assembler::kPcLoadDelta));
284  __ dd(reinterpret_cast<uint32_t>(deopt_jump_table_[i].address));
285  }
286  ASSERT(masm()->InstructionsGeneratedSince(&table_start) ==
287  deopt_jump_table_.length() * 2);
288  __ RecordComment("]");
289 
290  // The deoptimization jump table is the last part of the instruction
291  // sequence. Mark the generated code as done unless we bailed out.
292  if (!is_aborted()) status_ = DONE;
293  return !is_aborted();
294 }
295 
296 
297 bool LCodeGen::GenerateSafepointTable() {
298  ASSERT(is_done());
299  safepoints_.Emit(masm(), GetStackSlotCount());
300  return !is_aborted();
301 }
302 
303 
304 Register LCodeGen::ToRegister(int index) const {
305  return Register::FromAllocationIndex(index);
306 }
307 
308 
309 DoubleRegister LCodeGen::ToDoubleRegister(int index) const {
310  return DoubleRegister::FromAllocationIndex(index);
311 }
312 
313 
314 Register LCodeGen::ToRegister(LOperand* op) const {
315  ASSERT(op->IsRegister());
316  return ToRegister(op->index());
317 }
318 
319 
320 Register LCodeGen::EmitLoadRegister(LOperand* op, Register scratch) {
321  if (op->IsRegister()) {
322  return ToRegister(op->index());
323  } else if (op->IsConstantOperand()) {
324  LConstantOperand* const_op = LConstantOperand::cast(op);
325  Handle<Object> literal = chunk_->LookupLiteral(const_op);
326  Representation r = chunk_->LookupLiteralRepresentation(const_op);
327  if (r.IsInteger32()) {
328  ASSERT(literal->IsNumber());
329  __ mov(scratch, Operand(static_cast<int32_t>(literal->Number())));
330  } else if (r.IsDouble()) {
331  Abort("EmitLoadRegister: Unsupported double immediate.");
332  } else {
333  ASSERT(r.IsTagged());
334  if (literal->IsSmi()) {
335  __ mov(scratch, Operand(literal));
336  } else {
337  __ LoadHeapObject(scratch, Handle<HeapObject>::cast(literal));
338  }
339  }
340  return scratch;
341  } else if (op->IsStackSlot() || op->IsArgument()) {
342  __ ldr(scratch, ToMemOperand(op));
343  return scratch;
344  }
345  UNREACHABLE();
346  return scratch;
347 }
348 
349 
350 DoubleRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
351  ASSERT(op->IsDoubleRegister());
352  return ToDoubleRegister(op->index());
353 }
354 
355 
356 DoubleRegister LCodeGen::EmitLoadDoubleRegister(LOperand* op,
357  SwVfpRegister flt_scratch,
358  DoubleRegister dbl_scratch) {
359  if (op->IsDoubleRegister()) {
360  return ToDoubleRegister(op->index());
361  } else if (op->IsConstantOperand()) {
362  LConstantOperand* const_op = LConstantOperand::cast(op);
363  Handle<Object> literal = chunk_->LookupLiteral(const_op);
364  Representation r = chunk_->LookupLiteralRepresentation(const_op);
365  if (r.IsInteger32()) {
366  ASSERT(literal->IsNumber());
367  __ mov(ip, Operand(static_cast<int32_t>(literal->Number())));
368  __ vmov(flt_scratch, ip);
369  __ vcvt_f64_s32(dbl_scratch, flt_scratch);
370  return dbl_scratch;
371  } else if (r.IsDouble()) {
372  Abort("unsupported double immediate");
373  } else if (r.IsTagged()) {
374  Abort("unsupported tagged immediate");
375  }
376  } else if (op->IsStackSlot() || op->IsArgument()) {
377  // TODO(regis): Why is vldr not taking a MemOperand?
378  // __ vldr(dbl_scratch, ToMemOperand(op));
379  MemOperand mem_op = ToMemOperand(op);
380  __ vldr(dbl_scratch, mem_op.rn(), mem_op.offset());
381  return dbl_scratch;
382  }
383  UNREACHABLE();
384  return dbl_scratch;
385 }
386 
387 
388 Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
389  Handle<Object> literal = chunk_->LookupLiteral(op);
390  ASSERT(chunk_->LookupLiteralRepresentation(op).IsTagged());
391  return literal;
392 }
393 
394 
395 bool LCodeGen::IsInteger32(LConstantOperand* op) const {
396  return chunk_->LookupLiteralRepresentation(op).IsInteger32();
397 }
398 
399 
400 int LCodeGen::ToInteger32(LConstantOperand* op) const {
401  Handle<Object> value = chunk_->LookupLiteral(op);
402  ASSERT(chunk_->LookupLiteralRepresentation(op).IsInteger32());
403  ASSERT(static_cast<double>(static_cast<int32_t>(value->Number())) ==
404  value->Number());
405  return static_cast<int32_t>(value->Number());
406 }
407 
408 
409 double LCodeGen::ToDouble(LConstantOperand* op) const {
410  Handle<Object> value = chunk_->LookupLiteral(op);
411  return value->Number();
412 }
413 
414 
415 Operand LCodeGen::ToOperand(LOperand* op) {
416  if (op->IsConstantOperand()) {
417  LConstantOperand* const_op = LConstantOperand::cast(op);
418  Handle<Object> literal = chunk_->LookupLiteral(const_op);
419  Representation r = chunk_->LookupLiteralRepresentation(const_op);
420  if (r.IsInteger32()) {
421  ASSERT(literal->IsNumber());
422  return Operand(static_cast<int32_t>(literal->Number()));
423  } else if (r.IsDouble()) {
424  Abort("ToOperand Unsupported double immediate.");
425  }
426  ASSERT(r.IsTagged());
427  return Operand(literal);
428  } else if (op->IsRegister()) {
429  return Operand(ToRegister(op));
430  } else if (op->IsDoubleRegister()) {
431  Abort("ToOperand IsDoubleRegister unimplemented");
432  return Operand(0);
433  }
434  // Stack slots not implemented, use ToMemOperand instead.
435  UNREACHABLE();
436  return Operand(0);
437 }
438 
439 
440 MemOperand LCodeGen::ToMemOperand(LOperand* op) const {
441  ASSERT(!op->IsRegister());
442  ASSERT(!op->IsDoubleRegister());
443  ASSERT(op->IsStackSlot() || op->IsDoubleStackSlot());
444  int index = op->index();
445  if (index >= 0) {
446  // Local or spill slot. Skip the frame pointer, function, and
447  // context in the fixed part of the frame.
448  return MemOperand(fp, -(index + 3) * kPointerSize);
449  } else {
450  // Incoming parameter. Skip the return address.
451  return MemOperand(fp, -(index - 1) * kPointerSize);
452  }
453 }
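// For example, with kPointerSize == 4, stack slot index 0 maps to
// MemOperand(fp, -12), just below the saved context and function, while an
// incoming parameter at index -1 maps to MemOperand(fp, +8), the first
// caller-pushed word above the saved fp and return address.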
454 
455 
456 MemOperand LCodeGen::ToHighMemOperand(LOperand* op) const {
457  ASSERT(op->IsDoubleStackSlot());
458  int index = op->index();
459  if (index >= 0) {
460  // Local or spill slot. Skip the frame pointer, function, context,
461  // and the first word of the double in the fixed part of the frame.
462  return MemOperand(fp, -(index + 3) * kPointerSize + kPointerSize);
463  } else {
464  // Incoming parameter. Skip the return address and the first word of
465  // the double.
466  return MemOperand(fp, -(index - 1) * kPointerSize + kPointerSize);
467  }
468 }
469 
470 
471 void LCodeGen::WriteTranslation(LEnvironment* environment,
472  Translation* translation) {
473  if (environment == NULL) return;
474 
475  // The translation includes one command per value in the environment.
476  int translation_size = environment->values()->length();
477  // The output frame height does not include the parameters.
478  int height = translation_size - environment->parameter_count();
479 
480  WriteTranslation(environment->outer(), translation);
481  int closure_id = DefineDeoptimizationLiteral(environment->closure());
482  switch (environment->frame_type()) {
483  case JS_FUNCTION:
484  translation->BeginJSFrame(environment->ast_id(), closure_id, height);
485  break;
486  case JS_CONSTRUCT:
487  translation->BeginConstructStubFrame(closure_id, translation_size);
488  break;
489  case ARGUMENTS_ADAPTOR:
490  translation->BeginArgumentsAdaptorFrame(closure_id, translation_size);
491  break;
492  default:
493  UNREACHABLE();
494  }
495  for (int i = 0; i < translation_size; ++i) {
496  LOperand* value = environment->values()->at(i);
497  // spilled_registers_ and spilled_double_registers_ are either
498  // both NULL or both set.
499  if (environment->spilled_registers() != NULL && value != NULL) {
500  if (value->IsRegister() &&
501  environment->spilled_registers()[value->index()] != NULL) {
502  translation->MarkDuplicate();
503  AddToTranslation(translation,
504  environment->spilled_registers()[value->index()],
505  environment->HasTaggedValueAt(i));
506  } else if (
507  value->IsDoubleRegister() &&
508  environment->spilled_double_registers()[value->index()] != NULL) {
509  translation->MarkDuplicate();
510  AddToTranslation(
511  translation,
512  environment->spilled_double_registers()[value->index()],
513  false);
514  }
515  }
516 
517  AddToTranslation(translation, value, environment->HasTaggedValueAt(i));
518  }
519 }
520 
521 
522 void LCodeGen::AddToTranslation(Translation* translation,
523  LOperand* op,
524  bool is_tagged) {
525  if (op == NULL) {
526  // TODO(twuerthinger): Introduce marker operands to indicate that this value
527  // is not present and must be reconstructed from the deoptimizer. Currently
528  // this is only used for the arguments object.
529  translation->StoreArgumentsObject();
530  } else if (op->IsStackSlot()) {
531  if (is_tagged) {
532  translation->StoreStackSlot(op->index());
533  } else {
534  translation->StoreInt32StackSlot(op->index());
535  }
536  } else if (op->IsDoubleStackSlot()) {
537  translation->StoreDoubleStackSlot(op->index());
538  } else if (op->IsArgument()) {
539  ASSERT(is_tagged);
540  int src_index = GetStackSlotCount() + op->index();
541  translation->StoreStackSlot(src_index);
542  } else if (op->IsRegister()) {
543  Register reg = ToRegister(op);
544  if (is_tagged) {
545  translation->StoreRegister(reg);
546  } else {
547  translation->StoreInt32Register(reg);
548  }
549  } else if (op->IsDoubleRegister()) {
550  DoubleRegister reg = ToDoubleRegister(op);
551  translation->StoreDoubleRegister(reg);
552  } else if (op->IsConstantOperand()) {
553  Handle<Object> literal = chunk()->LookupLiteral(LConstantOperand::cast(op));
554  int src_index = DefineDeoptimizationLiteral(literal);
555  translation->StoreLiteral(src_index);
556  } else {
557  UNREACHABLE();
558  }
559 }
560 
561 
562 void LCodeGen::CallCode(Handle<Code> code,
563  RelocInfo::Mode mode,
564  LInstruction* instr) {
565  CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT);
566 }
567 
568 
569 void LCodeGen::CallCodeGeneric(Handle<Code> code,
570  RelocInfo::Mode mode,
571  LInstruction* instr,
572  SafepointMode safepoint_mode) {
573  ASSERT(instr != NULL);
574  // Block literal pool emission to ensure nop indicating no inlined smi code
575  // is in the correct position.
576  Assembler::BlockConstPoolScope block_const_pool(masm());
577  LPointerMap* pointers = instr->pointer_map();
578  RecordPosition(pointers->position());
579  __ Call(code, mode);
580  RecordSafepointWithLazyDeopt(instr, safepoint_mode);
581 
582  // Signal that we don't inline smi code before these stubs in the
583  // optimizing code generator.
584  if (code->kind() == Code::BINARY_OP_IC ||
585  code->kind() == Code::COMPARE_IC) {
586  __ nop();
587  }
588 }
589 
590 
591 void LCodeGen::CallRuntime(const Runtime::Function* function,
592  int num_arguments,
593  LInstruction* instr) {
594  ASSERT(instr != NULL);
595  LPointerMap* pointers = instr->pointer_map();
596  ASSERT(pointers != NULL);
597  RecordPosition(pointers->position());
598 
599  __ CallRuntime(function, num_arguments);
600  RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
601 }
602 
603 
604 void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
605  int argc,
606  LInstruction* instr) {
607  __ CallRuntimeSaveDoubles(id);
608  RecordSafepointWithRegisters(
609  instr->pointer_map(), argc, Safepoint::kNoLazyDeopt);
610 }
611 
612 
613 void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
614  Safepoint::DeoptMode mode) {
615  if (!environment->HasBeenRegistered()) {
616  // Physical stack frame layout:
617  // -x ............. -4 0 ..................................... y
618  // [incoming arguments] [spill slots] [pushed outgoing arguments]
619 
620  // Layout of the environment:
621  // 0 ..................................................... size-1
622  // [parameters] [locals] [expression stack including arguments]
623 
624  // Layout of the translation:
625  // 0 ........................................................ size - 1 + 4
626  // [expression stack including arguments] [locals] [4 words] [parameters]
627  // |>------------ translation_size ------------<|
628 
629  int frame_count = 0;
630  int jsframe_count = 0;
631  for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
632  ++frame_count;
633  if (e->frame_type() == JS_FUNCTION) {
634  ++jsframe_count;
635  }
636  }
637  Translation translation(&translations_, frame_count, jsframe_count,
638  zone());
639  WriteTranslation(environment, &translation);
640  int deoptimization_index = deoptimizations_.length();
641  int pc_offset = masm()->pc_offset();
642  environment->Register(deoptimization_index,
643  translation.index(),
644  (mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
645  deoptimizations_.Add(environment, zone());
646  }
647 }
648 
649 
650 void LCodeGen::DeoptimizeIf(Condition cc, LEnvironment* environment) {
651  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
652  ASSERT(environment->HasBeenRegistered());
653  int id = environment->deoptimization_index();
654  Address entry = Deoptimizer::GetDeoptimizationEntry(id, Deoptimizer::EAGER);
655  if (entry == NULL) {
656  Abort("bailout was not prepared");
657  return;
658  }
659 
660  ASSERT(FLAG_deopt_every_n_times < 2); // Other values not supported on ARM.
661 
662  if (FLAG_deopt_every_n_times == 1 &&
663  info_->shared_info()->opt_count() == id) {
664  __ Jump(entry, RelocInfo::RUNTIME_ENTRY);
665  return;
666  }
667 
668  if (FLAG_trap_on_deopt) __ stop("trap_on_deopt", cc);
669 
670  if (cc == al) {
671  __ Jump(entry, RelocInfo::RUNTIME_ENTRY);
672  } else {
673  // We often have several deopts to the same entry, reuse the last
674  // jump entry if this is the case.
675  if (deopt_jump_table_.is_empty() ||
676  (deopt_jump_table_.last().address != entry)) {
677  deopt_jump_table_.Add(JumpTableEntry(entry), zone());
678  }
679  __ b(cc, &deopt_jump_table_.last().label);
680  }
681 }
682 
683 
684 void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
685  int length = deoptimizations_.length();
686  if (length == 0) return;
687  Handle<DeoptimizationInputData> data =
688  factory()->NewDeoptimizationInputData(length, TENURED);
689 
690  Handle<ByteArray> translations = translations_.CreateByteArray();
691  data->SetTranslationByteArray(*translations);
692  data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));
693 
694  Handle<FixedArray> literals =
695  factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
696  for (int i = 0; i < deoptimization_literals_.length(); i++) {
697  literals->set(i, *deoptimization_literals_[i]);
698  }
699  data->SetLiteralArray(*literals);
700 
701  data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id()));
702  data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_));
703 
704  // Populate the deoptimization entries.
705  for (int i = 0; i < length; i++) {
706  LEnvironment* env = deoptimizations_[i];
707  data->SetAstId(i, Smi::FromInt(env->ast_id()));
708  data->SetTranslationIndex(i, Smi::FromInt(env->translation_index()));
709  data->SetArgumentsStackHeight(i,
710  Smi::FromInt(env->arguments_stack_height()));
711  data->SetPc(i, Smi::FromInt(env->pc_offset()));
712  }
713  code->set_deoptimization_data(*data);
714 }
715 
716 
717 int LCodeGen::DefineDeoptimizationLiteral(Handle<Object> literal) {
718  int result = deoptimization_literals_.length();
719  for (int i = 0; i < deoptimization_literals_.length(); ++i) {
720  if (deoptimization_literals_[i].is_identical_to(literal)) return i;
721  }
722  deoptimization_literals_.Add(literal, zone());
723  return result;
724 }
725 
726 
727 void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
728  ASSERT(deoptimization_literals_.length() == 0);
729 
730  const ZoneList<Handle<JSFunction> >* inlined_closures =
731  chunk()->inlined_closures();
732 
733  for (int i = 0, length = inlined_closures->length();
734  i < length;
735  i++) {
736  DefineDeoptimizationLiteral(inlined_closures->at(i));
737  }
738 
739  inlined_function_count_ = deoptimization_literals_.length();
740 }
741 
742 
743 void LCodeGen::RecordSafepointWithLazyDeopt(
744  LInstruction* instr, SafepointMode safepoint_mode) {
745  if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
746  RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt);
747  } else {
748  ASSERT(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
749  RecordSafepointWithRegisters(
750  instr->pointer_map(), 0, Safepoint::kLazyDeopt);
751  }
752 }
753 
754 
755 void LCodeGen::RecordSafepoint(
756  LPointerMap* pointers,
757  Safepoint::Kind kind,
758  int arguments,
759  Safepoint::DeoptMode deopt_mode) {
760  ASSERT(expected_safepoint_kind_ == kind);
761 
762  const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();
763  Safepoint safepoint = safepoints_.DefineSafepoint(masm(),
764  kind, arguments, deopt_mode);
765  for (int i = 0; i < operands->length(); i++) {
766  LOperand* pointer = operands->at(i);
767  if (pointer->IsStackSlot()) {
768  safepoint.DefinePointerSlot(pointer->index(), zone());
769  } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
770  safepoint.DefinePointerRegister(ToRegister(pointer), zone());
771  }
772  }
773  if (kind & Safepoint::kWithRegisters) {
774  // Register cp always contains a pointer to the context.
775  safepoint.DefinePointerRegister(cp, zone());
776  }
777 }
778 
779 
780 void LCodeGen::RecordSafepoint(LPointerMap* pointers,
781  Safepoint::DeoptMode deopt_mode) {
782  RecordSafepoint(pointers, Safepoint::kSimple, 0, deopt_mode);
783 }
784 
785 
786 void LCodeGen::RecordSafepoint(Safepoint::DeoptMode deopt_mode) {
787  LPointerMap empty_pointers(RelocInfo::kNoPosition, zone());
788  RecordSafepoint(&empty_pointers, deopt_mode);
789 }
790 
791 
792 void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
793  int arguments,
794  Safepoint::DeoptMode deopt_mode) {
795  RecordSafepoint(
796  pointers, Safepoint::kWithRegisters, arguments, deopt_mode);
797 }
798 
799 
800 void LCodeGen::RecordSafepointWithRegistersAndDoubles(
801  LPointerMap* pointers,
802  int arguments,
803  Safepoint::DeoptMode deopt_mode) {
804  RecordSafepoint(
805  pointers, Safepoint::kWithRegistersAndDoubles, arguments, deopt_mode);
806 }
807 
808 
809 void LCodeGen::RecordPosition(int position) {
810  if (position == RelocInfo::kNoPosition) return;
811  masm()->positions_recorder()->RecordPosition(position);
812 }
813 
814 
815 void LCodeGen::DoLabel(LLabel* label) {
816  if (label->is_loop_header()) {
817  Comment(";;; B%d - LOOP entry", label->block_id());
818  } else {
819  Comment(";;; B%d", label->block_id());
820  }
821  __ bind(label->label());
822  current_block_ = label->block_id();
823  DoGap(label);
824 }
825 
826 
827 void LCodeGen::DoParallelMove(LParallelMove* move) {
828  resolver_.Resolve(move);
829 }
830 
831 
832 void LCodeGen::DoGap(LGap* gap) {
833  for (int i = LGap::FIRST_INNER_POSITION;
834  i <= LGap::LAST_INNER_POSITION;
835  i++) {
836  LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i);
837  LParallelMove* move = gap->GetParallelMove(inner_pos);
838  if (move != NULL) DoParallelMove(move);
839  }
840 }
841 
842 
843 void LCodeGen::DoInstructionGap(LInstructionGap* instr) {
844  DoGap(instr);
845 }
846 
847 
848 void LCodeGen::DoParameter(LParameter* instr) {
849  // Nothing to do.
850 }
851 
852 
853 void LCodeGen::DoCallStub(LCallStub* instr) {
854  ASSERT(ToRegister(instr->result()).is(r0));
855  switch (instr->hydrogen()->major_key()) {
856  case CodeStub::RegExpConstructResult: {
857  RegExpConstructResultStub stub;
858  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
859  break;
860  }
861  case CodeStub::RegExpExec: {
862  RegExpExecStub stub;
863  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
864  break;
865  }
866  case CodeStub::SubString: {
867  SubStringStub stub;
868  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
869  break;
870  }
871  case CodeStub::NumberToString: {
872  NumberToStringStub stub;
873  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
874  break;
875  }
876  case CodeStub::StringAdd: {
877  StringAddStub stub(NO_STRING_ADD_FLAGS);
878  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
879  break;
880  }
881  case CodeStub::StringCompare: {
882  StringCompareStub stub;
883  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
884  break;
885  }
886  case CodeStub::TranscendentalCache: {
887  __ ldr(r0, MemOperand(sp, 0));
888  TranscendentalCacheStub stub(instr->transcendental_type(),
889  TranscendentalCacheStub::TAGGED);
890  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
891  break;
892  }
893  default:
894  UNREACHABLE();
895  }
896 }
897 
898 
899 void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
900  // Nothing to do.
901 }
902 
903 
904 void LCodeGen::DoModI(LModI* instr) {
905  if (instr->hydrogen()->HasPowerOf2Divisor()) {
906  Register dividend = ToRegister(instr->InputAt(0));
907  Register result = ToRegister(instr->result());
908 
909  int32_t divisor =
910  HConstant::cast(instr->hydrogen()->right())->Integer32Value();
911 
912  if (divisor < 0) divisor = -divisor;
913 
914  Label positive_dividend, done;
915  __ cmp(dividend, Operand(0));
916  __ b(pl, &positive_dividend);
917  __ rsb(result, dividend, Operand(0));
918  __ and_(result, result, Operand(divisor - 1), SetCC);
919  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
920  DeoptimizeIf(eq, instr->environment());
921  }
922  __ rsb(result, result, Operand(0));
923  __ b(&done);
924  __ bind(&positive_dividend);
925  __ and_(result, dividend, Operand(divisor - 1));
926  __ bind(&done);
927  return;
928  }
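  // The fast path above computes sign(dividend) * (abs(dividend) & (divisor - 1)).
  // For example, -7 % 4: negate to 7, mask with 3 to get 3, negate back to -3,
  // matching the truncating semantics of integer %.  The SetCC/eq deopt covers
  // the case where the masked value is 0 on the negative-dividend path, i.e. a
  // -0 result when minus zero matters.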
929 
930  // These registers hold untagged 32 bit values.
931  Register left = ToRegister(instr->InputAt(0));
932  Register right = ToRegister(instr->InputAt(1));
933  Register result = ToRegister(instr->result());
934 
935  Register scratch = scratch0();
936  Register scratch2 = ToRegister(instr->TempAt(0));
937  DwVfpRegister dividend = ToDoubleRegister(instr->TempAt(1));
938  DwVfpRegister divisor = ToDoubleRegister(instr->TempAt(2));
939  DwVfpRegister quotient = double_scratch0();
940 
941  ASSERT(!dividend.is(divisor));
942  ASSERT(!dividend.is(quotient));
943  ASSERT(!divisor.is(quotient));
944  ASSERT(!scratch.is(left));
945  ASSERT(!scratch.is(right));
946  ASSERT(!scratch.is(result));
947 
948  Label done, vfp_modulo, both_positive, right_negative;
949 
950  // Check for x % 0.
951  if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
952  __ cmp(right, Operand(0));
953  DeoptimizeIf(eq, instr->environment());
954  }
955 
956  __ Move(result, left);
957 
958  // (0 % x) must yield 0 (if x is finite, which is the case here).
959  __ cmp(left, Operand(0));
960  __ b(eq, &done);
961  // Preload right in a vfp register.
962  __ vmov(divisor.low(), right);
963  __ b(lt, &vfp_modulo);
964 
965  __ cmp(left, Operand(right));
966  __ b(lt, &done);
967 
968  // Check for (positive) power of two on the right hand side.
969  __ JumpIfNotPowerOfTwoOrZeroAndNeg(right,
970  scratch,
971  &right_negative,
972  &both_positive);
973  // Perform modulo operation (scratch contains right - 1).
974  __ and_(result, scratch, Operand(left));
975  __ b(&done);
976 
977  __ bind(&right_negative);
978  // Negate right. The sign of the divisor does not matter.
979  __ rsb(right, right, Operand(0));
980 
981  __ bind(&both_positive);
982  const int kUnfolds = 3;
983  // If the (nonnegative) left hand side is smaller than the right
984  // hand side, the left hand side is the result.
985  // Otherwise try subtracting the right hand side a few times.
986  __ mov(scratch, left);
987  for (int i = 0; i < kUnfolds; i++) {
988  // Check if the left hand side is less than or equal to
989  // the right hand side.
990  __ cmp(scratch, Operand(right));
991  __ mov(result, scratch, LeaveCC, lt);
992  __ b(lt, &done);
993  // If not, reduce the left hand side by the right hand
994  // side and check again.
995  if (i < kUnfolds - 1) __ sub(scratch, scratch, right);
996  }
997 
998  __ bind(&vfp_modulo);
999  // Load the arguments in VFP registers.
1000  // The divisor value is preloaded before. Be careful that 'right' is only live
1001  // on entry.
1002  __ vmov(dividend.low(), left);
1003  // From here on don't use right as it may have been reallocated (for example
1004  // to scratch2).
1005  right = no_reg;
1006 
1007  __ vcvt_f64_s32(dividend, dividend.low());
1008  __ vcvt_f64_s32(divisor, divisor.low());
1009 
1010  // We do not care about the sign of the divisor.
1011  __ vabs(divisor, divisor);
1012  // Compute the quotient and round it to a 32bit integer.
1013  __ vdiv(quotient, dividend, divisor);
1014  __ vcvt_s32_f64(quotient.low(), quotient);
1015  __ vcvt_f64_s32(quotient, quotient.low());
1016 
1017  // Compute the remainder in result.
1018  DwVfpRegister double_scratch = dividend;
1019  __ vmul(double_scratch, divisor, quotient);
1020  __ vcvt_s32_f64(double_scratch.low(), double_scratch);
1021  __ vmov(scratch, double_scratch.low());
1022 
1023  if (!instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
1024  __ sub(result, left, scratch);
1025  } else {
1026  Label ok;
1027  // Check for -0.
1028  __ sub(scratch2, left, scratch, SetCC);
1029  __ b(ne, &ok);
1030  __ cmp(left, Operand(0));
1031  DeoptimizeIf(mi, instr->environment());
1032  __ bind(&ok);
1033  // Load the result and we are done.
1034  __ mov(result, scratch2);
1035  }
1036 
1037  __ bind(&done);
1038 }
1039 
1040 
1041 void LCodeGen::EmitSignedIntegerDivisionByConstant(
1042  Register result,
1043  Register dividend,
1044  int32_t divisor,
1045  Register remainder,
1046  Register scratch,
1047  LEnvironment* environment) {
1048  ASSERT(!AreAliased(dividend, scratch, ip));
1049  ASSERT(LChunkBuilder::HasMagicNumberForDivisor(divisor));
1050 
1051  uint32_t divisor_abs = abs(divisor);
1052 
1053  int32_t power_of_2_factor =
1054  CompilerIntrinsics::CountTrailingZeros(divisor_abs);
1055 
1056  switch (divisor_abs) {
1057  case 0:
1058  DeoptimizeIf(al, environment);
1059  return;
1060 
1061  case 1:
1062  if (divisor > 0) {
1063  __ Move(result, dividend);
1064  } else {
1065  __ rsb(result, dividend, Operand(0), SetCC);
1066  DeoptimizeIf(vs, environment);
1067  }
1068  // Compute the remainder.
1069  __ mov(remainder, Operand(0));
1070  return;
1071 
1072  default:
1073  if (IsPowerOf2(divisor_abs)) {
1074  // Branch and condition free code for integer division by a power
1075  // of two.
1076  int32_t power = WhichPowerOf2(divisor_abs);
1077  if (power > 1) {
1078  __ mov(scratch, Operand(dividend, ASR, power - 1));
1079  }
1080  __ add(scratch, dividend, Operand(scratch, LSR, 32 - power));
1081  __ mov(result, Operand(scratch, ASR, power));
1082  // Negate if necessary.
1083  // We don't need to check for overflow because the case '-1' is
1084  // handled separately.
1085  if (divisor < 0) {
1086  ASSERT(divisor != -1);
1087  __ rsb(result, result, Operand(0));
1088  }
1089  // Compute the remainder.
1090  if (divisor > 0) {
1091  __ sub(remainder, dividend, Operand(result, LSL, power));
1092  } else {
1093  __ add(remainder, dividend, Operand(result, LSL, power));
1094  }
1095  return;
1096  } else {
1097  // Use magic numbers for a few specific divisors.
1098  // Details and proofs can be found in:
1099  // - Hacker's Delight, Henry S. Warren, Jr.
1100  // - The PowerPC Compiler Writer’s Guide
1101  // and probably many others.
1102  //
1103  // We handle
1104  // <divisor with magic numbers> * <power of 2>
1105  // but not
1106  // <divisor with magic numbers> * <other divisor with magic numbers>
1107  DivMagicNumbers magic_numbers =
1108  DivMagicNumberFor(divisor_abs >> power_of_2_factor);
1109  // Branch and condition free code for integer division using
1110  // magic numbers.
1111  const int32_t M = magic_numbers.M;
1112  const int32_t s = magic_numbers.s + power_of_2_factor;
1113 
1114  __ mov(ip, Operand(M));
1115  __ smull(ip, scratch, dividend, ip);
1116  if (M < 0) {
1117  __ add(scratch, scratch, Operand(dividend));
1118  }
1119  if (s > 0) {
1120  __ mov(scratch, Operand(scratch, ASR, s));
1121  }
1122  __ add(result, scratch, Operand(dividend, LSR, 31));
1123  if (divisor < 0) __ rsb(result, result, Operand(0));
1124  // Compute the remainder.
1125  __ mov(ip, Operand(divisor));
1126  // This sequence could be replaced with 'mls' when
1127  // it gets implemented.
1128  __ mul(scratch, result, ip);
1129  __ sub(remainder, dividend, scratch);
1130  }
1131  }
1132 }
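// The magic-number path above amounts to
//   q = hi32(dividend * M); if (M < 0) q += dividend; q >>= s;
//   q += (uint32_t)dividend >> 31;
// Worked example with the classic Hacker's Delight constants for 3
// (M = 0x55555556, s = 0) and dividend -7: hi32(-7 * 0x55555556) == -3,
// no shift, plus 1 for the sign bit gives the truncated quotient -2, and
// the remainder is -7 - (-2 * 3) == -1.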
1133 
1134 
1135 void LCodeGen::DoDivI(LDivI* instr) {
1136  class DeferredDivI: public LDeferredCode {
1137  public:
1138  DeferredDivI(LCodeGen* codegen, LDivI* instr)
1139  : LDeferredCode(codegen), instr_(instr) { }
1140  virtual void Generate() {
1141  codegen()->DoDeferredBinaryOpStub(instr_, Token::DIV);
1142  }
1143  virtual LInstruction* instr() { return instr_; }
1144  private:
1145  LDivI* instr_;
1146  };
1147 
1148  const Register left = ToRegister(instr->InputAt(0));
1149  const Register right = ToRegister(instr->InputAt(1));
1150  const Register scratch = scratch0();
1151  const Register result = ToRegister(instr->result());
1152 
1153  // Check for x / 0.
1154  if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
1155  __ cmp(right, Operand(0));
1156  DeoptimizeIf(eq, instr->environment());
1157  }
1158 
1159  // Check for (0 / -x) that will produce negative zero.
1160  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
1161  Label left_not_zero;
1162  __ cmp(left, Operand(0));
1163  __ b(ne, &left_not_zero);
1164  __ cmp(right, Operand(0));
1165  DeoptimizeIf(mi, instr->environment());
1166  __ bind(&left_not_zero);
1167  }
1168 
1169  // Check for (-kMinInt / -1).
1170  if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
1171  Label left_not_min_int;
1172  __ cmp(left, Operand(kMinInt));
1173  __ b(ne, &left_not_min_int);
1174  __ cmp(right, Operand(-1));
1175  DeoptimizeIf(eq, instr->environment());
1176  __ bind(&left_not_min_int);
1177  }
1178 
1179  Label done, deoptimize;
1180  // Test for a few common cases first.
1181  __ cmp(right, Operand(1));
1182  __ mov(result, left, LeaveCC, eq);
1183  __ b(eq, &done);
1184 
1185  __ cmp(right, Operand(2));
1186  __ tst(left, Operand(1), eq);
1187  __ mov(result, Operand(left, ASR, 1), LeaveCC, eq);
1188  __ b(eq, &done);
1189 
1190  __ cmp(right, Operand(4));
1191  __ tst(left, Operand(3), eq);
1192  __ mov(result, Operand(left, ASR, 2), LeaveCC, eq);
1193  __ b(eq, &done);
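  // Each shortcut above is fully predicated: e.g. in the division-by-4 case
  // the tst only executes when right == 4 and keeps eq only when the low two
  // bits of left are clear, so the ASR by 2 is exact; otherwise control falls
  // through to the generic stub path below.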
1194 
1195  // Call the stub. The numbers in r0 and r1 have
1196  // to be tagged to Smis. If that is not possible, deoptimize.
1197  DeferredDivI* deferred = new(zone()) DeferredDivI(this, instr);
1198 
1199  __ TrySmiTag(left, &deoptimize, scratch);
1200  __ TrySmiTag(right, &deoptimize, scratch);
1201 
1202  __ b(al, deferred->entry());
1203  __ bind(deferred->exit());
1204 
1205  // If the result in r0 is a Smi, untag it, else deoptimize.
1206  __ JumpIfNotSmi(result, &deoptimize);
1207  __ SmiUntag(result);
1208  __ b(&done);
1209 
1210  __ bind(&deoptimize);
1211  DeoptimizeIf(al, instr->environment());
1212  __ bind(&done);
1213 }
1214 
1215 
1216 void LCodeGen::DoMathFloorOfDiv(LMathFloorOfDiv* instr) {
1217  const Register result = ToRegister(instr->result());
1218  const Register left = ToRegister(instr->InputAt(0));
1219  const Register remainder = ToRegister(instr->TempAt(0));
1220  const Register scratch = scratch0();
1221 
1222  // We only optimize this for division by constants, because the standard
1223  // integer division routine is usually slower than transitioning to VFP.
1224  // This could be optimized on processors with SDIV available.
1225  ASSERT(instr->InputAt(1)->IsConstantOperand());
1226  int32_t divisor = ToInteger32(LConstantOperand::cast(instr->InputAt(1)));
1227  if (divisor < 0) {
1228  __ cmp(left, Operand(0));
1229  DeoptimizeIf(eq, instr->environment());
1230  }
1231  EmitSignedIntegerDivisionByConstant(result,
1232  left,
1233  divisor,
1234  remainder,
1235  scratch,
1236  instr->environment());
1237  // We performed a truncating division. Correct the result if necessary.
1238  __ cmp(remainder, Operand(0));
1239  __ teq(remainder, Operand(divisor), ne);
1240  __ sub(result, result, Operand(1), LeaveCC, mi);
1241 }
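// The last three instructions turn the truncating quotient into a flooring
// one: the conditional teq runs only for a non-zero remainder and sets N when
// remainder and divisor have different signs, and only then is 1 subtracted.
// For example, -7 / 2 truncates to -3 with remainder -1; -1 ^ 2 is negative,
// so the result is corrected to -4 == floor(-3.5).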
1242 
1243 
1244 template<int T>
1245 void LCodeGen::DoDeferredBinaryOpStub(LTemplateInstruction<1, 2, T>* instr,
1246  Token::Value op) {
1247  Register left = ToRegister(instr->InputAt(0));
1248  Register right = ToRegister(instr->InputAt(1));
1249 
1250  PushSafepointRegistersScope scope(this, Safepoint::kWithRegistersAndDoubles);
1251  // Move left to r1 and right to r0 for the stub call.
1252  if (left.is(r1)) {
1253  __ Move(r0, right);
1254  } else if (left.is(r0) && right.is(r1)) {
1255  __ Swap(r0, r1, r2);
1256  } else if (left.is(r0)) {
1257  ASSERT(!right.is(r1));
1258  __ mov(r1, r0);
1259  __ mov(r0, right);
1260  } else {
1261  ASSERT(!left.is(r0) && !right.is(r0));
1262  __ mov(r0, right);
1263  __ mov(r1, left);
1264  }
1265  BinaryOpStub stub(op, OVERWRITE_LEFT);
1266  __ CallStub(&stub);
1267  RecordSafepointWithRegistersAndDoubles(instr->pointer_map(),
1268  0,
1269  Safepoint::kNoLazyDeopt);
1270  // Overwrite the stored value of r0 with the result of the stub.
1271  __ StoreToSafepointRegistersAndDoublesSlot(r0, r0);
1272 }
1273 
1274 
1275 void LCodeGen::DoMulI(LMulI* instr) {
1276  Register scratch = scratch0();
1277  Register result = ToRegister(instr->result());
1278  // Note that result may alias left.
1279  Register left = ToRegister(instr->InputAt(0));
1280  LOperand* right_op = instr->InputAt(1);
1281 
1282  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
1283  bool bailout_on_minus_zero =
1284  instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero);
1285 
1286  if (right_op->IsConstantOperand() && !can_overflow) {
1287  // Use optimized code for specific constants.
1288  int32_t constant = ToInteger32(LConstantOperand::cast(right_op));
1289 
1290  if (bailout_on_minus_zero && (constant < 0)) {
1291  // The case of a zero constant is handled separately.
1292  // If the constant is negative and left is zero, the result should be -0.
1293  __ cmp(left, Operand(0));
1294  DeoptimizeIf(eq, instr->environment());
1295  }
1296 
1297  switch (constant) {
1298  case -1:
1299  __ rsb(result, left, Operand(0));
1300  break;
1301  case 0:
1302  if (bailout_on_minus_zero) {
1303  // If left is strictly negative and the constant is zero, the
1304  // result is -0. Deoptimize if required, otherwise return 0.
1305  __ cmp(left, Operand(0));
1306  DeoptimizeIf(mi, instr->environment());
1307  }
1308  __ mov(result, Operand(0));
1309  break;
1310  case 1:
1311  __ Move(result, left);
1312  break;
1313  default:
1314  // Multiplying by powers of two and powers of two plus or minus
1315  // one can be done faster with shifted operands.
1316  // For other constants we emit standard code.
1317  int32_t mask = constant >> 31;
1318  uint32_t constant_abs = (constant + mask) ^ mask;
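  // Branch-free absolute value: mask is 0 for a non-negative constant and -1
  // otherwise, and (x + mask) ^ mask == -x when mask is -1, e.g. for -5:
  // (-5 + -1) ^ -1 == 5.  The power-of-two tests below then use shifted
  // operands, e.g. x * 5 becomes add(result, left, Operand(left, LSL, 2)).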
1319 
1320  if (IsPowerOf2(constant_abs) ||
1321  IsPowerOf2(constant_abs - 1) ||
1322  IsPowerOf2(constant_abs + 1)) {
1323  if (IsPowerOf2(constant_abs)) {
1324  int32_t shift = WhichPowerOf2(constant_abs);
1325  __ mov(result, Operand(left, LSL, shift));
1326  } else if (IsPowerOf2(constant_abs - 1)) {
1327  int32_t shift = WhichPowerOf2(constant_abs - 1);
1328  __ add(result, left, Operand(left, LSL, shift));
1329  } else if (IsPowerOf2(constant_abs + 1)) {
1330  int32_t shift = WhichPowerOf2(constant_abs + 1);
1331  __ rsb(result, left, Operand(left, LSL, shift));
1332  }
1333 
1334  // Correct the sign of the result if the constant is negative.
1335  if (constant < 0) __ rsb(result, result, Operand(0));
1336 
1337  } else {
1338  // Generate standard code.
1339  __ mov(ip, Operand(constant));
1340  __ mul(result, left, ip);
1341  }
1342  }
1343 
1344  } else {
1345  Register right = EmitLoadRegister(right_op, scratch);
1346  if (bailout_on_minus_zero) {
1347  __ orr(ToRegister(instr->TempAt(0)), left, right);
1348  }
1349 
1350  if (can_overflow) {
1351  // scratch:result = left * right.
1352  __ smull(result, scratch, left, right);
1353  __ cmp(scratch, Operand(result, ASR, 31));
1354  DeoptimizeIf(ne, instr->environment());
1355  } else {
1356  __ mul(result, left, right);
1357  }
1358 
1359  if (bailout_on_minus_zero) {
1360  // Bail out if the result is supposed to be negative zero.
1361  Label done;
1362  __ cmp(result, Operand(0));
1363  __ b(ne, &done);
1364  __ cmp(ToRegister(instr->TempAt(0)), Operand(0));
1365  DeoptimizeIf(mi, instr->environment());
1366  __ bind(&done);
1367  }
1368  }
1369 }
1370 
1371 
1372 void LCodeGen::DoBitI(LBitI* instr) {
1373  LOperand* left_op = instr->InputAt(0);
1374  LOperand* right_op = instr->InputAt(1);
1375  ASSERT(left_op->IsRegister());
1376  Register left = ToRegister(left_op);
1377  Register result = ToRegister(instr->result());
1378  Operand right(no_reg);
1379 
1380  if (right_op->IsStackSlot() || right_op->IsArgument()) {
1381  right = Operand(EmitLoadRegister(right_op, ip));
1382  } else {
1383  ASSERT(right_op->IsRegister() || right_op->IsConstantOperand());
1384  right = ToOperand(right_op);
1385  }
1386 
1387  switch (instr->op()) {
1388  case Token::BIT_AND:
1389  __ and_(result, left, right);
1390  break;
1391  case Token::BIT_OR:
1392  __ orr(result, left, right);
1393  break;
1394  case Token::BIT_XOR:
1395  __ eor(result, left, right);
1396  break;
1397  default:
1398  UNREACHABLE();
1399  break;
1400  }
1401 }
1402 
1403 
1404 void LCodeGen::DoShiftI(LShiftI* instr) {
1405  // Both 'left' and 'right' are "used at start" (see LCodeGen::DoShift), so
1406  // result may alias either of them.
1407  LOperand* right_op = instr->InputAt(1);
1408  Register left = ToRegister(instr->InputAt(0));
1409  Register result = ToRegister(instr->result());
1410  Register scratch = scratch0();
1411  if (right_op->IsRegister()) {
1412  // Mask the right_op operand.
1413  __ and_(scratch, ToRegister(right_op), Operand(0x1F));
1414  switch (instr->op()) {
1415  case Token::SAR:
1416  __ mov(result, Operand(left, ASR, scratch));
1417  break;
1418  case Token::SHR:
1419  if (instr->can_deopt()) {
1420  __ mov(result, Operand(left, LSR, scratch), SetCC);
1421  DeoptimizeIf(mi, instr->environment());
1422  } else {
1423  __ mov(result, Operand(left, LSR, scratch));
1424  }
1425  break;
1426  case Token::SHL:
1427  __ mov(result, Operand(left, LSL, scratch));
1428  break;
1429  default:
1430  UNREACHABLE();
1431  break;
1432  }
1433  } else {
1434  // Mask the right_op operand.
1435  int value = ToInteger32(LConstantOperand::cast(right_op));
1436  uint8_t shift_count = static_cast<uint8_t>(value & 0x1F);
1437  switch (instr->op()) {
1438  case Token::SAR:
1439  if (shift_count != 0) {
1440  __ mov(result, Operand(left, ASR, shift_count));
1441  } else {
1442  __ Move(result, left);
1443  }
1444  break;
1445  case Token::SHR:
1446  if (shift_count != 0) {
1447  __ mov(result, Operand(left, LSR, shift_count));
1448  } else {
1449  if (instr->can_deopt()) {
1450  __ tst(left, Operand(0x80000000));
1451  DeoptimizeIf(ne, instr->environment());
1452  }
1453  __ Move(result, left);
1454  }
1455  break;
1456  case Token::SHL:
1457  if (shift_count != 0) {
1458  __ mov(result, Operand(left, LSL, shift_count));
1459  } else {
1460  __ Move(result, left);
1461  }
1462  break;
1463  default:
1464  UNREACHABLE();
1465  break;
1466  }
1467  }
1468 }
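// Note on the SHR deopts above: x >>> s only falls outside the signed 32-bit
// range when the shift count is 0 and bit 31 of x is set, which is exactly
// what the SetCC/mi check (register count) and the tst against 0x80000000
// (constant count of zero) detect.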
1469 
1470 
1471 void LCodeGen::DoSubI(LSubI* instr) {
1472  LOperand* left = instr->InputAt(0);
1473  LOperand* right = instr->InputAt(1);
1474  LOperand* result = instr->result();
1475  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
1476  SBit set_cond = can_overflow ? SetCC : LeaveCC;
1477 
1478  if (right->IsStackSlot() || right->IsArgument()) {
1479  Register right_reg = EmitLoadRegister(right, ip);
1480  __ sub(ToRegister(result), ToRegister(left), Operand(right_reg), set_cond);
1481  } else {
1482  ASSERT(right->IsRegister() || right->IsConstantOperand());
1483  __ sub(ToRegister(result), ToRegister(left), ToOperand(right), set_cond);
1484  }
1485 
1486  if (can_overflow) {
1487  DeoptimizeIf(vs, instr->environment());
1488  }
1489 }
1490 
1491 
1492 void LCodeGen::DoConstantI(LConstantI* instr) {
1493  ASSERT(instr->result()->IsRegister());
1494  __ mov(ToRegister(instr->result()), Operand(instr->value()));
1495 }
1496 
1497 
1498 void LCodeGen::DoConstantD(LConstantD* instr) {
1499  ASSERT(instr->result()->IsDoubleRegister());
1500  DwVfpRegister result = ToDoubleRegister(instr->result());
1501  double v = instr->value();
1502  __ Vmov(result, v);
1503 }
1504 
1505 
1506 void LCodeGen::DoConstantT(LConstantT* instr) {
1507  Handle<Object> value = instr->value();
1508  if (value->IsSmi()) {
1509  __ mov(ToRegister(instr->result()), Operand(value));
1510  } else {
1511  __ LoadHeapObject(ToRegister(instr->result()),
1512  Handle<HeapObject>::cast(value));
1513  }
1514 }
1515 
1516 
1517 void LCodeGen::DoJSArrayLength(LJSArrayLength* instr) {
1518  Register result = ToRegister(instr->result());
1519  Register array = ToRegister(instr->InputAt(0));
1520  __ ldr(result, FieldMemOperand(array, JSArray::kLengthOffset));
1521 }
1522 
1523 
1524 void LCodeGen::DoFixedArrayBaseLength(LFixedArrayBaseLength* instr) {
1525  Register result = ToRegister(instr->result());
1526  Register array = ToRegister(instr->InputAt(0));
1527  __ ldr(result, FieldMemOperand(array, FixedArrayBase::kLengthOffset));
1528 }
1529 
1530 
1531 void LCodeGen::DoElementsKind(LElementsKind* instr) {
1532  Register result = ToRegister(instr->result());
1533  Register input = ToRegister(instr->InputAt(0));
1534 
1535  // Load map into |result|.
1536  __ ldr(result, FieldMemOperand(input, HeapObject::kMapOffset));
1537  // Load the map's "bit field 2" into |result|. We only need the first byte,
1538  // but the following bit field extraction takes care of that anyway.
1539  __ ldr(result, FieldMemOperand(result, Map::kBitField2Offset));
1540  // Retrieve elements_kind from bit field 2.
1541  __ ubfx(result, result, Map::kElementsKindShift, Map::kElementsKindBitCount);
1542 }
1543 
1544 
1545 void LCodeGen::DoValueOf(LValueOf* instr) {
1546  Register input = ToRegister(instr->InputAt(0));
1547  Register result = ToRegister(instr->result());
1548  Register map = ToRegister(instr->TempAt(0));
1549  Label done;
1550 
1551  // If the object is a smi return the object.
1552  __ tst(input, Operand(kSmiTagMask));
1553  __ Move(result, input, eq);
1554  __ b(eq, &done);
1555 
1556  // If the object is not a value type, return the object.
1557  __ CompareObjectType(input, map, map, JS_VALUE_TYPE);
1558  __ Move(result, input, ne);
1559  __ b(ne, &done);
1560  __ ldr(result, FieldMemOperand(input, JSValue::kValueOffset));
1561 
1562  __ bind(&done);
1563 }
1564 
1565 
1566 void LCodeGen::DoDateField(LDateField* instr) {
1567  Register object = ToRegister(instr->InputAt(0));
1568  Register result = ToRegister(instr->result());
1569  Register scratch = ToRegister(instr->TempAt(0));
1570  Smi* index = instr->index();
1571  Label runtime, done;
1572  ASSERT(object.is(result));
1573  ASSERT(object.is(r0));
1574  ASSERT(!scratch.is(scratch0()));
1575  ASSERT(!scratch.is(object));
1576 
1577 #ifdef DEBUG
1578  __ AbortIfSmi(object);
1579  __ CompareObjectType(object, scratch, scratch, JS_DATE_TYPE);
1580  __ Assert(eq, "Trying to get date field from non-date.");
1581 #endif
1582 
1583  if (index->value() == 0) {
1584  __ ldr(result, FieldMemOperand(object, JSDate::kValueOffset));
1585  } else {
1586  if (index->value() < JSDate::kFirstUncachedField) {
1587  ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
1588  __ mov(scratch, Operand(stamp));
1589  __ ldr(scratch, MemOperand(scratch));
1590  __ ldr(scratch0(), FieldMemOperand(object, JSDate::kCacheStampOffset));
1591  __ cmp(scratch, scratch0());
1592  __ b(ne, &runtime);
1593  __ ldr(result, FieldMemOperand(object, JSDate::kValueOffset +
1594  kPointerSize * index->value()));
1595  __ jmp(&done);
1596  }
1597  __ bind(&runtime);
1598  __ PrepareCallCFunction(2, scratch);
1599  __ mov(r1, Operand(index));
1600  __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
1601  __ bind(&done);
1602  }
1603 }
1604 
1605 
1606 void LCodeGen::DoBitNotI(LBitNotI* instr) {
1607  Register input = ToRegister(instr->InputAt(0));
1608  Register result = ToRegister(instr->result());
1609  __ mvn(result, Operand(input));
1610 }
1611 
1612 
1613 void LCodeGen::DoThrow(LThrow* instr) {
1614  Register input_reg = EmitLoadRegister(instr->InputAt(0), ip);
1615  __ push(input_reg);
1616  CallRuntime(Runtime::kThrow, 1, instr);
1617 
1618  if (FLAG_debug_code) {
1619  __ stop("Unreachable code.");
1620  }
1621 }
1622 
1623 
1624 void LCodeGen::DoAddI(LAddI* instr) {
1625  LOperand* left = instr->InputAt(0);
1626  LOperand* right = instr->InputAt(1);
1627  LOperand* result = instr->result();
1628  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
1629  SBit set_cond = can_overflow ? SetCC : LeaveCC;
1630 
1631  if (right->IsStackSlot() || right->IsArgument()) {
1632  Register right_reg = EmitLoadRegister(right, ip);
1633  __ add(ToRegister(result), ToRegister(left), Operand(right_reg), set_cond);
1634  } else {
1635  ASSERT(right->IsRegister() || right->IsConstantOperand());
1636  __ add(ToRegister(result), ToRegister(left), ToOperand(right), set_cond);
1637  }
1638 
1639  if (can_overflow) {
1640  DeoptimizeIf(vs, instr->environment());
1641  }
1642 }
1643 
1644 
1645 void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
1646  DoubleRegister left = ToDoubleRegister(instr->InputAt(0));
1647  DoubleRegister right = ToDoubleRegister(instr->InputAt(1));
1648  DoubleRegister result = ToDoubleRegister(instr->result());
1649  switch (instr->op()) {
1650  case Token::ADD:
1651  __ vadd(result, left, right);
1652  break;
1653  case Token::SUB:
1654  __ vsub(result, left, right);
1655  break;
1656  case Token::MUL:
1657  __ vmul(result, left, right);
1658  break;
1659  case Token::DIV:
1660  __ vdiv(result, left, right);
1661  break;
1662  case Token::MOD: {
1663  // Save r0-r3 on the stack.
1664  __ stm(db_w, sp, r0.bit() | r1.bit() | r2.bit() | r3.bit());
1665 
1666  __ PrepareCallCFunction(0, 2, scratch0());
1667  __ SetCallCDoubleArguments(left, right);
1668  __ CallCFunction(
1669  ExternalReference::double_fp_operation(Token::MOD, isolate()),
1670  0, 2);
1671  // Move the result in the double result register.
1672  __ GetCFunctionDoubleResult(result);
1673 
1674  // Restore r0-r3.
1675  __ ldm(ia_w, sp, r0.bit() | r1.bit() | r2.bit() | r3.bit());
1676  break;
1677  }
1678  default:
1679  UNREACHABLE();
1680  break;
1681  }
1682 }
1683 
1684 
1685 void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
1686  ASSERT(ToRegister(instr->InputAt(0)).is(r1));
1687  ASSERT(ToRegister(instr->InputAt(1)).is(r0));
1688  ASSERT(ToRegister(instr->result()).is(r0));
1689 
1690  BinaryOpStub stub(instr->op(), NO_OVERWRITE);
1691  // Block literal pool emission to ensure the nop indicating no inlined smi
1692  // code is in the correct position.
1693  Assembler::BlockConstPoolScope block_const_pool(masm());
1694  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
1695  __ nop(); // Signals no inlined code.
1696 }
1697 
1698 
1699 int LCodeGen::GetNextEmittedBlock(int block) {
1700  for (int i = block + 1; i < graph()->blocks()->length(); ++i) {
1701  LLabel* label = chunk_->GetLabel(i);
1702  if (!label->HasReplacement()) return i;
1703  }
1704  return -1;
1705 }
1706 
1707 
1708 void LCodeGen::EmitBranch(int left_block, int right_block, Condition cc) {
1709  int next_block = GetNextEmittedBlock(current_block_);
1710  right_block = chunk_->LookupDestination(right_block);
1711  left_block = chunk_->LookupDestination(left_block);
1712 
1713  if (right_block == left_block) {
1714  EmitGoto(left_block);
1715  } else if (left_block == next_block) {
1716  __ b(NegateCondition(cc), chunk_->GetAssemblyLabel(right_block));
1717  } else if (right_block == next_block) {
1718  __ b(cc, chunk_->GetAssemblyLabel(left_block));
1719  } else {
1720  __ b(cc, chunk_->GetAssemblyLabel(left_block));
1721  __ b(chunk_->GetAssemblyLabel(right_block));
1722  }
1723 }
1724 
1725 
1726 void LCodeGen::DoBranch(LBranch* instr) {
1727  int true_block = chunk_->LookupDestination(instr->true_block_id());
1728  int false_block = chunk_->LookupDestination(instr->false_block_id());
1729 
1730  Representation r = instr->hydrogen()->value()->representation();
1731  if (r.IsInteger32()) {
1732  Register reg = ToRegister(instr->InputAt(0));
1733  __ cmp(reg, Operand(0));
1734  EmitBranch(true_block, false_block, ne);
1735  } else if (r.IsDouble()) {
1736  DoubleRegister reg = ToDoubleRegister(instr->InputAt(0));
1737  Register scratch = scratch0();
1738 
1739  // Test the double value. Zero and NaN are false.
1740  __ VFPCompareAndLoadFlags(reg, 0.0, scratch);
1741  __ tst(scratch, Operand(kVFPZConditionFlagBit | kVFPVConditionFlagBit));
1742  EmitBranch(true_block, false_block, eq);
1743  } else {
1744  ASSERT(r.IsTagged());
1745  Register reg = ToRegister(instr->InputAt(0));
1746  HType type = instr->hydrogen()->value()->type();
1747  if (type.IsBoolean()) {
1748  __ CompareRoot(reg, Heap::kTrueValueRootIndex);
1749  EmitBranch(true_block, false_block, eq);
1750  } else if (type.IsSmi()) {
1751  __ cmp(reg, Operand(0));
1752  EmitBranch(true_block, false_block, ne);
1753  } else {
1754  Label* true_label = chunk_->GetAssemblyLabel(true_block);
1755  Label* false_label = chunk_->GetAssemblyLabel(false_block);
1756 
1757  ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types();
1758  // Avoid deopts in the case where we've never executed this path before.
1759  if (expected.IsEmpty()) expected = ToBooleanStub::all_types();
1760 
1761  if (expected.Contains(ToBooleanStub::UNDEFINED)) {
1762  // undefined -> false.
1763  __ CompareRoot(reg, Heap::kUndefinedValueRootIndex);
1764  __ b(eq, false_label);
1765  }
1766  if (expected.Contains(ToBooleanStub::BOOLEAN)) {
1767  // Boolean -> its value.
1768  __ CompareRoot(reg, Heap::kTrueValueRootIndex);
1769  __ b(eq, true_label);
1770  __ CompareRoot(reg, Heap::kFalseValueRootIndex);
1771  __ b(eq, false_label);
1772  }
1773  if (expected.Contains(ToBooleanStub::NULL_TYPE)) {
1774  // 'null' -> false.
1775  __ CompareRoot(reg, Heap::kNullValueRootIndex);
1776  __ b(eq, false_label);
1777  }
1778 
1779  if (expected.Contains(ToBooleanStub::SMI)) {
1780  // Smis: 0 -> false, all other -> true.
1781  __ cmp(reg, Operand(0));
1782  __ b(eq, false_label);
1783  __ JumpIfSmi(reg, true_label);
1784  } else if (expected.NeedsMap()) {
1785  // If we need a map later and have a Smi -> deopt.
1786  __ tst(reg, Operand(kSmiTagMask));
1787  DeoptimizeIf(eq, instr->environment());
1788  }
1789 
1790  const Register map = scratch0();
1791  if (expected.NeedsMap()) {
1792  __ ldr(map, FieldMemOperand(reg, HeapObject::kMapOffset));
1793 
1794  if (expected.CanBeUndetectable()) {
1795  // Undetectable -> false.
1796  __ ldrb(ip, FieldMemOperand(map, Map::kBitFieldOffset));
1797  __ tst(ip, Operand(1 << Map::kIsUndetectable));
1798  __ b(ne, false_label);
1799  }
1800  }
1801 
1802  if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) {
1803  // spec object -> true.
1804  __ CompareInstanceType(map, ip, FIRST_SPEC_OBJECT_TYPE);
1805  __ b(ge, true_label);
1806  }
1807 
1808  if (expected.Contains(ToBooleanStub::STRING)) {
1809  // String value -> false iff empty.
1810  Label not_string;
1811  __ CompareInstanceType(map, ip, FIRST_NONSTRING_TYPE);
1812  __ b(ge, &not_string);
1813  __ ldr(ip, FieldMemOperand(reg, String::kLengthOffset));
1814  __ cmp(ip, Operand(0));
1815  __ b(ne, true_label);
1816  __ b(false_label);
1817  __ bind(&not_string);
1818  }
1819 
1820  if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
1821  // heap number -> false iff +0, -0, or NaN.
1822  DoubleRegister dbl_scratch = double_scratch0();
1823  Label not_heap_number;
1824  __ CompareRoot(map, Heap::kHeapNumberMapRootIndex);
1825  __ b(ne, &not_heap_number);
1826  __ vldr(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset));
1827  __ VFPCompareAndSetFlags(dbl_scratch, 0.0);
1828  __ b(vs, false_label); // NaN -> false.
1829  __ b(eq, false_label); // +0, -0 -> false.
1830  __ b(true_label);
1831  __ bind(&not_heap_number);
1832  }
1833 
1834  // We've seen something for the first time -> deopt.
1835  DeoptimizeIf(al, instr->environment());
1836  }
1837  }
1838 }
1839 
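// Illustrative sketch (editorial addition, not part of the original file):
// DoBranch implements JavaScript ToBoolean. The only falsy inputs are
// undefined, null, false, +0, -0, NaN, the empty string, and "undetectable"
// objects (e.g. document.all); everything else, including empty arrays and
// objects, goes to the true block. The double-register case above reduces to:
//
//   #include <cmath>
//   bool DoubleToBoolean(double x) {
//     return x != 0.0 && !std::isnan(x);  // +0, -0 and NaN are false
//   }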
1840 
1841 void LCodeGen::EmitGoto(int block) {
1842  block = chunk_->LookupDestination(block);
1843  int next_block = GetNextEmittedBlock(current_block_);
1844  if (block != next_block) {
1845  __ jmp(chunk_->GetAssemblyLabel(block));
1846  }
1847 }
1848 
1849 
1850 void LCodeGen::DoGoto(LGoto* instr) {
1851  EmitGoto(instr->block_id());
1852 }
1853 
1854 
1855 Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
1856  Condition cond = kNoCondition;
1857  switch (op) {
1858  case Token::EQ:
1859  case Token::EQ_STRICT:
1860  cond = eq;
1861  break;
1862  case Token::LT:
1863  cond = is_unsigned ? lo : lt;
1864  break;
1865  case Token::GT:
1866  cond = is_unsigned ? hi : gt;
1867  break;
1868  case Token::LTE:
1869  cond = is_unsigned ? ls : le;
1870  break;
1871  case Token::GTE:
1872  cond = is_unsigned ? hs : ge;
1873  break;
1874  case Token::IN:
1875  case Token::INSTANCEOF:
1876  default:
1877  UNREACHABLE();
1878  }
1879  return cond;
1880 }
1881 
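// Illustrative sketch (editorial addition, not part of the original file): the
// signed/unsigned split in TokenToCondition matters because the same bits order
// differently under the two interpretations. Assuming <cstdint>:
//
//   int32_t a = -1, b = 1;
//   bool lt_signed = a < b;                                        // true  -> lt
//   bool lo_unsigned =
//       static_cast<uint32_t>(a) < static_cast<uint32_t>(b);       // false -> lo
//
// -1 reinterpreted as uint32_t is 0xFFFFFFFF, so the unsigned conditions
// (lo/hi/ls/hs) would take the opposite branch here.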
1882 
1883 void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) {
1884  LOperand* left = instr->InputAt(0);
1885  LOperand* right = instr->InputAt(1);
1886  int false_block = chunk_->LookupDestination(instr->false_block_id());
1887  int true_block = chunk_->LookupDestination(instr->true_block_id());
1888  Condition cond = TokenToCondition(instr->op(), false);
1889 
1890  if (left->IsConstantOperand() && right->IsConstantOperand()) {
1891  // We can statically evaluate the comparison.
1892  double left_val = ToDouble(LConstantOperand::cast(left));
1893  double right_val = ToDouble(LConstantOperand::cast(right));
1894  int next_block =
1895  EvalComparison(instr->op(), left_val, right_val) ? true_block
1896  : false_block;
1897  EmitGoto(next_block);
1898  } else {
1899  if (instr->is_double()) {
1900  // Compare left and right operands as doubles and load the
1901  // resulting flags into the normal status register.
1902  __ VFPCompareAndSetFlags(ToDoubleRegister(left), ToDoubleRegister(right));
1903  // If a NaN is involved, i.e. the result is unordered (V set),
1904  // jump to false block label.
1905  __ b(vs, chunk_->GetAssemblyLabel(false_block));
1906  } else {
1907  if (right->IsConstantOperand()) {
1908  __ cmp(ToRegister(left),
1909  Operand(ToInteger32(LConstantOperand::cast(right))));
1910  } else if (left->IsConstantOperand()) {
1911  __ cmp(ToRegister(right),
1912  Operand(ToInteger32(LConstantOperand::cast(left))));
1913  // We transposed the operands. Reverse the condition.
1914  cond = ReverseCondition(cond);
1915  } else {
1916  __ cmp(ToRegister(left), ToRegister(right));
1917  }
1918  }
1919  EmitBranch(true_block, false_block, cond);
1920  }
1921 }
1922 
1923 
1924 void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
1925  Register left = ToRegister(instr->InputAt(0));
1926  Register right = ToRegister(instr->InputAt(1));
1927  int false_block = chunk_->LookupDestination(instr->false_block_id());
1928  int true_block = chunk_->LookupDestination(instr->true_block_id());
1929 
1930  __ cmp(left, Operand(right));
1931  EmitBranch(true_block, false_block, eq);
1932 }
1933 
1934 
1935 void LCodeGen::DoCmpConstantEqAndBranch(LCmpConstantEqAndBranch* instr) {
1936  Register left = ToRegister(instr->InputAt(0));
1937  int true_block = chunk_->LookupDestination(instr->true_block_id());
1938  int false_block = chunk_->LookupDestination(instr->false_block_id());
1939 
1940  __ cmp(left, Operand(instr->hydrogen()->right()));
1941  EmitBranch(true_block, false_block, eq);
1942 }
1943 
1944 
1945 void LCodeGen::DoIsNilAndBranch(LIsNilAndBranch* instr) {
1946  Register scratch = scratch0();
1947  Register reg = ToRegister(instr->InputAt(0));
1948  int false_block = chunk_->LookupDestination(instr->false_block_id());
1949 
1950  // If the expression is known to be untagged or a smi, then it's definitely
1951  // not null, and it can't be an undetectable object.
1952  if (instr->hydrogen()->representation().IsSpecialization() ||
1953  instr->hydrogen()->type().IsSmi()) {
1954  EmitGoto(false_block);
1955  return;
1956  }
1957 
1958  int true_block = chunk_->LookupDestination(instr->true_block_id());
1959  Heap::RootListIndex nil_value = instr->nil() == kNullValue ?
1960  Heap::kNullValueRootIndex :
1961  Heap::kUndefinedValueRootIndex;
1962  __ LoadRoot(ip, nil_value);
1963  __ cmp(reg, ip);
1964  if (instr->kind() == kStrictEquality) {
1965  EmitBranch(true_block, false_block, eq);
1966  } else {
1967  Heap::RootListIndex other_nil_value = instr->nil() == kNullValue ?
1968  Heap::kUndefinedValueRootIndex :
1969  Heap::kNullValueRootIndex;
1970  Label* true_label = chunk_->GetAssemblyLabel(true_block);
1971  Label* false_label = chunk_->GetAssemblyLabel(false_block);
1972  __ b(eq, true_label);
1973  __ LoadRoot(ip, other_nil_value);
1974  __ cmp(reg, ip);
1975  __ b(eq, true_label);
1976  __ JumpIfSmi(reg, false_label);
1977  // Check for undetectable objects by looking in the bit field in
1978  // the map. The object has already been smi checked.
1979  __ ldr(scratch, FieldMemOperand(reg, HeapObject::kMapOffset));
1980  __ ldrb(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
1981  __ tst(scratch, Operand(1 << Map::kIsUndetectable));
1982  EmitBranch(true_block, false_block, ne);
1983  }
1984 }
1985 
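// Illustrative sketch (editorial addition, not part of the original file): the
// two-root comparison plus the Map::kIsUndetectable test above mirrors loose
// equality against nil in JavaScript:
//
//   null == undefined     // true:  either nil value matches (non-strict)
//   null === undefined    // false: only the exact nil value matches (strict)
//   document.all == null  // true:  undetectable objects compare equal to nil
//
// which is why kStrictEquality needs a single compare while the non-strict case
// also checks the other nil root and the undetectable bit.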
1986 
1987 Condition LCodeGen::EmitIsObject(Register input,
1988  Register temp1,
1989  Label* is_not_object,
1990  Label* is_object) {
1991  Register temp2 = scratch0();
1992  __ JumpIfSmi(input, is_not_object);
1993 
1994  __ LoadRoot(temp2, Heap::kNullValueRootIndex);
1995  __ cmp(input, temp2);
1996  __ b(eq, is_object);
1997 
1998  // Load map.
1999  __ ldr(temp1, FieldMemOperand(input, HeapObject::kMapOffset));
2000  // Undetectable objects behave like undefined.
2001  __ ldrb(temp2, FieldMemOperand(temp1, Map::kBitFieldOffset));
2002  __ tst(temp2, Operand(1 << Map::kIsUndetectable));
2003  __ b(ne, is_not_object);
2004 
2005  // Load instance type and check that it is in object type range.
2006  __ ldrb(temp2, FieldMemOperand(temp1, Map::kInstanceTypeOffset));
2007  __ cmp(temp2, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
2008  __ b(lt, is_not_object);
2009  __ cmp(temp2, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
2010  return le;
2011 }
2012 
2013 
2014 void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
2015  Register reg = ToRegister(instr->InputAt(0));
2016  Register temp1 = ToRegister(instr->TempAt(0));
2017 
2018  int true_block = chunk_->LookupDestination(instr->true_block_id());
2019  int false_block = chunk_->LookupDestination(instr->false_block_id());
2020  Label* true_label = chunk_->GetAssemblyLabel(true_block);
2021  Label* false_label = chunk_->GetAssemblyLabel(false_block);
2022 
2023  Condition true_cond =
2024  EmitIsObject(reg, temp1, false_label, true_label);
2025 
2026  EmitBranch(true_block, false_block, true_cond);
2027 }
2028 
2029 
2030 Condition LCodeGen::EmitIsString(Register input,
2031  Register temp1,
2032  Label* is_not_string) {
2033  __ JumpIfSmi(input, is_not_string);
2034  __ CompareObjectType(input, temp1, temp1, FIRST_NONSTRING_TYPE);
2035 
2036  return lt;
2037 }
2038 
2039 
2040 void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
2041  Register reg = ToRegister(instr->InputAt(0));
2042  Register temp1 = ToRegister(instr->TempAt(0));
2043 
2044  int true_block = chunk_->LookupDestination(instr->true_block_id());
2045  int false_block = chunk_->LookupDestination(instr->false_block_id());
2046  Label* false_label = chunk_->GetAssemblyLabel(false_block);
2047 
2048  Condition true_cond =
2049  EmitIsString(reg, temp1, false_label);
2050 
2051  EmitBranch(true_block, false_block, true_cond);
2052 }
2053 
2054 
2055 void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
2056  int true_block = chunk_->LookupDestination(instr->true_block_id());
2057  int false_block = chunk_->LookupDestination(instr->false_block_id());
2058 
2059  Register input_reg = EmitLoadRegister(instr->InputAt(0), ip);
2060  __ tst(input_reg, Operand(kSmiTagMask));
2061  EmitBranch(true_block, false_block, eq);
2062 }
2063 
2064 
2065 void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
2066  Register input = ToRegister(instr->InputAt(0));
2067  Register temp = ToRegister(instr->TempAt(0));
2068 
2069  int true_block = chunk_->LookupDestination(instr->true_block_id());
2070  int false_block = chunk_->LookupDestination(instr->false_block_id());
2071 
2072  __ JumpIfSmi(input, chunk_->GetAssemblyLabel(false_block));
2073  __ ldr(temp, FieldMemOperand(input, HeapObject::kMapOffset));
2074  __ ldrb(temp, FieldMemOperand(temp, Map::kBitFieldOffset));
2075  __ tst(temp, Operand(1 << Map::kIsUndetectable));
2076  EmitBranch(true_block, false_block, ne);
2077 }
2078 
2079 
2080 static Condition ComputeCompareCondition(Token::Value op) {
2081  switch (op) {
2082  case Token::EQ_STRICT:
2083  case Token::EQ:
2084  return eq;
2085  case Token::LT:
2086  return lt;
2087  case Token::GT:
2088  return gt;
2089  case Token::LTE:
2090  return le;
2091  case Token::GTE:
2092  return ge;
2093  default:
2094  UNREACHABLE();
2095  return kNoCondition;
2096  }
2097 }
2098 
2099 
2100 void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
2101  Token::Value op = instr->op();
2102  int true_block = chunk_->LookupDestination(instr->true_block_id());
2103  int false_block = chunk_->LookupDestination(instr->false_block_id());
2104 
2105  Handle<Code> ic = CompareIC::GetUninitialized(op);
2106  CallCode(ic, RelocInfo::CODE_TARGET, instr);
2107  __ cmp(r0, Operand(0)); // This instruction also signals no smi code inlined.
2108 
2109  Condition condition = ComputeCompareCondition(op);
2110 
2111  EmitBranch(true_block, false_block, condition);
2112 }
2113 
2114 
2115 static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
2116  InstanceType from = instr->from();
2117  InstanceType to = instr->to();
2118  if (from == FIRST_TYPE) return to;
2119  ASSERT(from == to || to == LAST_TYPE);
2120  return from;
2121 }
2122 
2123 
2124 static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
2125  InstanceType from = instr->from();
2126  InstanceType to = instr->to();
2127  if (from == to) return eq;
2128  if (to == LAST_TYPE) return hs;
2129  if (from == FIRST_TYPE) return ls;
2130  UNREACHABLE();
2131  return eq;
2132 }
2133 
2134 
2135 void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
2136  Register scratch = scratch0();
2137  Register input = ToRegister(instr->InputAt(0));
2138 
2139  int true_block = chunk_->LookupDestination(instr->true_block_id());
2140  int false_block = chunk_->LookupDestination(instr->false_block_id());
2141 
2142  Label* false_label = chunk_->GetAssemblyLabel(false_block);
2143 
2144  __ JumpIfSmi(input, false_label);
2145 
2146  __ CompareObjectType(input, scratch, scratch, TestType(instr->hydrogen()));
2147  EmitBranch(true_block, false_block, BranchCondition(instr->hydrogen()));
2148 }
2149 
2150 
2151 void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
2152  Register input = ToRegister(instr->InputAt(0));
2153  Register result = ToRegister(instr->result());
2154 
2155  if (FLAG_debug_code) {
2156  __ AbortIfNotString(input);
2157  }
2158 
2159  __ ldr(result, FieldMemOperand(input, String::kHashFieldOffset));
2160  __ IndexFromHash(result, result);
2161 }
2162 
2163 
2164 void LCodeGen::DoHasCachedArrayIndexAndBranch(
2165  LHasCachedArrayIndexAndBranch* instr) {
2166  Register input = ToRegister(instr->InputAt(0));
2167  Register scratch = scratch0();
2168 
2169  int true_block = chunk_->LookupDestination(instr->true_block_id());
2170  int false_block = chunk_->LookupDestination(instr->false_block_id());
2171 
2172  __ ldr(scratch,
2173  FieldMemOperand(input, String::kHashFieldOffset));
2174  __ tst(scratch, Operand(String::kContainsCachedArrayIndexMask));
2175  EmitBranch(true_block, false_block, eq);
2176 }
2177 
2178 
2179 // Branches to a label or falls through with the answer in flags. Trashes
2180 // the temp registers, but not the input.
2181 void LCodeGen::EmitClassOfTest(Label* is_true,
2182  Label* is_false,
2183  Handle<String>class_name,
2184  Register input,
2185  Register temp,
2186  Register temp2) {
2187  ASSERT(!input.is(temp));
2188  ASSERT(!input.is(temp2));
2189  ASSERT(!temp.is(temp2));
2190 
2191  __ JumpIfSmi(input, is_false);
2192 
2193  if (class_name->IsEqualTo(CStrVector("Function"))) {
2194  // Assuming the following assertions, we can use the same compares to test
2195  // for both being a function type and being in the object type range.
2196  STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
2197  STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
2198  FIRST_SPEC_OBJECT_TYPE + 1);
2199  STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
2200  LAST_SPEC_OBJECT_TYPE - 1);
2201  STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
2202  __ CompareObjectType(input, temp, temp2, FIRST_SPEC_OBJECT_TYPE);
2203  __ b(lt, is_false);
2204  __ b(eq, is_true);
2205  __ cmp(temp2, Operand(LAST_SPEC_OBJECT_TYPE));
2206  __ b(eq, is_true);
2207  } else {
2208  // Faster code path to avoid two compares: subtract lower bound from the
2209  // actual type and do a signed compare with the width of the type range.
2210  __ ldr(temp, FieldMemOperand(input, HeapObject::kMapOffset));
2211  __ ldrb(temp2, FieldMemOperand(temp, Map::kInstanceTypeOffset));
2212  __ sub(temp2, temp2, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
2213  __ cmp(temp2, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE -
2214  FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
2215  __ b(gt, is_false);
2216  }
2217 
2218  // Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range.
2219  // Check if the constructor in the map is a function.
2220  __ ldr(temp, FieldMemOperand(temp, Map::kConstructorOffset));
2221 
2222  // Objects with a non-function constructor have class 'Object'.
2223  __ CompareObjectType(temp, temp2, temp2, JS_FUNCTION_TYPE);
2224  if (class_name->IsEqualTo(CStrVector("Object"))) {
2225  __ b(ne, is_true);
2226  } else {
2227  __ b(ne, is_false);
2228  }
2229 
2230  // temp now contains the constructor function. Grab the
2231  // instance class name from there.
2232  __ ldr(temp, FieldMemOperand(temp, JSFunction::kSharedFunctionInfoOffset));
2233  __ ldr(temp, FieldMemOperand(temp,
2234  SharedFunctionInfo::kInstanceClassNameOffset));
2235  // The class name we are testing against is a symbol because it's a literal.
2236  // The name in the constructor is a symbol because of the way the context is
2237  // booted. This routine isn't expected to work for random API-created
2238  // classes and it doesn't have to because you can't access it with natives
2239  // syntax. Since both sides are symbols it is sufficient to use an identity
2240  // comparison.
2241  __ cmp(temp, Operand(class_name));
2242  // End with the answer in flags.
2243 }
2244 
2245 
2246 void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
2247  Register input = ToRegister(instr->InputAt(0));
2248  Register temp = scratch0();
2249  Register temp2 = ToRegister(instr->TempAt(0));
2250  Handle<String> class_name = instr->hydrogen()->class_name();
2251 
2252  int true_block = chunk_->LookupDestination(instr->true_block_id());
2253  int false_block = chunk_->LookupDestination(instr->false_block_id());
2254 
2255  Label* true_label = chunk_->GetAssemblyLabel(true_block);
2256  Label* false_label = chunk_->GetAssemblyLabel(false_block);
2257 
2258  EmitClassOfTest(true_label, false_label, class_name, input, temp, temp2);
2259 
2260  EmitBranch(true_block, false_block, eq);
2261 }
2262 
2263 
2264 void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
2265  Register reg = ToRegister(instr->InputAt(0));
2266  Register temp = ToRegister(instr->TempAt(0));
2267  int true_block = instr->true_block_id();
2268  int false_block = instr->false_block_id();
2269 
2270  __ ldr(temp, FieldMemOperand(reg, HeapObject::kMapOffset));
2271  __ cmp(temp, Operand(instr->map()));
2272  EmitBranch(true_block, false_block, eq);
2273 }
2274 
2275 
2276 void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
2277  ASSERT(ToRegister(instr->InputAt(0)).is(r0)); // Object is in r0.
2278  ASSERT(ToRegister(instr->InputAt(1)).is(r1)); // Function is in r1.
2279 
2280  InstanceofStub stub(InstanceofStub::kArgsInRegisters);
2281  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
2282 
2283  __ cmp(r0, Operand(0));
2284  __ mov(r0, Operand(factory()->false_value()), LeaveCC, ne);
2285  __ mov(r0, Operand(factory()->true_value()), LeaveCC, eq);
2286 }
2287 
2288 
2289 void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
2290  class DeferredInstanceOfKnownGlobal: public LDeferredCode {
2291  public:
2292  DeferredInstanceOfKnownGlobal(LCodeGen* codegen,
2293  LInstanceOfKnownGlobal* instr)
2294  : LDeferredCode(codegen), instr_(instr) { }
2295  virtual void Generate() {
2296  codegen()->DoDeferredInstanceOfKnownGlobal(instr_, &map_check_);
2297  }
2298  virtual LInstruction* instr() { return instr_; }
2299  Label* map_check() { return &map_check_; }
2300  private:
2301  LInstanceOfKnownGlobal* instr_;
2302  Label map_check_;
2303  };
2304 
2305  DeferredInstanceOfKnownGlobal* deferred;
2306  deferred = new(zone()) DeferredInstanceOfKnownGlobal(this, instr);
2307 
2308  Label done, false_result;
2309  Register object = ToRegister(instr->InputAt(0));
2310  Register temp = ToRegister(instr->TempAt(0));
2311  Register result = ToRegister(instr->result());
2312 
2313  ASSERT(object.is(r0));
2314  ASSERT(result.is(r0));
2315 
2316  // A Smi is not instance of anything.
2317  __ JumpIfSmi(object, &false_result);
2318 
2319  // This is the inlined call site instanceof cache. The two occurrences of the
2320  // hole value will be patched to the last map/result pair generated by the
2321  // instanceof stub.
2322  Label cache_miss;
2323  Register map = temp;
2324  __ ldr(map, FieldMemOperand(object, HeapObject::kMapOffset));
2325  {
2326  // Block constant pool emission to ensure the positions of instructions are
2327  // as expected by the patcher. See InstanceofStub::Generate().
2328  Assembler::BlockConstPoolScope block_const_pool(masm());
2329  __ bind(deferred->map_check()); // Label for calculating code patching.
2330  // We use Factory::the_hole_value() on purpose instead of loading from the
2331  // root array to force relocation to be able to later patch with
2332  // the cached map.
2333  Handle<JSGlobalPropertyCell> cell =
2334  factory()->NewJSGlobalPropertyCell(factory()->the_hole_value());
2335  __ mov(ip, Operand(Handle<Object>(cell)));
2336  __ ldr(ip, FieldMemOperand(ip, JSGlobalPropertyCell::kValueOffset));
2337  __ cmp(map, Operand(ip));
2338  __ b(ne, &cache_miss);
2339  // We use Factory::the_hole_value() on purpose instead of loading from the
2340  // root array to force relocation to be able to later patch
2341  // with true or false.
2342  __ mov(result, Operand(factory()->the_hole_value()));
2343  }
2344  __ b(&done);
2345 
2346  // The inlined call site cache did not match. Check null and string before
2347  // calling the deferred code.
2348  __ bind(&cache_miss);
2349  // Null is not instance of anything.
2350  __ LoadRoot(ip, Heap::kNullValueRootIndex);
2351  __ cmp(object, Operand(ip));
2352  __ b(eq, &false_result);
2353 
2354  // String values are not instances of anything.
2355  Condition is_string = masm_->IsObjectStringType(object, temp);
2356  __ b(is_string, &false_result);
2357 
2358  // Go to the deferred code.
2359  __ b(deferred->entry());
2360 
2361  __ bind(&false_result);
2362  __ LoadRoot(result, Heap::kFalseValueRootIndex);
2363 
2364  // Here result holds either the true or the false object. The deferred code
2365  // also produces a true or false object.
2366  __ bind(deferred->exit());
2367  __ bind(&done);
2368 }
2369 
2370 
2371 void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
2372  Label* map_check) {
2373  Register result = ToRegister(instr->result());
2374  ASSERT(result.is(r0));
2375 
2376  InstanceofStub::Flags flags = InstanceofStub::kNoFlags;
2377  flags = static_cast<InstanceofStub::Flags>(
2378  flags | InstanceofStub::kArgsInRegisters);
2379  flags = static_cast<InstanceofStub::Flags>(
2380  flags | InstanceofStub::kCallSiteInlineCheck);
2381  flags = static_cast<InstanceofStub::Flags>(
2382  flags | InstanceofStub::kReturnTrueFalseObject);
2383  InstanceofStub stub(flags);
2384 
2385  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
2386 
2387  // Get the temp register reserved by the instruction. This needs to be r4 as
2388  // its slot of the pushing of safepoint registers is used to communicate the
2389  // offset to the location of the map check.
2390  Register temp = ToRegister(instr->TempAt(0));
2391  ASSERT(temp.is(r4));
2392  __ LoadHeapObject(InstanceofStub::right(), instr->function());
2393  static const int kAdditionalDelta = 4;
2394  int delta = masm_->InstructionsGeneratedSince(map_check) + kAdditionalDelta;
2395  Label before_push_delta;
2396  __ bind(&before_push_delta);
2397  __ BlockConstPoolFor(kAdditionalDelta);
2398  __ mov(temp, Operand(delta * kPointerSize));
2399  __ StoreToSafepointRegisterSlot(temp, temp);
2400  CallCodeGeneric(stub.GetCode(),
2401  RelocInfo::CODE_TARGET,
2402  instr,
2403  RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
2404  LEnvironment* env = instr->GetDeferredLazyDeoptimizationEnvironment();
2405  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
2406  // Put the result value into the result register slot and
2407  // restore all registers.
2408  __ StoreToSafepointRegisterSlot(result, result);
2409 }
2410 
2411 
2412 void LCodeGen::DoCmpT(LCmpT* instr) {
2413  Token::Value op = instr->op();
2414 
2415  Handle<Code> ic = CompareIC::GetUninitialized(op);
2416  CallCode(ic, RelocInfo::CODE_TARGET, instr);
2417  __ cmp(r0, Operand(0)); // This instruction also signals no smi code inlined.
2418 
2419  Condition condition = ComputeCompareCondition(op);
2420  __ LoadRoot(ToRegister(instr->result()),
2421  Heap::kTrueValueRootIndex,
2422  condition);
2423  __ LoadRoot(ToRegister(instr->result()),
2424  Heap::kFalseValueRootIndex,
2425  NegateCondition(condition));
2426 }
2427 
2428 
2429 void LCodeGen::DoReturn(LReturn* instr) {
2430  if (FLAG_trace) {
2431  // Push the return value on the stack as the parameter.
2432  // Runtime::TraceExit returns its parameter in r0.
2433  __ push(r0);
2434  __ CallRuntime(Runtime::kTraceExit, 1);
2435  }
2436  int32_t sp_delta = (GetParameterCount() + 1) * kPointerSize;
2437  __ mov(sp, fp);
2438  __ ldm(ia_w, sp, fp.bit() | lr.bit());
2439  __ add(sp, sp, Operand(sp_delta));
2440  __ Jump(lr);
2441 }
2442 
2443 
2444 void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
2445  Register result = ToRegister(instr->result());
2446  __ mov(ip, Operand(Handle<Object>(instr->hydrogen()->cell())));
2447  __ ldr(result, FieldMemOperand(ip, JSGlobalPropertyCell::kValueOffset));
2448  if (instr->hydrogen()->RequiresHoleCheck()) {
2449  __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
2450  __ cmp(result, ip);
2451  DeoptimizeIf(eq, instr->environment());
2452  }
2453 }
2454 
2455 
2456 void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
2457  ASSERT(ToRegister(instr->global_object()).is(r0));
2458  ASSERT(ToRegister(instr->result()).is(r0));
2459 
2460  __ mov(r2, Operand(instr->name()));
2461  RelocInfo::Mode mode = instr->for_typeof() ? RelocInfo::CODE_TARGET
2462  : RelocInfo::CODE_TARGET_CONTEXT;
2463  Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
2464  CallCode(ic, mode, instr);
2465 }
2466 
2467 
2468 void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
2469  Register value = ToRegister(instr->value());
2470  Register cell = scratch0();
2471 
2472  // Load the cell.
2473  __ mov(cell, Operand(instr->hydrogen()->cell()));
2474 
2475  // If the cell we are storing to contains the hole it could have
2476  // been deleted from the property dictionary. In that case, we need
2477  // to update the property details in the property dictionary to mark
2478  // it as no longer deleted.
2479  if (instr->hydrogen()->RequiresHoleCheck()) {
2480  // We use a temp to check the payload (CompareRoot might clobber ip).
2481  Register payload = ToRegister(instr->TempAt(0));
2482  __ ldr(payload, FieldMemOperand(cell, JSGlobalPropertyCell::kValueOffset));
2483  __ CompareRoot(payload, Heap::kTheHoleValueRootIndex);
2484  DeoptimizeIf(eq, instr->environment());
2485  }
2486 
2487  // Store the value.
2488  __ str(value, FieldMemOperand(cell, JSGlobalPropertyCell::kValueOffset));
2489  // Cells are always rescanned, so no write barrier here.
2490 }
2491 
2492 
2493 void LCodeGen::DoStoreGlobalGeneric(LStoreGlobalGeneric* instr) {
2494  ASSERT(ToRegister(instr->global_object()).is(r1));
2495  ASSERT(ToRegister(instr->value()).is(r0));
2496 
2497  __ mov(r2, Operand(instr->name()));
2498  Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
2499  ? isolate()->builtins()->StoreIC_Initialize_Strict()
2500  : isolate()->builtins()->StoreIC_Initialize();
2501  CallCode(ic, RelocInfo::CODE_TARGET_CONTEXT, instr);
2502 }
2503 
2504 
2505 void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
2506  Register context = ToRegister(instr->context());
2507  Register result = ToRegister(instr->result());
2508  __ ldr(result, ContextOperand(context, instr->slot_index()));
2509  if (instr->hydrogen()->RequiresHoleCheck()) {
2510  __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
2511  __ cmp(result, ip);
2512  if (instr->hydrogen()->DeoptimizesOnHole()) {
2513  DeoptimizeIf(eq, instr->environment());
2514  } else {
2515  __ mov(result, Operand(factory()->undefined_value()), LeaveCC, eq);
2516  }
2517  }
2518 }
2519 
2520 
2521 void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
2522  Register context = ToRegister(instr->context());
2523  Register value = ToRegister(instr->value());
2524  Register scratch = scratch0();
2525  MemOperand target = ContextOperand(context, instr->slot_index());
2526 
2527  Label skip_assignment;
2528 
2529  if (instr->hydrogen()->RequiresHoleCheck()) {
2530  __ ldr(scratch, target);
2531  __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
2532  __ cmp(scratch, ip);
2533  if (instr->hydrogen()->DeoptimizesOnHole()) {
2534  DeoptimizeIf(eq, instr->environment());
2535  } else {
2536  __ b(ne, &skip_assignment);
2537  }
2538  }
2539 
2540  __ str(value, target);
2541  if (instr->hydrogen()->NeedsWriteBarrier()) {
2542  HType type = instr->hydrogen()->value()->type();
2543  SmiCheck check_needed =
2544  type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
2545  __ RecordWriteContextSlot(context,
2546  target.offset(),
2547  value,
2548  scratch,
2549  GetLinkRegisterState(),
2550  kSaveFPRegs,
2551  EMIT_REMEMBERED_SET,
2552  check_needed);
2553  }
2554 
2555  __ bind(&skip_assignment);
2556 }
2557 
2558 
2559 void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
2560  Register object = ToRegister(instr->InputAt(0));
2561  Register result = ToRegister(instr->result());
2562  if (instr->hydrogen()->is_in_object()) {
2563  __ ldr(result, FieldMemOperand(object, instr->hydrogen()->offset()));
2564  } else {
2565  __ ldr(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
2566  __ ldr(result, FieldMemOperand(result, instr->hydrogen()->offset()));
2567  }
2568 }
2569 
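// Illustrative sketch (editorial addition, not part of the original file): the
// two branches above reflect how a JSObject stores named properties. A fixed
// number live directly in the object body ("in-object"); the rest spill into a
// separate properties FixedArray, which costs one extra load:
//
//   in-object:      result = object[offset]                        (one ldr)
//   out-of-object:  props  = object[JSObject::kPropertiesOffset]
//                   result = props[offset]                         (two ldrs)
//
// FieldMemOperand folds the -1 heap-object tag into the displacement in both
// cases.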
2570 
2571 void LCodeGen::EmitLoadFieldOrConstantFunction(Register result,
2572  Register object,
2573  Handle<Map> type,
2574  Handle<String> name,
2575  LEnvironment* env) {
2576  LookupResult lookup(isolate());
2577  type->LookupInDescriptors(NULL, *name, &lookup);
2578  ASSERT(lookup.IsFound() || lookup.IsCacheable());
2579  if (lookup.IsFound() && lookup.type() == FIELD) {
2580  int index = lookup.GetLocalFieldIndexFromMap(*type);
2581  int offset = index * kPointerSize;
2582  if (index < 0) {
2583  // Negative property indices are in-object properties, indexed
2584  // from the end of the fixed part of the object.
2585  __ ldr(result, FieldMemOperand(object, offset + type->instance_size()));
2586  } else {
2587  // Non-negative property indices are in the properties array.
2588  __ ldr(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
2589  __ ldr(result, FieldMemOperand(result, offset + FixedArray::kHeaderSize));
2590  }
2591  } else if (lookup.IsFound() && lookup.type() == CONSTANT_FUNCTION) {
2592  Handle<JSFunction> function(lookup.GetConstantFunctionFromMap(*type));
2593  __ LoadHeapObject(result, function);
2594  } else {
2595  // Negative lookup.
2596  // Check prototypes.
2597  HeapObject* current = HeapObject::cast((*type)->prototype());
2598  Heap* heap = type->GetHeap();
2599  while (current != heap->null_value()) {
2600  Handle<HeapObject> link(current);
2601  __ LoadHeapObject(result, link);
2602  __ ldr(result, FieldMemOperand(result, HeapObject::kMapOffset));
2603  __ cmp(result, Operand(Handle<Map>(JSObject::cast(current)->map())));
2604  DeoptimizeIf(ne, env);
2605  current = HeapObject::cast(current->map()->prototype());
2606  }
2607  __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
2608  }
2609 }
2610 
2611 
2612 void LCodeGen::DoLoadNamedFieldPolymorphic(LLoadNamedFieldPolymorphic* instr) {
2613  Register object = ToRegister(instr->object());
2614  Register result = ToRegister(instr->result());
2615  Register object_map = scratch0();
2616 
2617  int map_count = instr->hydrogen()->types()->length();
2618  bool need_generic = instr->hydrogen()->need_generic();
2619 
2620  if (map_count == 0 && !need_generic) {
2621  DeoptimizeIf(al, instr->environment());
2622  return;
2623  }
2624  Handle<String> name = instr->hydrogen()->name();
2625  Label done;
2626  __ ldr(object_map, FieldMemOperand(object, HeapObject::kMapOffset));
2627  for (int i = 0; i < map_count; ++i) {
2628  bool last = (i == map_count - 1);
2629  Handle<Map> map = instr->hydrogen()->types()->at(i);
2630  Label check_passed;
2631  __ CompareMap(
2632  object_map, map, &check_passed, ALLOW_ELEMENT_TRANSITION_MAPS);
2633  if (last && !need_generic) {
2634  DeoptimizeIf(ne, instr->environment());
2635  __ bind(&check_passed);
2636  EmitLoadFieldOrConstantFunction(
2637  result, object, map, name, instr->environment());
2638  } else {
2639  Label next;
2640  __ b(ne, &next);
2641  __ bind(&check_passed);
2642  EmitLoadFieldOrConstantFunction(
2643  result, object, map, name, instr->environment());
2644  __ b(&done);
2645  __ bind(&next);
2646  }
2647  }
2648  if (need_generic) {
2649  __ mov(r2, Operand(name));
2650  Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
2651  CallCode(ic, RelocInfo::CODE_TARGET, instr);
2652  }
2653  __ bind(&done);
2654 }
2655 
2656 
2657 void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
2658  ASSERT(ToRegister(instr->object()).is(r0));
2659  ASSERT(ToRegister(instr->result()).is(r0));
2660 
2661  // Name is always in r2.
2662  __ mov(r2, Operand(instr->name()));
2663  Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
2664  CallCode(ic, RelocInfo::CODE_TARGET, instr);
2665 }
2666 
2667 
2668 void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
2669  Register scratch = scratch0();
2670  Register function = ToRegister(instr->function());
2671  Register result = ToRegister(instr->result());
2672 
2673  // Check that the function really is a function. Load map into the
2674  // result register.
2675  __ CompareObjectType(function, result, scratch, JS_FUNCTION_TYPE);
2676  DeoptimizeIf(ne, instr->environment());
2677 
2678  // Make sure that the function has an instance prototype.
2679  Label non_instance;
2680  __ ldrb(scratch, FieldMemOperand(result, Map::kBitFieldOffset));
2681  __ tst(scratch, Operand(1 << Map::kHasNonInstancePrototype));
2682  __ b(ne, &non_instance);
2683 
2684  // Get the prototype or initial map from the function.
2685  __ ldr(result,
2686  FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
2687 
2688  // Check that the function has a prototype or an initial map.
2689  __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
2690  __ cmp(result, ip);
2691  DeoptimizeIf(eq, instr->environment());
2692 
2693  // If the function does not have an initial map, we're done.
2694  Label done;
2695  __ CompareObjectType(result, scratch, scratch, MAP_TYPE);
2696  __ b(ne, &done);
2697 
2698  // Get the prototype from the initial map.
2699  __ ldr(result, FieldMemOperand(result, Map::kPrototypeOffset));
2700  __ jmp(&done);
2701 
2702  // Non-instance prototype: Fetch prototype from constructor field
2703  // in initial map.
2704  __ bind(&non_instance);
2705  __ ldr(result, FieldMemOperand(result, Map::kConstructorOffset));
2706 
2707  // All done.
2708  __ bind(&done);
2709 }
2710 
2711 
2712 void LCodeGen::DoLoadElements(LLoadElements* instr) {
2713  Register result = ToRegister(instr->result());
2714  Register input = ToRegister(instr->InputAt(0));
2715  Register scratch = scratch0();
2716 
2717  __ ldr(result, FieldMemOperand(input, JSObject::kElementsOffset));
2718  if (FLAG_debug_code) {
2719  Label done, fail;
2720  __ ldr(scratch, FieldMemOperand(result, HeapObject::kMapOffset));
2721  __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
2722  __ cmp(scratch, ip);
2723  __ b(eq, &done);
2724  __ LoadRoot(ip, Heap::kFixedCOWArrayMapRootIndex);
2725  __ cmp(scratch, ip);
2726  __ b(eq, &done);
2727  // |scratch| still contains |input|'s map.
2728  __ ldr(scratch, FieldMemOperand(scratch, Map::kBitField2Offset));
2729  __ ubfx(scratch, scratch, Map::kElementsKindShift,
2730  Map::kElementsKindBitCount);
2731  __ cmp(scratch, Operand(GetInitialFastElementsKind()));
2732  __ b(lt, &fail);
2733  __ cmp(scratch, Operand(TERMINAL_FAST_ELEMENTS_KIND));
2734  __ b(le, &done);
2735  __ cmp(scratch, Operand(FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND));
2736  __ b(lt, &fail);
2737  __ cmp(scratch, Operand(LAST_EXTERNAL_ARRAY_ELEMENTS_KIND));
2738  __ b(le, &done);
2739  __ bind(&fail);
2740  __ Abort("Check for fast or external elements failed.");
2741  __ bind(&done);
2742  }
2743 }
2744 
2745 
2746 void LCodeGen::DoLoadExternalArrayPointer(
2747  LLoadExternalArrayPointer* instr) {
2748  Register to_reg = ToRegister(instr->result());
2749  Register from_reg = ToRegister(instr->InputAt(0));
2750  __ ldr(to_reg, FieldMemOperand(from_reg,
2751  ExternalArray::kExternalPointerOffset));
2752 }
2753 
2754 
2755 void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
2756  Register arguments = ToRegister(instr->arguments());
2757  Register length = ToRegister(instr->length());
2758  Register index = ToRegister(instr->index());
2759  Register result = ToRegister(instr->result());
2760 
2761  // Bail out if index is not a valid argument index. The unsigned comparison
2762  // also catches a negative index for free.
2763  __ sub(length, length, index, SetCC);
2764  DeoptimizeIf(ls, instr->environment());
2765 
2766  // There are two words between the frame pointer and the last argument.
2767  // Subtracting from length accounts for one of them; add one more.
2768  __ add(length, length, Operand(1));
2769  __ ldr(result, MemOperand(arguments, length, LSL, kPointerSizeLog2));
2770 }
2771 
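// Worked example (editorial addition, not part of the original file) for the
// index arithmetic above: with length == 3 and index == 0 (the first argument),
// length - index + 1 == 4, so the load reads four words above 'arguments'; with
// index == 2 (the last argument) it reads two words above, consistent with the
// two words (caller fp and return address in the standard frame layout) sitting
// between the frame pointer and the last argument. The unsigned 'ls' deopt
// fires when index >= length, and a negative index reinterpreted as unsigned is
// huge, so it is caught by the same check.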
2772 
2773 void LCodeGen::DoLoadKeyedFastElement(LLoadKeyedFastElement* instr) {
2774  Register elements = ToRegister(instr->elements());
2775  Register key = EmitLoadRegister(instr->key(), scratch0());
2776  Register result = ToRegister(instr->result());
2777  Register scratch = scratch0();
2778 
2779  // Load the result.
2780  __ add(scratch, elements, Operand(key, LSL, kPointerSizeLog2));
2781  uint32_t offset = FixedArray::kHeaderSize +
2782  (instr->additional_index() << kPointerSizeLog2);
2783  __ ldr(result, FieldMemOperand(scratch, offset));
2784 
2785  // Check for the hole value.
2786  if (instr->hydrogen()->RequiresHoleCheck()) {
2787  if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
2788  __ tst(result, Operand(kSmiTagMask));
2789  DeoptimizeIf(ne, instr->environment());
2790  } else {
2791  __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
2792  __ cmp(result, scratch);
2793  DeoptimizeIf(eq, instr->environment());
2794  }
2795  }
2796 }
2797 
2798 
2799 void LCodeGen::DoLoadKeyedFastDoubleElement(
2800  LLoadKeyedFastDoubleElement* instr) {
2801  Register elements = ToRegister(instr->elements());
2802  bool key_is_constant = instr->key()->IsConstantOperand();
2803  Register key = no_reg;
2804  DwVfpRegister result = ToDoubleRegister(instr->result());
2805  Register scratch = scratch0();
2806 
2807  int shift_size =
2808  ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
2809  int constant_key = 0;
2810  if (key_is_constant) {
2811  constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
2812  if (constant_key & 0xF0000000) {
2813  Abort("array index constant value too big.");
2814  }
2815  } else {
2816  key = ToRegister(instr->key());
2817  }
2818 
2819  Operand operand = key_is_constant
2820  ? Operand(((constant_key + instr->additional_index()) << shift_size) +
2821  FixedDoubleArray::kHeaderSize - kHeapObjectTag)
2822  : Operand(key, LSL, shift_size);
2823  __ add(elements, elements, operand);
2824  if (!key_is_constant) {
2825  __ add(elements, elements,
2826  Operand((FixedDoubleArray::kHeaderSize - kHeapObjectTag) +
2827  (instr->additional_index() << shift_size)));
2828  }
2829 
2830  if (instr->hydrogen()->RequiresHoleCheck()) {
2831  __ ldr(scratch, MemOperand(elements, sizeof(kHoleNanLower32)));
2832  __ cmp(scratch, Operand(kHoleNanUpper32));
2833  DeoptimizeIf(eq, instr->environment());
2834  }
2835 
2836  __ vldr(result, elements, 0);
2837 }
2838 
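// Illustrative sketch (editorial addition, not part of the original file): in
// holey double arrays a missing element ("the hole") is stored as a reserved
// NaN bit pattern that ordinary arithmetic never produces. On little-endian ARM
// the upper word of the double sits at offset sizeof(kHoleNanLower32) == 4,
// which is why the hole check above loads only that word and compares it with
// kHoleNanUpper32. Assuming <cstring>/<cstdint>, with 'slot' standing in for
// the raw double element:
//
//   uint64_t bits;
//   std::memcpy(&bits, &slot, sizeof(bits));
//   bool is_hole = static_cast<uint32_t>(bits >> 32) == kHoleNanUpper32;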
2839 
2840 void LCodeGen::DoLoadKeyedSpecializedArrayElement(
2841  LLoadKeyedSpecializedArrayElement* instr) {
2842  Register external_pointer = ToRegister(instr->external_pointer());
2843  Register key = no_reg;
2844  ElementsKind elements_kind = instr->elements_kind();
2845  bool key_is_constant = instr->key()->IsConstantOperand();
2846  int constant_key = 0;
2847  if (key_is_constant) {
2848  constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
2849  if (constant_key & 0xF0000000) {
2850  Abort("array index constant value too big.");
2851  }
2852  } else {
2853  key = ToRegister(instr->key());
2854  }
2855  int shift_size = ElementsKindToShiftSize(elements_kind);
2856  int additional_offset = instr->additional_index() << shift_size;
2857 
2858  if (elements_kind == EXTERNAL_FLOAT_ELEMENTS ||
2859  elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
2860  CpuFeatures::Scope scope(VFP3);
2861  DwVfpRegister result = ToDoubleRegister(instr->result());
2862  Operand operand = key_is_constant
2863  ? Operand(constant_key << shift_size)
2864  : Operand(key, LSL, shift_size);
2865  __ add(scratch0(), external_pointer, operand);
2866  if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
2867  __ vldr(result.low(), scratch0(), additional_offset);
2868  __ vcvt_f64_f32(result, result.low());
2869  } else { // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS
2870  __ vldr(result, scratch0(), additional_offset);
2871  }
2872  } else {
2873  Register result = ToRegister(instr->result());
2874  if (instr->additional_index() != 0 && !key_is_constant) {
2875  __ add(scratch0(), key, Operand(instr->additional_index()));
2876  }
2877  MemOperand mem_operand(key_is_constant
2878  ? MemOperand(external_pointer,
2879  (constant_key << shift_size) + additional_offset)
2880  : (instr->additional_index() == 0
2881  ? MemOperand(external_pointer, key, LSL, shift_size)
2882  : MemOperand(external_pointer, scratch0(), LSL, shift_size)));
2883  switch (elements_kind) {
2884  case EXTERNAL_BYTE_ELEMENTS:
2885  __ ldrsb(result, mem_operand);
2886  break;
2887  case EXTERNAL_PIXEL_ELEMENTS:
2888  case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
2889  __ ldrb(result, mem_operand);
2890  break;
2891  case EXTERNAL_SHORT_ELEMENTS:
2892  __ ldrsh(result, mem_operand);
2893  break;
2894  case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
2895  __ ldrh(result, mem_operand);
2896  break;
2897  case EXTERNAL_INT_ELEMENTS:
2898  __ ldr(result, mem_operand);
2899  break;
2900  case EXTERNAL_UNSIGNED_INT_ELEMENTS:
2901  __ ldr(result, mem_operand);
2902  __ cmp(result, Operand(0x80000000));
2903  // TODO(danno): we could be more clever here, perhaps having a special
2904  // version of the stub that detects if the overflow case actually
2905  // happens, and generates code that returns a double rather than an int.
2906  DeoptimizeIf(cs, instr->environment());
2907  break;
2908  case EXTERNAL_FLOAT_ELEMENTS:
2909  case EXTERNAL_DOUBLE_ELEMENTS:
2910  case FAST_HOLEY_DOUBLE_ELEMENTS:
2911  case FAST_HOLEY_ELEMENTS:
2912  case FAST_HOLEY_SMI_ELEMENTS:
2913  case FAST_DOUBLE_ELEMENTS:
2914  case FAST_ELEMENTS:
2915  case FAST_SMI_ELEMENTS:
2916  case DICTIONARY_ELEMENTS:
2917  case NON_STRICT_ARGUMENTS_ELEMENTS:
2918  UNREACHABLE();
2919  break;
2920  }
2921  }
2922 }
2923 
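// Illustrative sketch (editorial addition, not part of the original file): the
// 0x80000000 compare in the unsigned-int case exists because the element is a
// uint32 while the untagged register representation is a signed int32:
//
//   uint32_t raw = 0x80000000u;              // 2147483648
//   bool representable = raw < 0x80000000u;  // false: does not fit in int32
//
// When 'representable' is false the code deoptimizes (condition cs) rather than
// silently producing a negative number.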
2924 
2925 void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
2926  ASSERT(ToRegister(instr->object()).is(r1));
2927  ASSERT(ToRegister(instr->key()).is(r0));
2928 
2929  Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
2930  CallCode(ic, RelocInfo::CODE_TARGET, instr);
2931 }
2932 
2933 
2934 void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
2935  Register scratch = scratch0();
2936  Register result = ToRegister(instr->result());
2937 
2938  if (instr->hydrogen()->from_inlined()) {
2939  __ sub(result, sp, Operand(2 * kPointerSize));
2940  } else {
2941  // Check if the calling frame is an arguments adaptor frame.
2942  Label done, adapted;
2943  __ ldr(scratch, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
2944  __ ldr(result, MemOperand(scratch, StandardFrameConstants::kContextOffset));
2945  __ cmp(result, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
2946 
2947  // Result is the frame pointer for the frame if not adapted and for the real
2948  // frame below the adaptor frame if adapted.
2949  __ mov(result, fp, LeaveCC, ne);
2950  __ mov(result, scratch, LeaveCC, eq);
2951  }
2952 }
2953 
2954 
2955 void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
2956  Register elem = ToRegister(instr->InputAt(0));
2957  Register result = ToRegister(instr->result());
2958 
2959  Label done;
2960 
2961  // If there is no arguments adaptor frame, the number of arguments is fixed.
2962  __ cmp(fp, elem);
2963  __ mov(result, Operand(scope()->num_parameters()));
2964  __ b(eq, &done);
2965 
2966  // Arguments adaptor frame present. Get argument length from there.
2967  __ ldr(result, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
2968  __ ldr(result,
2969  MemOperand(result, ArgumentsAdaptorFrameConstants::kLengthOffset));
2970  __ SmiUntag(result);
2971 
2972  // Argument length is in result register.
2973  __ bind(&done);
2974 }
2975 
2976 
2977 void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
2978  Register receiver = ToRegister(instr->receiver());
2979  Register function = ToRegister(instr->function());
2980  Register scratch = scratch0();
2981 
2982  // If the receiver is null or undefined, we have to pass the global
2983  // object as a receiver to normal functions. Values have to be
2984  // passed unchanged to builtins and strict-mode functions.
2985  Label global_object, receiver_ok;
2986 
2987  // Do not transform the receiver to object for strict mode
2988  // functions.
2989  __ ldr(scratch,
2990  FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
2991  __ ldr(scratch,
2992  FieldMemOperand(scratch, SharedFunctionInfo::kCompilerHintsOffset));
2993  __ tst(scratch,
2994  Operand(1 << (SharedFunctionInfo::kStrictModeFunction + kSmiTagSize)));
2995  __ b(ne, &receiver_ok);
2996 
2997  // Do not transform the receiver to object for builtins.
2998  __ tst(scratch, Operand(1 << (SharedFunctionInfo::kNative + kSmiTagSize)));
2999  __ b(ne, &receiver_ok);
3000 
3001  // Normal function. Replace undefined or null with global receiver.
3002  __ LoadRoot(scratch, Heap::kNullValueRootIndex);
3003  __ cmp(receiver, scratch);
3004  __ b(eq, &global_object);
3005  __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
3006  __ cmp(receiver, scratch);
3007  __ b(eq, &global_object);
3008 
3009  // Deoptimize if the receiver is not a JS object.
3010  __ tst(receiver, Operand(kSmiTagMask));
3011  DeoptimizeIf(eq, instr->environment());
3012  __ CompareObjectType(receiver, scratch, scratch, FIRST_SPEC_OBJECT_TYPE);
3013  DeoptimizeIf(lt, instr->environment());
3014  __ jmp(&receiver_ok);
3015 
3016  __ bind(&global_object);
3017  __ ldr(receiver, GlobalObjectOperand());
3018  __ ldr(receiver,
3019  FieldMemOperand(receiver, JSGlobalObject::kGlobalReceiverOffset));
3020  __ bind(&receiver_ok);
3021 }
3022 
3023 
3024 void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
3025  Register receiver = ToRegister(instr->receiver());
3026  Register function = ToRegister(instr->function());
3027  Register length = ToRegister(instr->length());
3028  Register elements = ToRegister(instr->elements());
3029  Register scratch = scratch0();
3030  ASSERT(receiver.is(r0)); // Used for parameter count.
3031  ASSERT(function.is(r1)); // Required by InvokeFunction.
3032  ASSERT(ToRegister(instr->result()).is(r0));
3033 
3034  // Copy the arguments to this function possibly from the
3035  // adaptor frame below it.
3036  const uint32_t kArgumentsLimit = 1 * KB;
3037  __ cmp(length, Operand(kArgumentsLimit));
3038  DeoptimizeIf(hi, instr->environment());
3039 
3040  // Push the receiver and use the register to keep the original
3041  // number of arguments.
3042  __ push(receiver);
3043  __ mov(receiver, length);
3044  // The arguments are located one pointer size past elements.
3045  __ add(elements, elements, Operand(1 * kPointerSize));
3046 
3047  // Loop through the arguments pushing them onto the execution
3048  // stack.
3049  Label invoke, loop;
3050  // length is a small non-negative integer, due to the test above.
3051  __ cmp(length, Operand(0));
3052  __ b(eq, &invoke);
3053  __ bind(&loop);
3054  __ ldr(scratch, MemOperand(elements, length, LSL, 2));
3055  __ push(scratch);
3056  __ sub(length, length, Operand(1), SetCC);
3057  __ b(ne, &loop);
3058 
3059  __ bind(&invoke);
3060  ASSERT(instr->HasPointerMap());
3061  LPointerMap* pointers = instr->pointer_map();
3062  RecordPosition(pointers->position());
3063  SafepointGenerator safepoint_generator(
3064  this, pointers, Safepoint::kLazyDeopt);
3065  // The number of arguments is stored in receiver which is r0, as expected
3066  // by InvokeFunction.
3067  ParameterCount actual(receiver);
3068  __ InvokeFunction(function, actual, CALL_FUNCTION,
3069  safepoint_generator, CALL_AS_METHOD);
3070  __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
3071 }
3072 
3073 
3074 void LCodeGen::DoPushArgument(LPushArgument* instr) {
3075  LOperand* argument = instr->InputAt(0);
3076  if (argument->IsDoubleRegister() || argument->IsDoubleStackSlot()) {
3077  Abort("DoPushArgument not implemented for double type.");
3078  } else {
3079  Register argument_reg = EmitLoadRegister(argument, ip);
3080  __ push(argument_reg);
3081  }
3082 }
3083 
3084 
3085 void LCodeGen::DoDrop(LDrop* instr) {
3086  __ Drop(instr->count());
3087 }
3088 
3089 
3090 void LCodeGen::DoThisFunction(LThisFunction* instr) {
3091  Register result = ToRegister(instr->result());
3092  __ LoadHeapObject(result, instr->hydrogen()->closure());
3093 }
3094 
3095 
3096 void LCodeGen::DoContext(LContext* instr) {
3097  Register result = ToRegister(instr->result());
3098  __ mov(result, cp);
3099 }
3100 
3101 
3102 void LCodeGen::DoOuterContext(LOuterContext* instr) {
3103  Register context = ToRegister(instr->context());
3104  Register result = ToRegister(instr->result());
3105  __ ldr(result,
3106  MemOperand(context, Context::SlotOffset(Context::PREVIOUS_INDEX)));
3107 }
3108 
3109 
3110 void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
3111  __ push(cp); // The context is the first argument.
3112  __ LoadHeapObject(scratch0(), instr->hydrogen()->pairs());
3113  __ push(scratch0());
3114  __ mov(scratch0(), Operand(Smi::FromInt(instr->hydrogen()->flags())));
3115  __ push(scratch0());
3116  CallRuntime(Runtime::kDeclareGlobals, 3, instr);
3117 }
3118 
3119 
3120 void LCodeGen::DoGlobalObject(LGlobalObject* instr) {
3121  Register result = ToRegister(instr->result());
3122  __ ldr(result, ContextOperand(cp, Context::GLOBAL_INDEX));
3123 }
3124 
3125 
3126 void LCodeGen::DoGlobalReceiver(LGlobalReceiver* instr) {
3127  Register global = ToRegister(instr->global());
3128  Register result = ToRegister(instr->result());
3129  __ ldr(result, FieldMemOperand(global, GlobalObject::kGlobalReceiverOffset));
3130 }
3131 
3132 
3133 void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
3134  int arity,
3135  LInstruction* instr,
3136  CallKind call_kind,
3137  R1State r1_state) {
3138  bool can_invoke_directly = !function->NeedsArgumentsAdaption() ||
3139  function->shared()->formal_parameter_count() == arity;
3140 
3141  LPointerMap* pointers = instr->pointer_map();
3142  RecordPosition(pointers->position());
3143 
3144  if (can_invoke_directly) {
3145  if (r1_state == R1_UNINITIALIZED) {
3146  __ LoadHeapObject(r1, function);
3147  }
3148 
3149  // Change context if needed.
3150  bool change_context =
3151  (info()->closure()->context() != function->context()) ||
3152  scope()->contains_with() ||
3153  (scope()->num_heap_slots() > 0);
3154  if (change_context) {
3155  __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
3156  }
3157 
3158  // Set r0 to arguments count if adaption is not needed. Assumes that r0
3159  // is available to write to at this point.
3160  if (!function->NeedsArgumentsAdaption()) {
3161  __ mov(r0, Operand(arity));
3162  }
3163 
3164  // Invoke function.
3165  __ SetCallKind(r5, call_kind);
3166  __ ldr(ip, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
3167  __ Call(ip);
3168 
3169  // Set up deoptimization.
3170  RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
3171  } else {
3172  SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
3173  ParameterCount count(arity);
3174  __ InvokeFunction(function, count, CALL_FUNCTION, generator, call_kind);
3175  }
3176 
3177  // Restore context.
3178  __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
3179 }
3180 
3181 
3182 void LCodeGen::DoCallConstantFunction(LCallConstantFunction* instr) {
3183  ASSERT(ToRegister(instr->result()).is(r0));
3184  CallKnownFunction(instr->function(),
3185  instr->arity(),
3186  instr,
3187  CALL_AS_METHOD,
3188  R1_UNINITIALIZED);
3189 }
3190 
3191 
3192 void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr) {
3193  Register input = ToRegister(instr->InputAt(0));
3194  Register result = ToRegister(instr->result());
3195  Register scratch = scratch0();
3196 
3197  // Deoptimize if not a heap number.
3198  __ ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
3199  __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
3200  __ cmp(scratch, Operand(ip));
3201  DeoptimizeIf(ne, instr->environment());
3202 
3203  Label done;
3204  Register exponent = scratch0();
3205  scratch = no_reg;
3206  __ ldr(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));
3207  // Check the sign of the argument. If the argument is positive, just
3208  // return it.
3209  __ tst(exponent, Operand(HeapNumber::kSignMask));
3210  // Move the input to the result if necessary.
3211  __ Move(result, input);
3212  __ b(eq, &done);
3213 
3214  // Input is negative. Reverse its sign.
3215  // Preserve the value of all registers.
3216  {
3217  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
3218 
3219  // Registers were saved at the safepoint, so we can use
3220  // many scratch registers.
3221  Register tmp1 = input.is(r1) ? r0 : r1;
3222  Register tmp2 = input.is(r2) ? r0 : r2;
3223  Register tmp3 = input.is(r3) ? r0 : r3;
3224  Register tmp4 = input.is(r4) ? r0 : r4;
3225 
3226  // exponent: floating point exponent value.
3227 
3228  Label allocated, slow;
3229  __ LoadRoot(tmp4, Heap::kHeapNumberMapRootIndex);
3230  __ AllocateHeapNumber(tmp1, tmp2, tmp3, tmp4, &slow);
3231  __ b(&allocated);
3232 
3233  // Slow case: Call the runtime system to do the number allocation.
3234  __ bind(&slow);
3235 
3236  CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
3237  // Set the pointer to the new heap number in tmp.
3238  if (!tmp1.is(r0)) __ mov(tmp1, Operand(r0));
3239  // Restore input_reg after call to runtime.
3240  __ LoadFromSafepointRegisterSlot(input, input);
3241  __ ldr(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));
3242 
3243  __ bind(&allocated);
3244  // exponent: floating point exponent value.
3245  // tmp1: allocated heap number.
3246  __ bic(exponent, exponent, Operand(HeapNumber::kSignMask));
3247  __ str(exponent, FieldMemOperand(tmp1, HeapNumber::kExponentOffset));
3248  __ ldr(tmp2, FieldMemOperand(input, HeapNumber::kMantissaOffset));
3249  __ str(tmp2, FieldMemOperand(tmp1, HeapNumber::kMantissaOffset));
3250 
3251  __ StoreToSafepointRegisterSlot(tmp1, result);
3252  }
3253 
3254  __ bind(&done);
3255 }
3256 
3257 
3258 void LCodeGen::EmitIntegerMathAbs(LUnaryMathOperation* instr) {
3259  Register input = ToRegister(instr->InputAt(0));
3260  Register result = ToRegister(instr->result());
3261  __ cmp(input, Operand(0));
3262  __ Move(result, input, pl);
3263  // We can make rsb conditional because the previous cmp instruction
3264  // will clear the V (overflow) flag and rsb won't set this flag
3265  // if input is positive.
3266  __ rsb(result, input, Operand(0), SetCC, mi);
3267  // Deoptimize on overflow.
3268  DeoptimizeIf(vs, instr->environment());
3269 }
3270 
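// Illustrative sketch (editorial addition, not part of the original file): the
// only input for which the conditional rsb above overflows is INT32_MIN, whose
// absolute value does not fit in a signed 32-bit register. Assuming <cstdint>:
//
//   int32_t x = INT32_MIN;   // -2147483648
//   // 0 - x would be +2147483648, one past INT32_MAX, so the V flag is set and
//   // DeoptimizeIf(vs, ...) falls back to the tagged/double path.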
3271 
3272 void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) {
3273  // Class for deferred case.
3274  class DeferredMathAbsTaggedHeapNumber: public LDeferredCode {
3275  public:
3276  DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen,
3277  LUnaryMathOperation* instr)
3278  : LDeferredCode(codegen), instr_(instr) { }
3279  virtual void Generate() {
3280  codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
3281  }
3282  virtual LInstruction* instr() { return instr_; }
3283  private:
3284  LUnaryMathOperation* instr_;
3285  };
3286 
3287  Representation r = instr->hydrogen()->value()->representation();
3288  if (r.IsDouble()) {
3289  DwVfpRegister input = ToDoubleRegister(instr->InputAt(0));
3290  DwVfpRegister result = ToDoubleRegister(instr->result());
3291  __ vabs(result, input);
3292  } else if (r.IsInteger32()) {
3293  EmitIntegerMathAbs(instr);
3294  } else {
3295  // Representation is tagged.
3296  DeferredMathAbsTaggedHeapNumber* deferred =
3297  new(zone()) DeferredMathAbsTaggedHeapNumber(this, instr);
3298  Register input = ToRegister(instr->InputAt(0));
3299  // Smi check.
3300  __ JumpIfNotSmi(input, deferred->entry());
3301  // If smi, handle it directly.
3302  EmitIntegerMathAbs(instr);
3303  __ bind(deferred->exit());
3304  }
3305 }
3306 
3307 
3308 void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) {
3309  DoubleRegister input = ToDoubleRegister(instr->InputAt(0));
3310  Register result = ToRegister(instr->result());
3311  SwVfpRegister single_scratch = double_scratch0().low();
3312  Register scratch1 = scratch0();
3313  Register scratch2 = ToRegister(instr->TempAt(0));
3314 
3315  __ EmitVFPTruncate(kRoundToMinusInf,
3316  single_scratch,
3317  input,
3318  scratch1,
3319  scratch2);
3320  DeoptimizeIf(ne, instr->environment());
3321 
3322  // Move the result back to the general purpose result register.
3323  __ vmov(result, single_scratch);
3324 
3325  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3326  // Test for -0.
3327  Label done;
3328  __ cmp(result, Operand(0));
3329  __ b(ne, &done);
3330  __ vmov(scratch1, input.high());
3331  __ tst(scratch1, Operand(HeapNumber::kSignMask));
3332  DeoptimizeIf(ne, instr->environment());
3333  __ bind(&done);
3334  }
3335 }
3336 
3337 
3338 void LCodeGen::DoMathRound(LUnaryMathOperation* instr) {
3339  DoubleRegister input = ToDoubleRegister(instr->InputAt(0));
3340  Register result = ToRegister(instr->result());
3341  Register scratch = scratch0();
3342  Label done, check_sign_on_zero;
3343 
3344  // Extract exponent bits.
3345  __ vmov(result, input.high());
3346  __ ubfx(scratch,
3347  result,
3348  HeapNumber::kExponentShift,
3349  HeapNumber::kExponentBits);
3350 
3351  // If the number is in ]-0.5, +0.5[, the result is +/- 0.
3352  __ cmp(scratch, Operand(HeapNumber::kExponentBias - 2));
3353  __ mov(result, Operand(0), LeaveCC, le);
3354  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3355  __ b(le, &check_sign_on_zero);
3356  } else {
3357  __ b(le, &done);
3358  }
3359 
3360  // The following conversion will not work with numbers
3361  // outside of ]-2^32, 2^32[.
3362  __ cmp(scratch, Operand(HeapNumber::kExponentBias + 32));
3363  DeoptimizeIf(ge, instr->environment());
3364 
3365  // Save the original sign for later comparison.
3366  __ and_(scratch, result, Operand(HeapNumber::kSignMask));
3367 
3368  __ Vmov(double_scratch0(), 0.5);
3369  __ vadd(double_scratch0(), input, double_scratch0());
3370 
3371  // Check sign of the result: if the sign changed, the input
3372  // value was in [-0.5, 0[ and the result should be -0.
3373  __ vmov(result, double_scratch0().high());
3374  __ eor(result, result, Operand(scratch), SetCC);
3375  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3376  DeoptimizeIf(mi, instr->environment());
3377  } else {
3378  __ mov(result, Operand(0), LeaveCC, mi);
3379  __ b(mi, &done);
3380  }
3381 
3382  __ EmitVFPTruncate(kRoundToMinusInf,
3383  double_scratch0().low(),
3384  double_scratch0(),
3385  result,
3386  scratch);
3387  DeoptimizeIf(ne, instr->environment());
3388  __ vmov(result, double_scratch0().low());
3389 
3390  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3391  // Test for -0.
3392  __ cmp(result, Operand(0));
3393  __ b(ne, &done);
3394  __ bind(&check_sign_on_zero);
3395  __ vmov(scratch, input.high());
3396  __ tst(scratch, Operand(HeapNumber::kSignMask));
3397  DeoptimizeIf(ne, instr->environment());
3398  }
3399  __ bind(&done);
3400 }
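// DoMathRound implements Math.round as floor(x + 0.5) with three guards: a
// biased-exponent check short-circuits |x| < 0.5 to +/-0, exponents 32 or more
// above the bias deoptimize because the result no longer fits in 32 bits, and
// a sign comparison between x and x + 0.5 detects inputs in [-0.5, 0[ that
// must yield -0. A rough scalar sketch of the main path (plain C++, ignoring
// the -0 and deopt cases that only matter in compiled code; RoundSketch is an
// illustrative name):

#include <cmath>
#include <cstdint>

static inline int32_t RoundSketch(double x) {
  if (std::fabs(x) < 0.5) return 0;                  // |x| < 0.5 guard
  // A real implementation would bail out here when |x| >= 2^32.
  return static_cast<int32_t>(std::floor(x + 0.5));  // floor(x + 0.5)
}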
3401 
3402 
3403 void LCodeGen::DoMathSqrt(LUnaryMathOperation* instr) {
3404  DoubleRegister input = ToDoubleRegister(instr->InputAt(0));
3405  DoubleRegister result = ToDoubleRegister(instr->result());
3406  __ vsqrt(result, input);
3407 }
3408 
3409 
3410 void LCodeGen::DoMathPowHalf(LUnaryMathOperation* instr) {
3411  DoubleRegister input = ToDoubleRegister(instr->InputAt(0));
3412  DoubleRegister result = ToDoubleRegister(instr->result());
3413  DoubleRegister temp = ToDoubleRegister(instr->TempAt(0));
3414 
3415  // Note that according to ECMA-262 15.8.2.13:
3416  // Math.pow(-Infinity, 0.5) == Infinity
3417  // Math.sqrt(-Infinity) == NaN
3418  Label done;
3419  __ vmov(temp, -V8_INFINITY);
3420  __ VFPCompareAndSetFlags(input, temp);
3421  __ vneg(result, temp, eq);
3422  __ b(&done, eq);
3423 
3424  // Add +0 to convert -0 to +0.
3425  __ vadd(result, input, kDoubleRegZero);
3426  __ vsqrt(result, result);
3427  __ bind(&done);
3428 }
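// DoMathPowHalf cannot simply emit vsqrt because ECMA-262 separates the two
// operations: Math.pow(-Infinity, 0.5) is +Infinity while Math.sqrt(-Infinity)
// is NaN, and pow(-0, 0.5) must be +0, hence the "+0" added before the square
// root. A hedged scalar equivalent of what the generated code computes
// (PowHalfSketch is an illustrative name):

#include <cmath>
#include <limits>

static inline double PowHalfSketch(double x) {
  if (x == -std::numeric_limits<double>::infinity()) {
    return std::numeric_limits<double>::infinity();  // ECMA-262 15.8.2.13
  }
  return std::sqrt(x + 0.0);  // adding +0 turns -0 into +0 before the sqrt
}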
3429 
3430 
3431 void LCodeGen::DoPower(LPower* instr) {
3432  Representation exponent_type = instr->hydrogen()->right()->representation();
3433  // Having marked this as a call, we can use any registers.
3434  // Just make sure that the input/output registers are the expected ones.
3435  ASSERT(!instr->InputAt(1)->IsDoubleRegister() ||
3436  ToDoubleRegister(instr->InputAt(1)).is(d2));
3437  ASSERT(!instr->InputAt(1)->IsRegister() ||
3438  ToRegister(instr->InputAt(1)).is(r2));
3439  ASSERT(ToDoubleRegister(instr->InputAt(0)).is(d1));
3440  ASSERT(ToDoubleRegister(instr->result()).is(d3));
3441 
3442  if (exponent_type.IsTagged()) {
3443  Label no_deopt;
3444  __ JumpIfSmi(r2, &no_deopt);
3445  __ ldr(r7, FieldMemOperand(r2, HeapObject::kMapOffset));
3446  __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
3447  __ cmp(r7, Operand(ip));
3448  DeoptimizeIf(ne, instr->environment());
3449  __ bind(&no_deopt);
3450  MathPowStub stub(MathPowStub::TAGGED);
3451  __ CallStub(&stub);
3452  } else if (exponent_type.IsInteger32()) {
3453  MathPowStub stub(MathPowStub::INTEGER);
3454  __ CallStub(&stub);
3455  } else {
3456  ASSERT(exponent_type.IsDouble());
3457  MathPowStub stub(MathPowStub::DOUBLE);
3458  __ CallStub(&stub);
3459  }
3460 }
3461 
3462 
3463 void LCodeGen::DoRandom(LRandom* instr) {
3464  class DeferredDoRandom: public LDeferredCode {
3465  public:
3466  DeferredDoRandom(LCodeGen* codegen, LRandom* instr)
3467  : LDeferredCode(codegen), instr_(instr) { }
3468  virtual void Generate() { codegen()->DoDeferredRandom(instr_); }
3469  virtual LInstruction* instr() { return instr_; }
3470  private:
3471  LRandom* instr_;
3472  };
3473 
3474  DeferredDoRandom* deferred = new(zone()) DeferredDoRandom(this, instr);
3475 
3476  // Having marked this instruction as a call we can use any
3477  // registers.
3478  ASSERT(ToDoubleRegister(instr->result()).is(d7));
3479  ASSERT(ToRegister(instr->InputAt(0)).is(r0));
3480 
3481  static const int kSeedSize = sizeof(uint32_t);
3482  STATIC_ASSERT(kPointerSize == kSeedSize);
3483 
3484  __ ldr(r0, FieldMemOperand(r0, GlobalObject::kGlobalContextOffset));
3485  static const int kRandomSeedOffset =
3486  FixedArray::kHeaderSize + Context::RANDOM_SEED_INDEX * kPointerSize;
3487  __ ldr(r2, FieldMemOperand(r0, kRandomSeedOffset));
3488  // r2: FixedArray of the global context's random seeds
3489 
3490  // Load state[0].
3491  __ ldr(r1, FieldMemOperand(r2, ByteArray::kHeaderSize));
3492  __ cmp(r1, Operand(0));
3493  __ b(eq, deferred->entry());
3494  // Load state[1].
3495  __ ldr(r0, FieldMemOperand(r2, ByteArray::kHeaderSize + kSeedSize));
3496  // r1: state[0].
3497  // r0: state[1].
3498 
3499  // state[0] = 18273 * (state[0] & 0xFFFF) + (state[0] >> 16)
3500  __ and_(r3, r1, Operand(0xFFFF));
3501  __ mov(r4, Operand(18273));
3502  __ mul(r3, r3, r4);
3503  __ add(r1, r3, Operand(r1, LSR, 16));
3504  // Save state[0].
3505  __ str(r1, FieldMemOperand(r2, ByteArray::kHeaderSize));
3506 
3507  // state[1] = 36969 * (state[1] & 0xFFFF) + (state[1] >> 16)
3508  __ and_(r3, r0, Operand(0xFFFF));
3509  __ mov(r4, Operand(36969));
3510  __ mul(r3, r3, r4);
3511  __ add(r0, r3, Operand(r0, LSR, 16));
3512  // Save state[1].
3513  __ str(r0, FieldMemOperand(r2, ByteArray::kHeaderSize + kSeedSize));
3514 
3515  // Random bit pattern = (state[0] << 14) + (state[1] & 0x3FFFF)
3516  __ and_(r0, r0, Operand(0x3FFFF));
3517  __ add(r0, r0, Operand(r1, LSL, 14));
3518 
3519  __ bind(deferred->exit());
3520  // 0x41300000 is the top half of 1.0 x 2^20 as a double.
3521  // Create this constant using mov/orr to avoid PC relative load.
3522  __ mov(r1, Operand(0x41000000));
3523  __ orr(r1, r1, Operand(0x300000));
3524  // Move 0x41300000xxxxxxxx (x = random bits) to VFP.
3525  __ vmov(d7, r0, r1);
3526  // Move 0x4130000000000000 to VFP.
3527  __ mov(r0, Operand(0, RelocInfo::NONE));
3528  __ vmov(d8, r0, r1);
3529  // Subtract and store the result in the heap number.
3530  __ vsub(d7, d7, d8);
3531 }
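// DoRandom inlines the runtime's pair of multiply-with-carry style steps and
// then converts the mixed 32 random bits to a double in [0, 1) by storing them
// as the low mantissa bits of 2^20 (high word 0x41300000) and subtracting 2^20
// again. A standalone sketch of that arithmetic in plain C++ (RandomSketch and
// the seed values are illustrative placeholders, not V8 state):

#include <cstdint>
#include <cstring>

static uint32_t seed0 = 0x12345678;  // placeholder seeds; V8 keeps these in
static uint32_t seed1 = 0x9abcdef0;  // the context's random seed array

static inline double RandomSketch() {
  seed0 = 18273 * (seed0 & 0xFFFF) + (seed0 >> 16);
  seed1 = 36969 * (seed1 & 0xFFFF) + (seed1 >> 16);
  uint32_t random_bits = (seed0 << 14) + (seed1 & 0x3FFFF);

  // 0x41300000'xxxxxxxx is 2^20 + random_bits * 2^-32, so subtracting 2^20
  // leaves a double spread over [0, 1).
  uint64_t bits = (uint64_t{0x41300000} << 32) | random_bits;
  double d;
  std::memcpy(&d, &bits, sizeof d);
  return d - 1048576.0;  // 1048576.0 == 2^20
}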
3532 
3533 
3534 void LCodeGen::DoDeferredRandom(LRandom* instr) {
3535  __ PrepareCallCFunction(1, scratch0());
3536  __ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1);
3537  // Return value is in r0.
3538 }
3539 
3540 
3541 void LCodeGen::DoMathLog(LUnaryMathOperation* instr) {
3542  ASSERT(ToDoubleRegister(instr->result()).is(d2));
3543  TranscendentalCacheStub stub(TranscendentalCache::LOG,
3544  TranscendentalCacheStub::UNTAGGED);
3545  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
3546 }
3547 
3548 
3549 void LCodeGen::DoMathTan(LUnaryMathOperation* instr) {
3550  ASSERT(ToDoubleRegister(instr->result()).is(d2));
3551  TranscendentalCacheStub stub(TranscendentalCache::TAN,
3552  TranscendentalCacheStub::UNTAGGED);
3553  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
3554 }
3555 
3556 
3557 void LCodeGen::DoMathCos(LUnaryMathOperation* instr) {
3558  ASSERT(ToDoubleRegister(instr->result()).is(d2));
3559  TranscendentalCacheStub stub(TranscendentalCache::COS,
3560  TranscendentalCacheStub::UNTAGGED);
3561  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
3562 }
3563 
3564 
3565 void LCodeGen::DoMathSin(LUnaryMathOperation* instr) {
3566  ASSERT(ToDoubleRegister(instr->result()).is(d2));
3567  TranscendentalCacheStub stub(TranscendentalCache::SIN,
3568  TranscendentalCacheStub::UNTAGGED);
3569  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
3570 }
3571 
3572 
3573 void LCodeGen::DoUnaryMathOperation(LUnaryMathOperation* instr) {
3574  switch (instr->op()) {
3575  case kMathAbs:
3576  DoMathAbs(instr);
3577  break;
3578  case kMathFloor:
3579  DoMathFloor(instr);
3580  break;
3581  case kMathRound:
3582  DoMathRound(instr);
3583  break;
3584  case kMathSqrt:
3585  DoMathSqrt(instr);
3586  break;
3587  case kMathPowHalf:
3588  DoMathPowHalf(instr);
3589  break;
3590  case kMathCos:
3591  DoMathCos(instr);
3592  break;
3593  case kMathSin:
3594  DoMathSin(instr);
3595  break;
3596  case kMathTan:
3597  DoMathTan(instr);
3598  break;
3599  case kMathLog:
3600  DoMathLog(instr);
3601  break;
3602  default:
3603  Abort("Unimplemented type of LUnaryMathOperation.");
3604  UNREACHABLE();
3605  }
3606 }
3607 
3608 
3609 void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
3610  ASSERT(ToRegister(instr->function()).is(r1));
3611  ASSERT(instr->HasPointerMap());
3612 
3613  if (instr->known_function().is_null()) {
3614  LPointerMap* pointers = instr->pointer_map();
3615  RecordPosition(pointers->position());
3616  SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
3617  ParameterCount count(instr->arity());
3618  __ InvokeFunction(r1, count, CALL_FUNCTION, generator, CALL_AS_METHOD);
3619  __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
3620  } else {
3621  CallKnownFunction(instr->known_function(),
3622  instr->arity(),
3623  instr,
3624  CALL_AS_METHOD,
3625  R1_CONTAINS_TARGET);
3626  }
3627 }
3628 
3629 
3630 void LCodeGen::DoCallKeyed(LCallKeyed* instr) {
3631  ASSERT(ToRegister(instr->result()).is(r0));
3632 
3633  int arity = instr->arity();
3634  Handle<Code> ic =
3635  isolate()->stub_cache()->ComputeKeyedCallInitialize(arity);
3636  CallCode(ic, RelocInfo::CODE_TARGET, instr);
3637  __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
3638 }
3639 
3640 
3641 void LCodeGen::DoCallNamed(LCallNamed* instr) {
3642  ASSERT(ToRegister(instr->result()).is(r0));
3643 
3644  int arity = instr->arity();
3645  RelocInfo::Mode mode = RelocInfo::CODE_TARGET;
3646  Handle<Code> ic =
3647  isolate()->stub_cache()->ComputeCallInitialize(arity, mode);
3648  __ mov(r2, Operand(instr->name()));
3649  CallCode(ic, mode, instr);
3650  // Restore context register.
3651  __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
3652 }
3653 
3654 
3655 void LCodeGen::DoCallFunction(LCallFunction* instr) {
3656  ASSERT(ToRegister(instr->function()).is(r1));
3657  ASSERT(ToRegister(instr->result()).is(r0));
3658 
3659  int arity = instr->arity();
3660  CallFunctionStub stub(arity, NO_CALL_FUNCTION_FLAGS);
3661  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
3662  __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
3663 }
3664 
3665 
3666 void LCodeGen::DoCallGlobal(LCallGlobal* instr) {
3667  ASSERT(ToRegister(instr->result()).is(r0));
3668 
3669  int arity = instr->arity();
3670  RelocInfo::Mode mode = RelocInfo::CODE_TARGET_CONTEXT;
3671  Handle<Code> ic =
3672  isolate()->stub_cache()->ComputeCallInitialize(arity, mode);
3673  __ mov(r2, Operand(instr->name()));
3674  CallCode(ic, mode, instr);
3675  __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
3676 }
3677 
3678 
3679 void LCodeGen::DoCallKnownGlobal(LCallKnownGlobal* instr) {
3680  ASSERT(ToRegister(instr->result()).is(r0));
3681  CallKnownFunction(instr->target(),
3682  instr->arity(),
3683  instr,
3684  CALL_AS_FUNCTION,
3685  R1_UNINITIALIZED);
3686 }
3687 
3688 
3689 void LCodeGen::DoCallNew(LCallNew* instr) {
3690  ASSERT(ToRegister(instr->InputAt(0)).is(r1));
3691  ASSERT(ToRegister(instr->result()).is(r0));
3692 
3693  CallConstructStub stub(NO_CALL_FUNCTION_FLAGS);
3694  __ mov(r0, Operand(instr->arity()));
3695  CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
3696 }
3697 
3698 
3699 void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
3700  CallRuntime(instr->function(), instr->arity(), instr);
3701 }
3702 
3703 
3704 void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
3705  Register object = ToRegister(instr->object());
3706  Register value = ToRegister(instr->value());
3707  Register scratch = scratch0();
3708  int offset = instr->offset();
3709 
3710  ASSERT(!object.is(value));
3711 
3712  if (!instr->transition().is_null()) {
3713  __ mov(scratch, Operand(instr->transition()));
3714  __ str(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
3715  if (instr->hydrogen()->NeedsWriteBarrierForMap()) {
3716  Register temp = ToRegister(instr->TempAt(0));
3717  // Update the write barrier for the map field.
3718  __ RecordWriteField(object,
3719  HeapObject::kMapOffset,
3720  scratch,
3721  temp,
3722  kLRHasBeenSaved,
3723  kSaveFPRegs,
3724  OMIT_REMEMBERED_SET,
3725  OMIT_SMI_CHECK);
3726  }
3727  }
3728 
3729  // Do the store.
3730  HType type = instr->hydrogen()->value()->type();
3731  SmiCheck check_needed =
3732  type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
3733  if (instr->is_in_object()) {
3734  __ str(value, FieldMemOperand(object, offset));
3735  if (instr->hydrogen()->NeedsWriteBarrier()) {
3736  // Update the write barrier for the object for in-object properties.
3737  __ RecordWriteField(object,
3738  offset,
3739  value,
3740  scratch,
3741  kLRHasBeenSaved,
3742  kSaveFPRegs,
3743  EMIT_REMEMBERED_SET,
3744  check_needed);
3745  }
3746  } else {
3747  __ ldr(scratch, FieldMemOperand(object, JSObject::kPropertiesOffset));
3748  __ str(value, FieldMemOperand(scratch, offset));
3749  if (instr->hydrogen()->NeedsWriteBarrier()) {
3750  // Update the write barrier for the properties array.
3751  // object is used as a scratch register.
3752  __ RecordWriteField(scratch,
3753  offset,
3754  value,
3755  object,
3756  kLRHasBeenSaved,
3757  kSaveFPRegs,
3758  EMIT_REMEMBERED_SET,
3759  check_needed);
3760  }
3761  }
3762 }
3763 
3764 
3765 void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
3766  ASSERT(ToRegister(instr->object()).is(r1));
3767  ASSERT(ToRegister(instr->value()).is(r0));
3768 
3769  // Name is always in r2.
3770  __ mov(r2, Operand(instr->name()));
3771  Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
3772  ? isolate()->builtins()->StoreIC_Initialize_Strict()
3773  : isolate()->builtins()->StoreIC_Initialize();
3774  CallCode(ic, RelocInfo::CODE_TARGET, instr);
3775 }
3776 
3777 
3778 void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
3779  __ cmp(ToRegister(instr->index()), ToRegister(instr->length()));
3780  DeoptimizeIf(hs, instr->environment());
3781 }
3782 
3783 
3784 void LCodeGen::DoStoreKeyedFastElement(LStoreKeyedFastElement* instr) {
3785  Register value = ToRegister(instr->value());
3786  Register elements = ToRegister(instr->object());
3787  Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) : no_reg;
3788  Register scratch = scratch0();
3789 
3790  // Do the store.
3791  if (instr->key()->IsConstantOperand()) {
3792  ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
3793  LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
3794  int offset =
3795  (ToInteger32(const_operand) + instr->additional_index()) * kPointerSize
3796  + FixedArray::kHeaderSize;
3797  __ str(value, FieldMemOperand(elements, offset));
3798  } else {
3799  __ add(scratch, elements, Operand(key, LSL, kPointerSizeLog2));
3800  if (instr->additional_index() != 0) {
3801  __ add(scratch,
3802  scratch,
3803  Operand(instr->additional_index() << kPointerSizeLog2));
3804  }
3805  __ str(value, FieldMemOperand(scratch, FixedArray::kHeaderSize));
3806  }
3807 
3808  if (instr->hydrogen()->NeedsWriteBarrier()) {
3809  HType type = instr->hydrogen()->value()->type();
3810  SmiCheck check_needed =
3811  type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
3812  // Compute address of modified element and store it into key register.
3813  __ add(key, scratch, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
3814  __ RecordWrite(elements,
3815  key,
3816  value,
3817  kLRHasBeenSaved,
3818  kSaveFPRegs,
3819  EMIT_REMEMBERED_SET,
3820  check_needed);
3821  }
3822 }
3823 
3824 
3825 void LCodeGen::DoStoreKeyedFastDoubleElement(
3826  LStoreKeyedFastDoubleElement* instr) {
3827  DwVfpRegister value = ToDoubleRegister(instr->value());
3828  Register elements = ToRegister(instr->elements());
3829  Register key = no_reg;
3830  Register scratch = scratch0();
3831  bool key_is_constant = instr->key()->IsConstantOperand();
3832  int constant_key = 0;
3833 
3834  // Calculate the effective address of the slot in the array to store the
3835  // double value.
3836  if (key_is_constant) {
3837  constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
3838  if (constant_key & 0xF0000000) {
3839  Abort("array index constant value too big.");
3840  }
3841  } else {
3842  key = ToRegister(instr->key());
3843  }
3844  int shift_size = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
3845  Operand operand = key_is_constant
3846  ? Operand((constant_key << shift_size) +
3847  FixedDoubleArray::kHeaderSize - kHeapObjectTag)
3848  : Operand(key, LSL, shift_size);
3849  __ add(scratch, elements, operand);
3850  if (!key_is_constant) {
3851  __ add(scratch, scratch,
3852  Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
3853  }
3854 
3855  if (instr->NeedsCanonicalization()) {
3856  // Check for NaN. All NaNs must be canonicalized.
3857  __ VFPCompareAndSetFlags(value, value);
3858  // Only load canonical NaN if the comparison above set the overflow.
3859  __ Vmov(value,
3860  FixedDoubleArray::canonical_not_the_hole_nan_as_double(),
3861  vs);
3862  }
3863 
3864  __ vstr(value, scratch, instr->additional_index() << shift_size);
3865 }
3866 
3867 
3868 void LCodeGen::DoStoreKeyedSpecializedArrayElement(
3869  LStoreKeyedSpecializedArrayElement* instr) {
3870 
3871  Register external_pointer = ToRegister(instr->external_pointer());
3872  Register key = no_reg;
3873  ElementsKind elements_kind = instr->elements_kind();
3874  bool key_is_constant = instr->key()->IsConstantOperand();
3875  int constant_key = 0;
3876  if (key_is_constant) {
3877  constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
3878  if (constant_key & 0xF0000000) {
3879  Abort("array index constant value too big.");
3880  }
3881  } else {
3882  key = ToRegister(instr->key());
3883  }
3884  int shift_size = ElementsKindToShiftSize(elements_kind);
3885  int additional_offset = instr->additional_index() << shift_size;
3886 
3887  if (elements_kind == EXTERNAL_FLOAT_ELEMENTS ||
3888  elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
3889  CpuFeatures::Scope scope(VFP3);
3890  DwVfpRegister value(ToDoubleRegister(instr->value()));
3891  Operand operand(key_is_constant ? Operand(constant_key << shift_size)
3892  : Operand(key, LSL, shift_size));
3893  __ add(scratch0(), external_pointer, operand);
3894  if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
3895  __ vcvt_f32_f64(double_scratch0().low(), value);
3896  __ vstr(double_scratch0().low(), scratch0(), additional_offset);
3897  } else { // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS
3898  __ vstr(value, scratch0(), additional_offset);
3899  }
3900  } else {
3901  Register value(ToRegister(instr->value()));
3902  if (instr->additional_index() != 0 && !key_is_constant) {
3903  __ add(scratch0(), key, Operand(instr->additional_index()));
3904  }
3905  MemOperand mem_operand(key_is_constant
3906  ? MemOperand(external_pointer,
3907  ((constant_key + instr->additional_index())
3908  << shift_size))
3909  : (instr->additional_index() == 0
3910  ? MemOperand(external_pointer, key, LSL, shift_size)
3911  : MemOperand(external_pointer, scratch0(), LSL, shift_size)));
3912  switch (elements_kind) {
3913  case EXTERNAL_PIXEL_ELEMENTS:
3914  case EXTERNAL_BYTE_ELEMENTS:
3915  case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
3916  __ strb(value, mem_operand);
3917  break;
3918  case EXTERNAL_SHORT_ELEMENTS:
3919  case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
3920  __ strh(value, mem_operand);
3921  break;
3922  case EXTERNAL_INT_ELEMENTS:
3923  case EXTERNAL_UNSIGNED_INT_ELEMENTS:
3924  __ str(value, mem_operand);
3925  break;
3926  case EXTERNAL_FLOAT_ELEMENTS:
3927  case EXTERNAL_DOUBLE_ELEMENTS:
3928  case FAST_DOUBLE_ELEMENTS:
3929  case FAST_ELEMENTS:
3930  case FAST_SMI_ELEMENTS:
3931  case FAST_HOLEY_DOUBLE_ELEMENTS:
3932  case FAST_HOLEY_ELEMENTS:
3933  case FAST_HOLEY_SMI_ELEMENTS:
3934  case DICTIONARY_ELEMENTS:
3935  case NON_STRICT_ARGUMENTS_ELEMENTS:
3936  UNREACHABLE();
3937  break;
3938  }
3939  }
3940 }
3941 
3942 
3943 void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
3944  ASSERT(ToRegister(instr->object()).is(r2));
3945  ASSERT(ToRegister(instr->key()).is(r1));
3946  ASSERT(ToRegister(instr->value()).is(r0));
3947 
3948  Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
3949  ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
3950  : isolate()->builtins()->KeyedStoreIC_Initialize();
3951  CallCode(ic, RelocInfo::CODE_TARGET, instr);
3952 }
3953 
3954 
3955 void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
3956  Register object_reg = ToRegister(instr->object());
3957  Register new_map_reg = ToRegister(instr->new_map_reg());
3958  Register scratch = scratch0();
3959 
3960  Handle<Map> from_map = instr->original_map();
3961  Handle<Map> to_map = instr->transitioned_map();
3962  ElementsKind from_kind = from_map->elements_kind();
3963  ElementsKind to_kind = to_map->elements_kind();
3964 
3965  Label not_applicable;
3966  __ ldr(scratch, FieldMemOperand(object_reg, HeapObject::kMapOffset));
3967  __ cmp(scratch, Operand(from_map));
3968  __ b(ne, &not_applicable);
3969  __ mov(new_map_reg, Operand(to_map));
3970 
3971  if (IsSimpleMapChangeTransition(from_kind, to_kind)) {
3972  __ str(new_map_reg, FieldMemOperand(object_reg, HeapObject::kMapOffset));
3973  // Write barrier.
3974  __ RecordWriteField(object_reg, HeapObject::kMapOffset, new_map_reg,
3975  scratch, kLRHasBeenSaved, kDontSaveFPRegs);
3976  } else if (IsFastSmiElementsKind(from_kind) &&
3977  IsFastDoubleElementsKind(to_kind)) {
3978  Register fixed_object_reg = ToRegister(instr->temp_reg());
3979  ASSERT(fixed_object_reg.is(r2));
3980  ASSERT(new_map_reg.is(r3));
3981  __ mov(fixed_object_reg, object_reg);
3982  CallCode(isolate()->builtins()->TransitionElementsSmiToDouble(),
3983  RelocInfo::CODE_TARGET, instr);
3984  } else if (IsFastDoubleElementsKind(from_kind) &&
3985  IsFastObjectElementsKind(to_kind)) {
3986  Register fixed_object_reg = ToRegister(instr->temp_reg());
3987  ASSERT(fixed_object_reg.is(r2));
3988  ASSERT(new_map_reg.is(r3));
3989  __ mov(fixed_object_reg, object_reg);
3990  CallCode(isolate()->builtins()->TransitionElementsDoubleToObject(),
3991  RelocInfo::CODE_TARGET, instr);
3992  } else {
3993  UNREACHABLE();
3994  }
3995  __ bind(&not_applicable);
3996 }
3997 
3998 
3999 void LCodeGen::DoStringAdd(LStringAdd* instr) {
4000  __ push(ToRegister(instr->left()));
4001  __ push(ToRegister(instr->right()));
4002  StringAddStub stub(NO_STRING_CHECK_IN_STUB);
4003  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
4004 }
4005 
4006 
4007 void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
4008  class DeferredStringCharCodeAt: public LDeferredCode {
4009  public:
4010  DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
4011  : LDeferredCode(codegen), instr_(instr) { }
4012  virtual void Generate() { codegen()->DoDeferredStringCharCodeAt(instr_); }
4013  virtual LInstruction* instr() { return instr_; }
4014  private:
4015  LStringCharCodeAt* instr_;
4016  };
4017 
4018  DeferredStringCharCodeAt* deferred =
4019  new(zone()) DeferredStringCharCodeAt(this, instr);
4020 
4020 
4021  StringCharLoadGenerator::Generate(masm(),
4022  ToRegister(instr->string()),
4023  ToRegister(instr->index()),
4024  ToRegister(instr->result()),
4025  deferred->entry());
4026  __ bind(deferred->exit());
4027 }
4028 
4029 
4030 void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
4031  Register string = ToRegister(instr->string());
4032  Register result = ToRegister(instr->result());
4033  Register scratch = scratch0();
4034 
4035  // TODO(3095996): Get rid of this. For now, we need to make the
4036  // result register contain a valid pointer because it is already
4037  // contained in the register pointer map.
4038  __ mov(result, Operand(0));
4039 
4040  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
4041  __ push(string);
4042  // Push the index as a smi. This is safe because of the checks in
4043  // DoStringCharCodeAt above.
4044  if (instr->index()->IsConstantOperand()) {
4045  int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
4046  __ mov(scratch, Operand(Smi::FromInt(const_index)));
4047  __ push(scratch);
4048  } else {
4049  Register index = ToRegister(instr->index());
4050  __ SmiTag(index);
4051  __ push(index);
4052  }
4053  CallRuntimeFromDeferred(Runtime::kStringCharCodeAt, 2, instr);
4054  if (FLAG_debug_code) {
4055  __ AbortIfNotSmi(r0);
4056  }
4057  __ SmiUntag(r0);
4058  __ StoreToSafepointRegisterSlot(r0, result);
4059 }
4060 
4061 
4062 void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
4063  class DeferredStringCharFromCode: public LDeferredCode {
4064  public:
4065  DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
4066  : LDeferredCode(codegen), instr_(instr) { }
4067  virtual void Generate() { codegen()->DoDeferredStringCharFromCode(instr_); }
4068  virtual LInstruction* instr() { return instr_; }
4069  private:
4070  LStringCharFromCode* instr_;
4071  };
4072 
4073  DeferredStringCharFromCode* deferred =
4074  new(zone()) DeferredStringCharFromCode(this, instr);
4075 
4076  ASSERT(instr->hydrogen()->value()->representation().IsInteger32());
4077  Register char_code = ToRegister(instr->char_code());
4078  Register result = ToRegister(instr->result());
4079  ASSERT(!char_code.is(result));
4080 
4081  __ cmp(char_code, Operand(String::kMaxAsciiCharCode));
4082  __ b(hi, deferred->entry());
4083  __ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex);
4084  __ add(result, result, Operand(char_code, LSL, kPointerSizeLog2));
4085  __ ldr(result, FieldMemOperand(result, FixedArray::kHeaderSize));
4086  __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
4087  __ cmp(result, ip);
4088  __ b(eq, deferred->entry());
4089  __ bind(deferred->exit());
4090 }
4091 
4092 
4093 void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
4094  Register char_code = ToRegister(instr->char_code());
4095  Register result = ToRegister(instr->result());
4096 
4097  // TODO(3095996): Get rid of this. For now, we need to make the
4098  // result register contain a valid pointer because it is already
4099  // contained in the register pointer map.
4100  __ mov(result, Operand(0));
4101 
4102  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
4103  __ SmiTag(char_code);
4104  __ push(char_code);
4105  CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr);
4106  __ StoreToSafepointRegisterSlot(r0, result);
4107 }
4108 
4109 
4110 void LCodeGen::DoStringLength(LStringLength* instr) {
4111  Register string = ToRegister(instr->InputAt(0));
4112  Register result = ToRegister(instr->result());
4113  __ ldr(result, FieldMemOperand(string, String::kLengthOffset));
4114 }
4115 
4116 
4117 void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
4118  LOperand* input = instr->InputAt(0);
4119  ASSERT(input->IsRegister() || input->IsStackSlot());
4120  LOperand* output = instr->result();
4121  ASSERT(output->IsDoubleRegister());
4122  SwVfpRegister single_scratch = double_scratch0().low();
4123  if (input->IsStackSlot()) {
4124  Register scratch = scratch0();
4125  __ ldr(scratch, ToMemOperand(input));
4126  __ vmov(single_scratch, scratch);
4127  } else {
4128  __ vmov(single_scratch, ToRegister(input));
4129  }
4130  __ vcvt_f64_s32(ToDoubleRegister(output), single_scratch);
4131 }
4132 
4133 
4134 void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
4135  class DeferredNumberTagI: public LDeferredCode {
4136  public:
4137  DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr)
4138  : LDeferredCode(codegen), instr_(instr) { }
4139  virtual void Generate() { codegen()->DoDeferredNumberTagI(instr_); }
4140  virtual LInstruction* instr() { return instr_; }
4141  private:
4142  LNumberTagI* instr_;
4143  };
4144 
4145  Register src = ToRegister(instr->InputAt(0));
4146  Register dst = ToRegister(instr->result());
4147 
4148  DeferredNumberTagI* deferred = new(zone()) DeferredNumberTagI(this, instr);
4149  __ SmiTag(dst, src, SetCC);
4150  __ b(vs, deferred->entry());
4151  __ bind(deferred->exit());
4152 }
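// DoNumberTagI tags the integer by shifting it left one bit (SmiTag with
// SetCC); the shift overflows exactly when bits 30 and 31 of the original
// value disagree, i.e. when the value does not fit in the 31-bit smi payload
// and the deferred path has to box it in a HeapNumber instead. A hedged sketch
// of the same fits-in-a-smi test (FitsInSmi is an illustrative name; assumes
// 32-bit smis with a one-bit tag):

#include <cstdint>

static inline bool FitsInSmi(int32_t value) {
  // One bit is consumed by the smi tag, leaving a 31-bit signed payload.
  return value >= -(1 << 30) && value <= (1 << 30) - 1;
}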
4153 
4154 
4155 void LCodeGen::DoDeferredNumberTagI(LNumberTagI* instr) {
4156  Label slow;
4157  Register src = ToRegister(instr->InputAt(0));
4158  Register dst = ToRegister(instr->result());
4159  DoubleRegister dbl_scratch = double_scratch0();
4160  SwVfpRegister flt_scratch = dbl_scratch.low();
4161 
4162  // Preserve the value of all registers.
4163  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
4164 
4165  // There was overflow, so bits 30 and 31 of the original integer
4166  // disagree. Try to allocate a heap number in new space and store
4167  // the value in there. If that fails, call the runtime system.
4168  Label done;
4169  if (dst.is(src)) {
4170  __ SmiUntag(src, dst);
4171  __ eor(src, src, Operand(0x80000000));
4172  }
4173  __ vmov(flt_scratch, src);
4174  __ vcvt_f64_s32(dbl_scratch, flt_scratch);
4175  if (FLAG_inline_new) {
4176  __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
4177  __ AllocateHeapNumber(r5, r3, r4, r6, &slow);
4178  __ Move(dst, r5);
4179  __ b(&done);
4180  }
4181 
4182  // Slow case: Call the runtime system to do the number allocation.
4183  __ bind(&slow);
4184 
4185  // TODO(3095996): Put a valid pointer value in the stack slot where the result
4186  // register is stored, as this register is in the pointer map, but contains an
4187  // integer value.
4188  __ mov(ip, Operand(0));
4189  __ StoreToSafepointRegisterSlot(ip, dst);
4190  CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
4191  __ Move(dst, r0);
4192 
4193  // Done. Put the value in dbl_scratch into the value of the allocated heap
4194  // number.
4195  __ bind(&done);
4196  __ sub(ip, dst, Operand(kHeapObjectTag));
4197  __ vstr(dbl_scratch, ip, HeapNumber::kValueOffset);
4198  __ StoreToSafepointRegisterSlot(dst, dst);
4199 }
4200 
4201 
4202 void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
4203  class DeferredNumberTagD: public LDeferredCode {
4204  public:
4205  DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
4206  : LDeferredCode(codegen), instr_(instr) { }
4207  virtual void Generate() { codegen()->DoDeferredNumberTagD(instr_); }
4208  virtual LInstruction* instr() { return instr_; }
4209  private:
4210  LNumberTagD* instr_;
4211  };
4212 
4213  DoubleRegister input_reg = ToDoubleRegister(instr->InputAt(0));
4214  Register scratch = scratch0();
4215  Register reg = ToRegister(instr->result());
4216  Register temp1 = ToRegister(instr->TempAt(0));
4217  Register temp2 = ToRegister(instr->TempAt(1));
4218 
4219  DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr);
4220  if (FLAG_inline_new) {
4221  __ LoadRoot(scratch, Heap::kHeapNumberMapRootIndex);
4222  __ AllocateHeapNumber(reg, temp1, temp2, scratch, deferred->entry());
4223  } else {
4224  __ jmp(deferred->entry());
4225  }
4226  __ bind(deferred->exit());
4227  __ sub(ip, reg, Operand(kHeapObjectTag));
4228  __ vstr(input_reg, ip, HeapNumber::kValueOffset);
4229 }
4230 
4231 
4232 void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
4233  // TODO(3095996): Get rid of this. For now, we need to make the
4234  // result register contain a valid pointer because it is already
4235  // contained in the register pointer map.
4236  Register reg = ToRegister(instr->result());
4237  __ mov(reg, Operand(0));
4238 
4239  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
4240  CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
4241  __ StoreToSafepointRegisterSlot(r0, reg);
4242 }
4243 
4244 
4245 void LCodeGen::DoSmiTag(LSmiTag* instr) {
4246  ASSERT(!instr->hydrogen_value()->CheckFlag(HValue::kCanOverflow));
4247  __ SmiTag(ToRegister(instr->result()), ToRegister(instr->InputAt(0)));
4248 }
4249 
4250 
4251 void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
4252  Register input = ToRegister(instr->InputAt(0));
4253  Register result = ToRegister(instr->result());
4254  if (instr->needs_check()) {
4255  STATIC_ASSERT(kHeapObjectTag == 1);
4256  // If the input is a HeapObject, SmiUntag will set the carry flag.
4257  __ SmiUntag(result, input, SetCC);
4258  DeoptimizeIf(cs, instr->environment());
4259  } else {
4260  __ SmiUntag(result, input);
4261  }
4262 }
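// DoSmiUntag exploits the tagging scheme directly: smis carry tag bit 0 and
// heap object pointers carry tag bit 1, so shifting right by one with SetCC
// both untags the value and moves the former tag bit into the carry flag,
// which selects the deoptimization path. The same decision in plain C++
// (UntagIfSmi is an illustrative name; word stands for a raw tagged value):

#include <cstdint>

static inline bool UntagIfSmi(intptr_t word, int32_t* value_out) {
  if (word & 1) return false;                     // tag bit set: a heap object
  *value_out = static_cast<int32_t>(word >> 1);   // drop the tag bit
  return true;
}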
4263 
4264 
4265 void LCodeGen::EmitNumberUntagD(Register input_reg,
4266  DoubleRegister result_reg,
4267  bool deoptimize_on_undefined,
4268  bool deoptimize_on_minus_zero,
4269  LEnvironment* env) {
4270  Register scratch = scratch0();
4271  SwVfpRegister flt_scratch = double_scratch0().low();
4272  ASSERT(!result_reg.is(double_scratch0()));
4273 
4274  Label load_smi, heap_number, done;
4275 
4276  // Smi check.
4277  __ UntagAndJumpIfSmi(scratch, input_reg, &load_smi);
4278 
4279  // Heap number map check.
4280  __ ldr(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
4281  __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
4282  __ cmp(scratch, Operand(ip));
4283  if (deoptimize_on_undefined) {
4284  DeoptimizeIf(ne, env);
4285  } else {
4286  Label heap_number;
4287  __ b(eq, &heap_number);
4288 
4289  __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
4290  __ cmp(input_reg, Operand(ip));
4291  DeoptimizeIf(ne, env);
4292 
4293  // Convert undefined to NaN.
4294  __ LoadRoot(ip, Heap::kNanValueRootIndex);
4295  __ sub(ip, ip, Operand(kHeapObjectTag));
4296  __ vldr(result_reg, ip, HeapNumber::kValueOffset);
4297  __ jmp(&done);
4298 
4299  __ bind(&heap_number);
4300  }
4301  // Heap number to double register conversion.
4302  __ sub(ip, input_reg, Operand(kHeapObjectTag));
4303  __ vldr(result_reg, ip, HeapNumber::kValueOffset);
4304  if (deoptimize_on_minus_zero) {
4305  __ vmov(ip, result_reg.low());
4306  __ cmp(ip, Operand(0));
4307  __ b(ne, &done);
4308  __ vmov(ip, result_reg.high());
4309  __ cmp(ip, Operand(HeapNumber::kSignMask));
4310  DeoptimizeIf(eq, env);
4311  }
4312  __ jmp(&done);
4313 
4314  // Smi to double register conversion
4315  __ bind(&load_smi);
4316  // scratch: untagged value of input_reg
4317  __ vmov(flt_scratch, scratch);
4318  __ vcvt_f64_s32(result_reg, flt_scratch);
4319  __ bind(&done);
4320 }
4321 
4322 
4323 void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
4324  Register input_reg = ToRegister(instr->InputAt(0));
4325  Register scratch1 = scratch0();
4326  Register scratch2 = ToRegister(instr->TempAt(0));
4327  DwVfpRegister double_scratch = double_scratch0();
4328  SwVfpRegister single_scratch = double_scratch.low();
4329 
4330  ASSERT(!scratch1.is(input_reg) && !scratch1.is(scratch2));
4331  ASSERT(!scratch2.is(input_reg) && !scratch2.is(scratch1));
4332 
4333  Label done;
4334 
4335  // The input was optimistically untagged; revert it.
4336  // The carry flag is set when we reach this deferred code as we just executed
4337  // SmiUntag(heap_object, SetCC)
4338  STATIC_ASSERT(kHeapObjectTag == 1);
4339  __ adc(input_reg, input_reg, Operand(input_reg));
4340 
4341  // Heap number map check.
4342  __ ldr(scratch1, FieldMemOperand(input_reg, HeapObject::kMapOffset));
4343  __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
4344  __ cmp(scratch1, Operand(ip));
4345 
4346  if (instr->truncating()) {
4347  Register scratch3 = ToRegister(instr->TempAt(1));
4348  DwVfpRegister double_scratch2 = ToDoubleRegister(instr->TempAt(2));
4349  ASSERT(!scratch3.is(input_reg) &&
4350  !scratch3.is(scratch1) &&
4351  !scratch3.is(scratch2));
4352  // Performs a truncating conversion of a floating point number as used by
4353  // the JS bitwise operations.
4354  Label heap_number;
4355  __ b(eq, &heap_number);
4356  // Check for undefined. Undefined is converted to zero for truncating
4357  // conversions.
4358  __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
4359  __ cmp(input_reg, Operand(ip));
4360  DeoptimizeIf(ne, instr->environment());
4361  __ mov(input_reg, Operand(0));
4362  __ b(&done);
4363 
4364  __ bind(&heap_number);
4365  __ sub(scratch1, input_reg, Operand(kHeapObjectTag));
4366  __ vldr(double_scratch2, scratch1, HeapNumber::kValueOffset);
4367 
4368  __ EmitECMATruncate(input_reg,
4369  double_scratch2,
4370  single_scratch,
4371  scratch1,
4372  scratch2,
4373  scratch3);
4374 
4375  } else {
4376  CpuFeatures::Scope scope(VFP3);
4377  // Deoptimize if we don't have a heap number.
4378  DeoptimizeIf(ne, instr->environment());
4379 
4380  __ sub(ip, input_reg, Operand(kHeapObjectTag));
4381  __ vldr(double_scratch, ip, HeapNumber::kValueOffset);
4382  __ EmitVFPTruncate(kRoundToZero,
4383  single_scratch,
4384  double_scratch,
4385  scratch1,
4386  scratch2,
4387  kCheckForInexactConversion);
4388  DeoptimizeIf(ne, instr->environment());
4389  // Load the result.
4390  __ vmov(input_reg, single_scratch);
4391 
4392  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
4393  __ cmp(input_reg, Operand(0));
4394  __ b(ne, &done);
4395  __ vmov(scratch1, double_scratch.high());
4396  __ tst(scratch1, Operand(HeapNumber::kSignMask));
4397  DeoptimizeIf(ne, instr->environment());
4398  }
4399  }
4400  __ bind(&done);
4401 }
4402 
4403 
4404 void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
4405  class DeferredTaggedToI: public LDeferredCode {
4406  public:
4407  DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
4408  : LDeferredCode(codegen), instr_(instr) { }
4409  virtual void Generate() { codegen()->DoDeferredTaggedToI(instr_); }
4410  virtual LInstruction* instr() { return instr_; }
4411  private:
4412  LTaggedToI* instr_;
4413  };
4414 
4415  LOperand* input = instr->InputAt(0);
4416  ASSERT(input->IsRegister());
4417  ASSERT(input->Equals(instr->result()));
4418 
4419  Register input_reg = ToRegister(input);
4420 
4421  DeferredTaggedToI* deferred = new(zone()) DeferredTaggedToI(this, instr);
4422 
4423  // Optimistically untag the input.
4424  // If the input is a HeapObject, SmiUntag will set the carry flag.
4425  __ SmiUntag(input_reg, SetCC);
4426  // Branch to deferred code if the input was tagged.
4427  // The deferred code will take care of restoring the tag.
4428  __ b(cs, deferred->entry());
4429  __ bind(deferred->exit());
4430 }
4431 
4432 
4433 void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
4434  LOperand* input = instr->InputAt(0);
4435  ASSERT(input->IsRegister());
4436  LOperand* result = instr->result();
4437  ASSERT(result->IsDoubleRegister());
4438 
4439  Register input_reg = ToRegister(input);
4440  DoubleRegister result_reg = ToDoubleRegister(result);
4441 
4442  EmitNumberUntagD(input_reg, result_reg,
4443  instr->hydrogen()->deoptimize_on_undefined(),
4444  instr->hydrogen()->deoptimize_on_minus_zero(),
4445  instr->environment());
4446 }
4447 
4448 
4449 void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
4450  Register result_reg = ToRegister(instr->result());
4451  Register scratch1 = scratch0();
4452  Register scratch2 = ToRegister(instr->TempAt(0));
4453  DwVfpRegister double_input = ToDoubleRegister(instr->InputAt(0));
4454  SwVfpRegister single_scratch = double_scratch0().low();
4455 
4456  Label done;
4457 
4458  if (instr->truncating()) {
4459  Register scratch3 = ToRegister(instr->TempAt(1));
4460  __ EmitECMATruncate(result_reg,
4461  double_input,
4462  single_scratch,
4463  scratch1,
4464  scratch2,
4465  scratch3);
4466  } else {
4467  VFPRoundingMode rounding_mode = kRoundToMinusInf;
4468  __ EmitVFPTruncate(rounding_mode,
4469  single_scratch,
4470  double_input,
4471  scratch1,
4472  scratch2,
4473  kCheckForInexactConversion);
4474  // Deoptimize if we had a vfp invalid exception,
4475  // including inexact operation.
4476  DeoptimizeIf(ne, instr->environment());
4477  // Retrieve the result.
4478  __ vmov(result_reg, single_scratch);
4479  }
4480  __ bind(&done);
4481 }
4482 
4483 
4484 void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
4485  LOperand* input = instr->InputAt(0);
4486  __ tst(ToRegister(input), Operand(kSmiTagMask));
4487  DeoptimizeIf(ne, instr->environment());
4488 }
4489 
4490 
4491 void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
4492  LOperand* input = instr->InputAt(0);
4493  __ tst(ToRegister(input), Operand(kSmiTagMask));
4494  DeoptimizeIf(eq, instr->environment());
4495 }
4496 
4497 
4498 void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
4499  Register input = ToRegister(instr->InputAt(0));
4500  Register scratch = scratch0();
4501 
4502  __ ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
4503  __ ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
4504 
4505  if (instr->hydrogen()->is_interval_check()) {
4506  InstanceType first;
4507  InstanceType last;
4508  instr->hydrogen()->GetCheckInterval(&first, &last);
4509 
4510  __ cmp(scratch, Operand(first));
4511 
4512  // If there is only one type in the interval check for equality.
4513  if (first == last) {
4514  DeoptimizeIf(ne, instr->environment());
4515  } else {
4516  DeoptimizeIf(lo, instr->environment());
4517  // Omit check for the last type.
4518  if (last != LAST_TYPE) {
4519  __ cmp(scratch, Operand(last));
4520  DeoptimizeIf(hi, instr->environment());
4521  }
4522  }
4523  } else {
4524  uint8_t mask;
4525  uint8_t tag;
4526  instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);
4527 
4528  if (IsPowerOf2(mask)) {
4529  ASSERT(tag == 0 || IsPowerOf2(tag));
4530  __ tst(scratch, Operand(mask));
4531  DeoptimizeIf(tag == 0 ? ne : eq, instr->environment());
4532  } else {
4533  __ and_(scratch, scratch, Operand(mask));
4534  __ cmp(scratch, Operand(tag));
4535  DeoptimizeIf(ne, instr->environment());
4536  }
4537  }
4538 }
4539 
4540 
4541 void LCodeGen::DoCheckFunction(LCheckFunction* instr) {
4542  Register reg = ToRegister(instr->value());
4543  Handle<JSFunction> target = instr->hydrogen()->target();
4544  if (isolate()->heap()->InNewSpace(*target)) {
4545  Register reg = ToRegister(instr->value());
4546  Handle<JSGlobalPropertyCell> cell =
4547  isolate()->factory()->NewJSGlobalPropertyCell(target);
4548  __ mov(ip, Operand(Handle<Object>(cell)));
4549  __ ldr(ip, FieldMemOperand(ip, JSGlobalPropertyCell::kValueOffset));
4550  __ cmp(reg, ip);
4551  } else {
4552  __ cmp(reg, Operand(target));
4553  }
4554  DeoptimizeIf(ne, instr->environment());
4555 }
4556 
4557 
4558 void LCodeGen::DoCheckMapCommon(Register reg,
4559  Register scratch,
4560  Handle<Map> map,
4561  CompareMapMode mode,
4562  LEnvironment* env) {
4563  Label success;
4564  __ CompareMap(reg, scratch, map, &success, mode);
4565  DeoptimizeIf(ne, env);
4566  __ bind(&success);
4567 }
4568 
4569 
4570 void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
4571  Register scratch = scratch0();
4572  LOperand* input = instr->InputAt(0);
4573  ASSERT(input->IsRegister());
4574  Register reg = ToRegister(input);
4575 
4576  Label success;
4577  SmallMapList* map_set = instr->hydrogen()->map_set();
4578  for (int i = 0; i < map_set->length() - 1; i++) {
4579  Handle<Map> map = map_set->at(i);
4580  __ CompareMap(reg, scratch, map, &success, REQUIRE_EXACT_MAP);
4581  __ b(eq, &success);
4582  }
4583  Handle<Map> map = map_set->last();
4584  DoCheckMapCommon(reg, scratch, map, REQUIRE_EXACT_MAP, instr->environment());
4585  __ bind(&success);
4586 }
4587 
4588 
4589 void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
4590  DoubleRegister value_reg = ToDoubleRegister(instr->unclamped());
4591  Register result_reg = ToRegister(instr->result());
4592  DoubleRegister temp_reg = ToDoubleRegister(instr->TempAt(0));
4593  __ ClampDoubleToUint8(result_reg, value_reg, temp_reg);
4594 }
4595 
4596 
4597 void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
4598  Register unclamped_reg = ToRegister(instr->unclamped());
4599  Register result_reg = ToRegister(instr->result());
4600  __ ClampUint8(result_reg, unclamped_reg);
4601 }
4602 
4603 
4604 void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
4605  Register scratch = scratch0();
4606  Register input_reg = ToRegister(instr->unclamped());
4607  Register result_reg = ToRegister(instr->result());
4608  DoubleRegister temp_reg = ToDoubleRegister(instr->TempAt(0));
4609  Label is_smi, done, heap_number;
4610 
4611  // Both smi and heap number cases are handled.
4612  __ UntagAndJumpIfSmi(result_reg, input_reg, &is_smi);
4613 
4614  // Check for heap number
4615  __ ldr(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
4616  __ cmp(scratch, Operand(factory()->heap_number_map()));
4617  __ b(eq, &heap_number);
4618 
4619  // Check for undefined. Undefined is converted to zero for clamping
4620  // conversions.
4621  __ cmp(input_reg, Operand(factory()->undefined_value()));
4622  DeoptimizeIf(ne, instr->environment());
4623  __ mov(result_reg, Operand(0));
4624  __ jmp(&done);
4625 
4626  // Heap number
4627  __ bind(&heap_number);
4628  __ vldr(double_scratch0(), FieldMemOperand(input_reg,
4629  HeapNumber::kValueOffset));
4630  __ ClampDoubleToUint8(result_reg, double_scratch0(), temp_reg);
4631  __ jmp(&done);
4632 
4633  // smi
4634  __ bind(&is_smi);
4635  __ ClampUint8(result_reg, result_reg);
4636 
4637  __ bind(&done);
4638 }
4639 
4640 
4641 void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
4642  Register temp1 = ToRegister(instr->TempAt(0));
4643  Register temp2 = ToRegister(instr->TempAt(1));
4644 
4645  Handle<JSObject> holder = instr->holder();
4646  Handle<JSObject> current_prototype = instr->prototype();
4647 
4648  // Load prototype object.
4649  __ LoadHeapObject(temp1, current_prototype);
4650 
4651  // Check prototype maps up to the holder.
4652  while (!current_prototype.is_identical_to(holder)) {
4653  DoCheckMapCommon(temp1, temp2,
4654  Handle<Map>(current_prototype->map()),
4655  ALLOW_ELEMENT_TRANSITION_MAPS, instr->environment());
4656  current_prototype =
4657  Handle<JSObject>(JSObject::cast(current_prototype->GetPrototype()));
4658  // Load next prototype object.
4659  __ LoadHeapObject(temp1, current_prototype);
4660  }
4661 
4662  // Check the holder map.
4663  DoCheckMapCommon(temp1, temp2,
4664  Handle<Map>(current_prototype->map()),
4665  ALLOW_ELEMENT_TRANSITION_MAPS, instr->environment());
4666  DeoptimizeIf(ne, instr->environment());
4667 }
4668 
4669 
4670 void LCodeGen::DoAllocateObject(LAllocateObject* instr) {
4671  class DeferredAllocateObject: public LDeferredCode {
4672  public:
4673  DeferredAllocateObject(LCodeGen* codegen, LAllocateObject* instr)
4674  : LDeferredCode(codegen), instr_(instr) { }
4675  virtual void Generate() { codegen()->DoDeferredAllocateObject(instr_); }
4676  virtual LInstruction* instr() { return instr_; }
4677  private:
4678  LAllocateObject* instr_;
4679  };
4680 
4681  DeferredAllocateObject* deferred =
4682  new(zone()) DeferredAllocateObject(this, instr);
4683 
4684  Register result = ToRegister(instr->result());
4685  Register scratch = ToRegister(instr->TempAt(0));
4686  Register scratch2 = ToRegister(instr->TempAt(1));
4687  Handle<JSFunction> constructor = instr->hydrogen()->constructor();
4688  Handle<Map> initial_map(constructor->initial_map());
4689  int instance_size = initial_map->instance_size();
4690  ASSERT(initial_map->pre_allocated_property_fields() +
4691  initial_map->unused_property_fields() -
4692  initial_map->inobject_properties() == 0);
4693 
4694  // Allocate memory for the object. The initial map might change when
4695  // the constructor's prototype changes, but instance size and property
4696  // counts remain unchanged (if slack tracking finished).
4697  ASSERT(!constructor->shared()->IsInobjectSlackTrackingInProgress());
4698  __ AllocateInNewSpace(instance_size,
4699  result,
4700  scratch,
4701  scratch2,
4702  deferred->entry(),
4703  TAG_OBJECT);
4704 
4705  __ bind(deferred->exit());
4706  if (FLAG_debug_code) {
4707  Label is_in_new_space;
4708  __ JumpIfInNewSpace(result, scratch, &is_in_new_space);
4709  __ Abort("Allocated object is not in new-space");
4710  __ bind(&is_in_new_space);
4711  }
4712 
4713  // Load the initial map.
4714  Register map = scratch;
4715  __ LoadHeapObject(map, constructor);
4716  __ ldr(map, FieldMemOperand(map, JSFunction::kPrototypeOrInitialMapOffset));
4717 
4718  // Initialize map and fields of the newly allocated object.
4719  ASSERT(initial_map->instance_type() == JS_OBJECT_TYPE);
4720  __ str(map, FieldMemOperand(result, JSObject::kMapOffset));
4721  __ LoadRoot(scratch, Heap::kEmptyFixedArrayRootIndex);
4722  __ str(scratch, FieldMemOperand(result, JSObject::kElementsOffset));
4723  __ str(scratch, FieldMemOperand(result, JSObject::kPropertiesOffset));
4724  if (initial_map->inobject_properties() != 0) {
4725  __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
4726  for (int i = 0; i < initial_map->inobject_properties(); i++) {
4727  int property_offset = JSObject::kHeaderSize + i * kPointerSize;
4728  __ str(scratch, FieldMemOperand(result, property_offset));
4729  }
4730  }
4731 }
4732 
4733 
4734 void LCodeGen::DoDeferredAllocateObject(LAllocateObject* instr) {
4735  Register result = ToRegister(instr->result());
4736  Handle<JSFunction> constructor = instr->hydrogen()->constructor();
4737  Handle<Map> initial_map(constructor->initial_map());
4738  int instance_size = initial_map->instance_size();
4739 
4740  // TODO(3095996): Get rid of this. For now, we need to make the
4741  // result register contain a valid pointer because it is already
4742  // contained in the register pointer map.
4743  __ mov(result, Operand(0));
4744 
4745  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
4746  __ mov(r0, Operand(Smi::FromInt(instance_size)));
4747  __ push(r0);
4748  CallRuntimeFromDeferred(Runtime::kAllocateInNewSpace, 1, instr);
4749  __ StoreToSafepointRegisterSlot(r0, result);
4750 }
4751 
4752 
4753 void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
4754  Heap* heap = isolate()->heap();
4755  ElementsKind boilerplate_elements_kind =
4756  instr->hydrogen()->boilerplate_elements_kind();
4757 
4758  // Deopt if the array literal boilerplate ElementsKind is of a type different
4759  // than the expected one. The check isn't necessary if the boilerplate has
4760  // already been converted to TERMINAL_FAST_ELEMENTS_KIND.
4761  if (CanTransitionToMoreGeneralFastElementsKind(
4762  boilerplate_elements_kind, true)) {
4763  __ LoadHeapObject(r1, instr->hydrogen()->boilerplate_object());
4764  // Load map into r2.
4765  __ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
4766  // Load the map's "bit field 2".
4767  __ ldrb(r2, FieldMemOperand(r2, Map::kBitField2Offset));
4768  // Retrieve elements_kind from bit field 2.
4769  __ ubfx(r2, r2, Map::kElementsKindShift, Map::kElementsKindBitCount);
4770  __ cmp(r2, Operand(boilerplate_elements_kind));
4771  DeoptimizeIf(ne, instr->environment());
4772  }
4773 
4774  __ ldr(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
4775  __ ldr(r3, FieldMemOperand(r3, JSFunction::kLiteralsOffset));
4776  __ mov(r2, Operand(Smi::FromInt(instr->hydrogen()->literal_index())));
4777  // Boilerplate already exists, constant elements are never accessed.
4778  // Pass an empty fixed array.
4779  __ mov(r1, Operand(Handle<FixedArray>(heap->empty_fixed_array())));
4780  __ Push(r3, r2, r1);
4781 
4782  // Pick the right runtime function or stub to call.
4783  int length = instr->hydrogen()->length();
4784  if (instr->hydrogen()->IsCopyOnWrite()) {
4785  ASSERT(instr->hydrogen()->depth() == 1);
4786  FastCloneShallowArrayStub::Mode mode =
4787  FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS;
4788  FastCloneShallowArrayStub stub(mode, length);
4789  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
4790  } else if (instr->hydrogen()->depth() > 1) {
4791  CallRuntime(Runtime::kCreateArrayLiteral, 3, instr);
4792  } else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) {
4793  CallRuntime(Runtime::kCreateArrayLiteralShallow, 3, instr);
4794  } else {
4795  FastCloneShallowArrayStub::Mode mode =
4796  boilerplate_elements_kind == FAST_DOUBLE_ELEMENTS
4797  ? FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS
4798  : FastCloneShallowArrayStub::CLONE_ELEMENTS;
4799  FastCloneShallowArrayStub stub(mode, length);
4800  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
4801  }
4802 }
4803 
4804 
4805 void LCodeGen::EmitDeepCopy(Handle<JSObject> object,
4806  Register result,
4807  Register source,
4808  int* offset) {
4809  ASSERT(!source.is(r2));
4810  ASSERT(!result.is(r2));
4811 
4812  // Only elements backing stores for non-COW arrays need to be copied.
4813  Handle<FixedArrayBase> elements(object->elements());
4814  bool has_elements = elements->length() > 0 &&
4815  elements->map() != isolate()->heap()->fixed_cow_array_map();
4816 
4817  // Increase the offset so that subsequent objects end up right after
4818  // this object and its backing store.
4819  int object_offset = *offset;
4820  int object_size = object->map()->instance_size();
4821  int elements_offset = *offset + object_size;
4822  int elements_size = has_elements ? elements->Size() : 0;
4823  *offset += object_size + elements_size;
4824 
4825  // Copy object header.
4826  ASSERT(object->properties()->length() == 0);
4827  int inobject_properties = object->map()->inobject_properties();
4828  int header_size = object_size - inobject_properties * kPointerSize;
4829  for (int i = 0; i < header_size; i += kPointerSize) {
4830  if (has_elements && i == JSObject::kElementsOffset) {
4831  __ add(r2, result, Operand(elements_offset));
4832  } else {
4833  __ ldr(r2, FieldMemOperand(source, i));
4834  }
4835  __ str(r2, FieldMemOperand(result, object_offset + i));
4836  }
4837 
4838  // Copy in-object properties.
4839  for (int i = 0; i < inobject_properties; i++) {
4840  int total_offset = object_offset + object->GetInObjectPropertyOffset(i);
4841  Handle<Object> value = Handle<Object>(object->InObjectPropertyAt(i));
4842  if (value->IsJSObject()) {
4843  Handle<JSObject> value_object = Handle<JSObject>::cast(value);
4844  __ add(r2, result, Operand(*offset));
4845  __ str(r2, FieldMemOperand(result, total_offset));
4846  __ LoadHeapObject(source, value_object);
4847  EmitDeepCopy(value_object, result, source, offset);
4848  } else if (value->IsHeapObject()) {
4849  __ LoadHeapObject(r2, Handle<HeapObject>::cast(value));
4850  __ str(r2, FieldMemOperand(result, total_offset));
4851  } else {
4852  __ mov(r2, Operand(value));
4853  __ str(r2, FieldMemOperand(result, total_offset));
4854  }
4855  }
4856 
4857  if (has_elements) {
4858  // Copy elements backing store header.
4859  __ LoadHeapObject(source, elements);
4860  for (int i = 0; i < FixedArray::kHeaderSize; i += kPointerSize) {
4861  __ ldr(r2, FieldMemOperand(source, i));
4862  __ str(r2, FieldMemOperand(result, elements_offset + i));
4863  }
4864 
4865  // Copy elements backing store content.
4866  int elements_length = has_elements ? elements->length() : 0;
4867  if (elements->IsFixedDoubleArray()) {
4868  Handle<FixedDoubleArray> double_array =
4869  Handle<FixedDoubleArray>::cast(elements);
4870  for (int i = 0; i < elements_length; i++) {
4871  int64_t value = double_array->get_representation(i);
4872  // We only support little endian mode...
4873  int32_t value_low = value & 0xFFFFFFFF;
4874  int32_t value_high = value >> 32;
4875  int total_offset =
4876  elements_offset + FixedDoubleArray::OffsetOfElementAt(i);
4877  __ mov(r2, Operand(value_low));
4878  __ str(r2, FieldMemOperand(result, total_offset));
4879  __ mov(r2, Operand(value_high));
4880  __ str(r2, FieldMemOperand(result, total_offset + 4));
4881  }
4882  } else if (elements->IsFixedArray()) {
4883  Handle<FixedArray> fast_elements = Handle<FixedArray>::cast(elements);
4884  for (int i = 0; i < elements_length; i++) {
4885  int total_offset = elements_offset + FixedArray::OffsetOfElementAt(i);
4886  Handle<Object> value(fast_elements->get(i));
4887  if (value->IsJSObject()) {
4888  Handle<JSObject> value_object = Handle<JSObject>::cast(value);
4889  __ add(r2, result, Operand(*offset));
4890  __ str(r2, FieldMemOperand(result, total_offset));
4891  __ LoadHeapObject(source, value_object);
4892  EmitDeepCopy(value_object, result, source, offset);
4893  } else if (value->IsHeapObject()) {
4894  __ LoadHeapObject(r2, Handle<HeapObject>::cast(value));
4895  __ str(r2, FieldMemOperand(result, total_offset));
4896  } else {
4897  __ mov(r2, Operand(value));
4898  __ str(r2, FieldMemOperand(result, total_offset));
4899  }
4900  }
4901  } else {
4902  UNREACHABLE();
4903  }
4904  }
4905 }
4906 
4907 
4908 void LCodeGen::DoFastLiteral(LFastLiteral* instr) {
4909  int size = instr->hydrogen()->total_size();
4910  ElementsKind boilerplate_elements_kind =
4911  instr->hydrogen()->boilerplate()->GetElementsKind();
4912 
4913  // Deopt if the array literal boilerplate ElementsKind is of a type different
4914  // than the expected one. The check isn't necessary if the boilerplate has
4915  // already been converted to TERMINAL_FAST_ELEMENTS_KIND.
4916  if (CanTransitionToMoreGeneralFastElementsKind(
4917  boilerplate_elements_kind, true)) {
4918  __ LoadHeapObject(r1, instr->hydrogen()->boilerplate());
4919  // Load map into r2.
4921  // Load the map's "bit field 2".
4923  // Retrieve elements_kind from bit field 2.
4925  __ cmp(r2, Operand(boilerplate_elements_kind));
4926  DeoptimizeIf(ne, instr->environment());
4927  }
4928 
4929  // Allocate all objects that are part of the literal in one big
4930  // allocation. This avoids multiple limit checks.
4931  Label allocated, runtime_allocate;
4932  __ AllocateInNewSpace(size, r0, r2, r3, &runtime_allocate, TAG_OBJECT);
4933  __ jmp(&allocated);
4934 
4935  __ bind(&runtime_allocate);
4936  __ mov(r0, Operand(Smi::FromInt(size)));
4937  __ push(r0);
4938  CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
4939 
4940  __ bind(&allocated);
4941  int offset = 0;
4942  __ LoadHeapObject(r1, instr->hydrogen()->boilerplate());
4943  EmitDeepCopy(instr->hydrogen()->boilerplate(), r0, r1, &offset);
4944  ASSERT_EQ(size, offset);
4945 }
4946 
4947 
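// Note: nested object literals (depth > 1) always go through the runtime;
// shallow literals either call Runtime::kCreateObjectLiteralShallow or, in
// the fast case, FastCloneShallowObjectStub parameterized by the property
// count (constant_properties stores key/value pairs, hence the division by 2).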
4948 void LCodeGen::DoObjectLiteral(LObjectLiteral* instr) {
4949  Handle<FixedArray> literals(instr->environment()->closure()->literals());
4950  Handle<FixedArray> constant_properties =
4951  instr->hydrogen()->constant_properties();
4952 
4953  // Set up the parameters to the stub/runtime call.
4954  __ LoadHeapObject(r4, literals);
4955  __ mov(r3, Operand(Smi::FromInt(instr->hydrogen()->literal_index())));
4956  __ mov(r2, Operand(constant_properties));
4957  int flags = instr->hydrogen()->fast_elements()
4958  ? ObjectLiteral::kFastElements
4959  : ObjectLiteral::kNoFlags;
4960  __ mov(r1, Operand(Smi::FromInt(flags)));
4961  __ Push(r4, r3, r2, r1);
4962 
4963  // Pick the right runtime function or stub to call.
4964  int properties_count = constant_properties->length() / 2;
4965  if (instr->hydrogen()->depth() > 1) {
4966  CallRuntime(Runtime::kCreateObjectLiteral, 4, instr);
4967  } else if (flags != ObjectLiteral::kFastElements ||
4968  properties_count > FastCloneShallowObjectStub::kMaximumClonedProperties) {
4969  CallRuntime(Runtime::kCreateObjectLiteralShallow, 4, instr);
4970  } else {
4971  FastCloneShallowObjectStub stub(properties_count);
4972  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
4973  }
4974 }
4975 
4976 
4977 void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
4978  ASSERT(ToRegister(instr->InputAt(0)).is(r0));
4979  __ push(r0);
4980  CallRuntime(Runtime::kToFastProperties, 1, instr);
4981 }
4982 
4983 
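// Note: the regexp literal is materialized lazily. If its slot in the
// literals array still holds undefined, the literal is created by a runtime
// call; afterwards (or if it already exists) a shallow copy of the JSRegExp
// is allocated and its fields are copied with an unrolled word-copy loop.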
4984 void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
4985  Label materialized;
4986  // Registers will be used as follows:
4987  // r3 = JS function.
4988  // r7 = literals array.
4989  // r1 = regexp literal.
4990  // r0 = regexp literal clone.
4991  // r2 and r4-r6 are used as temporaries.
4992  __ ldr(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
4993  __ ldr(r7, FieldMemOperand(r3, JSFunction::kLiteralsOffset));
4994  int literal_offset = FixedArray::kHeaderSize +
4995  instr->hydrogen()->literal_index() * kPointerSize;
4996  __ ldr(r1, FieldMemOperand(r7, literal_offset));
4997  __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
4998  __ cmp(r1, ip);
4999  __ b(ne, &materialized);
5000 
5001  // Create regexp literal using runtime function
5002  // Result will be in r0.
5003  __ mov(r6, Operand(Smi::FromInt(instr->hydrogen()->literal_index())));
5004  __ mov(r5, Operand(instr->hydrogen()->pattern()));
5005  __ mov(r4, Operand(instr->hydrogen()->flags()));
5006  __ Push(r7, r6, r5, r4);
5007  CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr);
5008  __ mov(r1, r0);
5009 
5010  __ bind(&materialized);
5011  int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
5012  Label allocated, runtime_allocate;
5013 
5014  __ AllocateInNewSpace(size, r0, r2, r3, &runtime_allocate, TAG_OBJECT);
5015  __ jmp(&allocated);
5016 
5017  __ bind(&runtime_allocate);
5018  __ mov(r0, Operand(Smi::FromInt(size)));
5019  __ Push(r1, r0);
5020  CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
5021  __ pop(r1);
5022 
5023  __ bind(&allocated);
5024  // Copy the content into the newly allocated memory.
5025  // (Unroll copy loop once for better throughput).
5026  for (int i = 0; i < size - kPointerSize; i += 2 * kPointerSize) {
5027  __ ldr(r3, FieldMemOperand(r1, i));
5028  __ ldr(r2, FieldMemOperand(r1, i + kPointerSize));
5029  __ str(r3, FieldMemOperand(r0, i));
5030  __ str(r2, FieldMemOperand(r0, i + kPointerSize));
5031  }
5032  if ((size % (2 * kPointerSize)) != 0) {
5033  __ ldr(r3, FieldMemOperand(r1, size - kPointerSize));
5034  __ str(r3, FieldMemOperand(r0, size - kPointerSize));
5035  }
5036 }
5037 
5038 
5039 void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
5040  // Use the fast case closure allocation code that allocates in new
5041  // space for nested functions that don't need literals cloning.
5042  Handle<SharedFunctionInfo> shared_info = instr->shared_info();
5043  bool pretenure = instr->hydrogen()->pretenure();
5044  if (!pretenure && shared_info->num_literals() == 0) {
5045  FastNewClosureStub stub(shared_info->language_mode());
5046  __ mov(r1, Operand(shared_info));
5047  __ push(r1);
5048  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
5049  } else {
5050  __ mov(r2, Operand(shared_info));
5051  __ mov(r1, Operand(pretenure
5052  ? factory()->true_value()
5053  : factory()->false_value()));
5054  __ Push(cp, r2, r1);
5055  CallRuntime(Runtime::kNewClosure, 3, instr);
5056  }
5057 }
5058 
5059 
5060 void LCodeGen::DoTypeof(LTypeof* instr) {
5061  Register input = ToRegister(instr->InputAt(0));
5062  __ push(input);
5063  CallRuntime(Runtime::kTypeof, 1, instr);
5064 }
5065 
5066 
5067 void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
5068  Register input = ToRegister(instr->InputAt(0));
5069  int true_block = chunk_->LookupDestination(instr->true_block_id());
5070  int false_block = chunk_->LookupDestination(instr->false_block_id());
5071  Label* true_label = chunk_->GetAssemblyLabel(true_block);
5072  Label* false_label = chunk_->GetAssemblyLabel(false_block);
5073 
5074  Condition final_branch_condition = EmitTypeofIs(true_label,
5075  false_label,
5076  input,
5077  instr->type_literal());
5078  if (final_branch_condition != kNoCondition) {
5079  EmitBranch(true_block, false_block, final_branch_condition);
5080  }
5081 }
5082 
5083 
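// Note: EmitTypeofIs compares against each possible typeof result string and
// either jumps straight to true_label/false_label or returns the condition
// code the caller should branch on; kNoCondition means the answer is always
// false and control has already been directed to false_label.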
5084 Condition LCodeGen::EmitTypeofIs(Label* true_label,
5085  Label* false_label,
5086  Register input,
5087  Handle<String> type_name) {
5088  Condition final_branch_condition = kNoCondition;
5089  Register scratch = scratch0();
5090  if (type_name->Equals(heap()->number_symbol())) {
5091  __ JumpIfSmi(input, true_label);
5092  __ ldr(input, FieldMemOperand(input, HeapObject::kMapOffset));
5093  __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
5094  __ cmp(input, Operand(ip));
5095  final_branch_condition = eq;
5096 
5097  } else if (type_name->Equals(heap()->string_symbol())) {
5098  __ JumpIfSmi(input, false_label);
5099  __ CompareObjectType(input, input, scratch, FIRST_NONSTRING_TYPE);
5100  __ b(ge, false_label);
5101  __ ldrb(ip, FieldMemOperand(input, Map::kBitFieldOffset));
5102  __ tst(ip, Operand(1 << Map::kIsUndetectable));
5103  final_branch_condition = eq;
5104 
5105  } else if (type_name->Equals(heap()->boolean_symbol())) {
5106  __ CompareRoot(input, Heap::kTrueValueRootIndex);
5107  __ b(eq, true_label);
5108  __ CompareRoot(input, Heap::kFalseValueRootIndex);
5109  final_branch_condition = eq;
5110 
5111  } else if (FLAG_harmony_typeof && type_name->Equals(heap()->null_symbol())) {
5112  __ CompareRoot(input, Heap::kNullValueRootIndex);
5113  final_branch_condition = eq;
5114 
5115  } else if (type_name->Equals(heap()->undefined_symbol())) {
5116  __ CompareRoot(input, Heap::kUndefinedValueRootIndex);
5117  __ b(eq, true_label);
5118  __ JumpIfSmi(input, false_label);
5119  // Check for undetectable objects => true.
5120  __ ldr(input, FieldMemOperand(input, HeapObject::kMapOffset));
5121  __ ldrb(ip, FieldMemOperand(input, Map::kBitFieldOffset));
5122  __ tst(ip, Operand(1 << Map::kIsUndetectable));
5123  final_branch_condition = ne;
5124 
5125  } else if (type_name->Equals(heap()->function_symbol())) {
5126  STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
5127  __ JumpIfSmi(input, false_label);
5128  __ CompareObjectType(input, scratch, input, JS_FUNCTION_TYPE);
5129  __ b(eq, true_label);
5130  __ cmp(input, Operand(JS_FUNCTION_PROXY_TYPE));
5131  final_branch_condition = eq;
5132 
5133  } else if (type_name->Equals(heap()->object_symbol())) {
5134  __ JumpIfSmi(input, false_label);
5135  if (!FLAG_harmony_typeof) {
5136  __ CompareRoot(input, Heap::kNullValueRootIndex);
5137  __ b(eq, true_label);
5138  }
5139  __ CompareObjectType(input, input, scratch,
5140  FIRST_NONCALLABLE_SPEC_OBJECT_TYPE);
5141  __ b(lt, false_label);
5142  __ CompareInstanceType(input, scratch, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
5143  __ b(gt, false_label);
5144  // Check for undetectable objects => false.
5145  __ ldrb(ip, FieldMemOperand(input, Map::kBitFieldOffset));
5146  __ tst(ip, Operand(1 << Map::kIsUndetectable));
5147  final_branch_condition = eq;
5148 
5149  } else {
5150  __ b(false_label);
5151  }
5152 
5153  return final_branch_condition;
5154 }
5155 
5156 
5157 void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
5158  Register temp1 = ToRegister(instr->TempAt(0));
5159  int true_block = chunk_->LookupDestination(instr->true_block_id());
5160  int false_block = chunk_->LookupDestination(instr->false_block_id());
5161 
5162  EmitIsConstructCall(temp1, scratch0());
5163  EmitBranch(true_block, false_block, eq);
5164 }
5165 
5166 
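// Note: EmitIsConstructCall walks up to the caller's frame (skipping an
// arguments adaptor frame if one is present) and leaves the flags set by
// comparing the frame marker with StackFrame::CONSTRUCT, so callers branch
// on eq.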
5167 void LCodeGen::EmitIsConstructCall(Register temp1, Register temp2) {
5168  ASSERT(!temp1.is(temp2));
5169  // Get the frame pointer for the calling frame.
5170  __ ldr(temp1, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
5171 
5172  // Skip the arguments adaptor frame if it exists.
5173  Label check_frame_marker;
5174  __ ldr(temp2, MemOperand(temp1, StandardFrameConstants::kContextOffset));
5175  __ cmp(temp2, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
5176  __ b(ne, &check_frame_marker);
5177  __ ldr(temp1, MemOperand(temp1, StandardFrameConstants::kCallerFPOffset));
5178 
5179  // Check the marker in the calling frame.
5180  __ bind(&check_frame_marker);
5181  __ ldr(temp1, MemOperand(temp1, StandardFrameConstants::kMarkerOffset));
5182  __ cmp(temp1, Operand(Smi::FromInt(StackFrame::CONSTRUCT)));
5183 }
5184 
5185 
5186 void LCodeGen::EnsureSpaceForLazyDeopt() {
5187  // Ensure that we have enough space after the previous lazy-bailout
5188  // instruction for patching the code here.
5189  int current_pc = masm()->pc_offset();
5190  int patch_size = Deoptimizer::patch_size();
5191  if (current_pc < last_lazy_deopt_pc_ + patch_size) {
5192  // Block literal pool emission for duration of padding.
5193  Assembler::BlockConstPoolScope block_const_pool(masm());
5194  int padding_size = last_lazy_deopt_pc_ + patch_size - current_pc;
5195  ASSERT_EQ(0, padding_size % Assembler::kInstrSize);
5196  while (padding_size > 0) {
5197  __ nop();
5198  padding_size -= Assembler::kInstrSize;
5199  }
5200  }
5201  last_lazy_deopt_pc_ = masm()->pc_offset();
5202 }
5203 
5204 
5205 void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
5206  EnsureSpaceForLazyDeopt();
5207  ASSERT(instr->HasEnvironment());
5208  LEnvironment* env = instr->environment();
5209  RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
5210  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
5211 }
5212 
5213 
5214 void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
5215  DeoptimizeIf(al, instr->environment());
5216 }
5217 
5218 
5219 void LCodeGen::DoDeleteProperty(LDeleteProperty* instr) {
5220  Register object = ToRegister(instr->object());
5221  Register key = ToRegister(instr->key());
5222  Register strict = scratch0();
5223  __ mov(strict, Operand(Smi::FromInt(strict_mode_flag())));
5224  __ Push(object, key, strict);
5225  ASSERT(instr->HasPointerMap());
5226  LPointerMap* pointers = instr->pointer_map();
5227  RecordPosition(pointers->position());
5228  SafepointGenerator safepoint_generator(
5229  this, pointers, Safepoint::kLazyDeopt);
5230  __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION, safepoint_generator);
5231 }
5232 
5233 
5234 void LCodeGen::DoIn(LIn* instr) {
5235  Register obj = ToRegister(instr->object());
5236  Register key = ToRegister(instr->key());
5237  __ Push(key, obj);
5238  ASSERT(instr->HasPointerMap());
5239  LPointerMap* pointers = instr->pointer_map();
5240  RecordPosition(pointers->position());
5241  SafepointGenerator safepoint_generator(this, pointers, Safepoint::kLazyDeopt);
5242  __ InvokeBuiltin(Builtins::IN, CALL_FUNCTION, safepoint_generator);
5243 }
5244 
5245 
5246 void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
5247  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
5248  __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
5249  RecordSafepointWithLazyDeopt(
5250  instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
5251  ASSERT(instr->HasEnvironment());
5252  LEnvironment* env = instr->environment();
5253  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
5254 }
5255 
5256 
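// Note: function-entry stack checks call StackCheckStub inline, while
// backwards-branch checks jump to a DeferredStackCheck whose exit is the
// instruction's done_label; both variants register the environment for lazy
// deoptimization after EnsureSpaceForLazyDeopt has padded the code.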
5257 void LCodeGen::DoStackCheck(LStackCheck* instr) {
5258  class DeferredStackCheck: public LDeferredCode {
5259  public:
5260  DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
5261  : LDeferredCode(codegen), instr_(instr) { }
5262  virtual void Generate() { codegen()->DoDeferredStackCheck(instr_); }
5263  virtual LInstruction* instr() { return instr_; }
5264  private:
5265  LStackCheck* instr_;
5266  };
5267 
5268  ASSERT(instr->HasEnvironment());
5269  LEnvironment* env = instr->environment();
5270  // There is no LLazyBailout instruction for stack-checks. We have to
5271  // prepare for lazy deoptimization explicitly here.
5272  if (instr->hydrogen()->is_function_entry()) {
5273  // Perform stack overflow check.
5274  Label done;
5275  __ LoadRoot(ip, Heap::kStackLimitRootIndex);
5276  __ cmp(sp, Operand(ip));
5277  __ b(hs, &done);
5278  StackCheckStub stub;
5279  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
5280  EnsureSpaceForLazyDeopt();
5281  __ bind(&done);
5282  RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
5283  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
5284  } else {
5285  ASSERT(instr->hydrogen()->is_backwards_branch());
5286  // Perform stack overflow check if this goto needs it before jumping.
5287  DeferredStackCheck* deferred_stack_check =
5288  new(zone()) DeferredStackCheck(this, instr);
5289  __ LoadRoot(ip, Heap::kStackLimitRootIndex);
5290  __ cmp(sp, Operand(ip));
5291  __ b(lo, deferred_stack_check->entry());
5292  EnsureSpaceForLazyDeopt();
5293  __ bind(instr->done_label());
5294  deferred_stack_check->SetExit(instr->done_label());
5295  RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
5296  // Don't record a deoptimization index for the safepoint here.
5297  // This will be done explicitly when emitting call and the safepoint in
5298  // the deferred code.
5299  }
5300 }
5301 
5302 
5303 void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
5304  // This is a pseudo-instruction that ensures that the environment here is
5305  // properly registered for deoptimization and records the assembler's PC
5306  // offset.
5307  LEnvironment* environment = instr->environment();
5308  environment->SetSpilledRegisters(instr->SpilledRegisterArray(),
5309  instr->SpilledDoubleRegisterArray());
5310 
5311  // If the environment were already registered, we would have no way of
5312  // backpatching it with the spill slot operands.
5313  ASSERT(!environment->HasBeenRegistered());
5314  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
5315  ASSERT(osr_pc_offset_ == -1);
5316  osr_pc_offset_ = masm()->pc_offset();
5317 }
5318 
5319 
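// Note: DoForInPrepareMap deoptimizes when the for-in object in r0 is
// undefined, null, a smi or a proxy. If the enum cache check fails it calls
// Runtime::kGetPropertyNamesFast, and a result that is not a descriptor map
// (checked against the meta map) also triggers a deopt.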
5320 void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
5321  __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
5322  __ cmp(r0, ip);
5323  DeoptimizeIf(eq, instr->environment());
5324 
5325  Register null_value = r5;
5326  __ LoadRoot(null_value, Heap::kNullValueRootIndex);
5327  __ cmp(r0, null_value);
5328  DeoptimizeIf(eq, instr->environment());
5329 
5330  __ tst(r0, Operand(kSmiTagMask));
5331  DeoptimizeIf(eq, instr->environment());
5332 
5333  STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
5334  __ CompareObjectType(r0, r1, r1, LAST_JS_PROXY_TYPE);
5335  DeoptimizeIf(le, instr->environment());
5336 
5337  Label use_cache, call_runtime;
5338  __ CheckEnumCache(null_value, &call_runtime);
5339 
5340  __ ldr(r0, FieldMemOperand(r0, HeapObject::kMapOffset));
5341  __ b(&use_cache);
5342 
5343  // Get the set of properties to enumerate.
5344  __ bind(&call_runtime);
5345  __ push(r0);
5346  CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr);
5347 
5348  __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
5349  __ LoadRoot(ip, Heap::kMetaMapRootIndex);
5350  __ cmp(r1, ip);
5351  DeoptimizeIf(ne, instr->environment());
5352  __ bind(&use_cache);
5353 }
5354 
5355 
5356 void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
5357  Register map = ToRegister(instr->map());
5358  Register result = ToRegister(instr->result());
5359  __ LoadInstanceDescriptors(map, result);
5360  __ ldr(result,
5361  FieldMemOperand(result, DescriptorArray::kEnumerationIndexOffset));
5362  __ ldr(result,
5363  FieldMemOperand(result, FixedArray::SizeFor(instr->idx())));
5364  __ cmp(result, Operand(0));
5365  DeoptimizeIf(eq, instr->environment());
5366 }
5367 
5368 
5369 void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
5370  Register object = ToRegister(instr->value());
5371  Register map = ToRegister(instr->map());
5372  __ ldr(scratch0(), FieldMemOperand(object, HeapObject::kMapOffset));
5373  __ cmp(map, scratch0());
5374  DeoptimizeIf(ne, instr->environment());
5375 }
5376 
5377 
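// Note: the smi-tagged field index encodes where the property lives: a
// non-negative index is an in-object field loaded relative to the object,
// while a negative index selects a slot in the out-of-object properties
// backing store (see the comment on the index below).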
5378 void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
5379  Register object = ToRegister(instr->object());
5380  Register index = ToRegister(instr->index());
5381  Register result = ToRegister(instr->result());
5382  Register scratch = scratch0();
5383 
5384  Label out_of_object, done;
5385  __ cmp(index, Operand(0));
5386  __ b(lt, &out_of_object);
5387 
5389  __ add(scratch, object, Operand(index, LSL, kPointerSizeLog2 - kSmiTagSize));
5390  __ ldr(result, FieldMemOperand(scratch, JSObject::kHeaderSize));
5391 
5392  __ b(&done);
5393 
5394  __ bind(&out_of_object);
5395  __ ldr(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
5396  // Index is equal to negated out of object property index plus 1.
5397  __ sub(scratch, result, Operand(index, LSL, kPointerSizeLog2 - kSmiTagSize));
5398  __ ldr(result, FieldMemOperand(scratch,
5399  FixedArray::kHeaderSize - kPointerSize));
5400  __ bind(&done);
5401 }
5402 
5403 
5404 #undef __
5405 
5406 } } // namespace v8::internal