v8 3.14.5 (node 0.10.28)
lithium-codegen-arm.cc
1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
4 // met:
5 //
6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided
11 // with the distribution.
12 // * Neither the name of Google Inc. nor the names of its
13 // contributors may be used to endorse or promote products derived
14 // from this software without specific prior written permission.
15 //
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 
28 #include "v8.h"
29 
30 #include "lithium-codegen-arm.h"
31 #include "lithium-gap-resolver-arm.h"
32 #include "code-stubs.h"
33 #include "stub-cache.h"
34 
35 namespace v8 {
36 namespace internal {
37 
38 
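// SafepointGenerator is a small CallWrapper: the macro assembler's call
// helpers invoke it around a generated call so that a safepoint (the
// pointer map and deopt mode captured below) is recorded right after the
// call instruction.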
39 class SafepointGenerator : public CallWrapper {
40  public:
41  SafepointGenerator(LCodeGen* codegen,
42  LPointerMap* pointers,
43  Safepoint::DeoptMode mode)
44  : codegen_(codegen),
45  pointers_(pointers),
46  deopt_mode_(mode) { }
47  virtual ~SafepointGenerator() { }
48 
49  virtual void BeforeCall(int call_size) const { }
50 
51  virtual void AfterCall() const {
52  codegen_->RecordSafepoint(pointers_, deopt_mode_);
53  }
54 
55  private:
56  LCodeGen* codegen_;
57  LPointerMap* pointers_;
58  Safepoint::DeoptMode deopt_mode_;
59 };
60 
61 
62 #define __ masm()->
63 
64 bool LCodeGen::GenerateCode() {
65  HPhase phase("Z_Code generation", chunk());
66  ASSERT(is_unused());
67  status_ = GENERATING;
68  CpuFeatures::Scope scope1(VFP3);
69  CpuFeatures::Scope scope2(ARMv7);
70 
71  CodeStub::GenerateFPStubs();
72 
73  // Open a frame scope to indicate that there is a frame on the stack. The
74  // NONE indicates that the scope shouldn't actually generate code to set up
75  // the frame (that is done in GeneratePrologue).
76  FrameScope frame_scope(masm_, StackFrame::NONE);
77 
78  return GeneratePrologue() &&
79  GenerateBody() &&
80  GenerateDeferredCode() &&
81  GenerateDeoptJumpTable() &&
82  GenerateSafepointTable();
83 }
84 
85 
86 void LCodeGen::FinishCode(Handle<Code> code) {
87  ASSERT(is_done());
88  code->set_stack_slots(GetStackSlotCount());
89  code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
90  PopulateDeoptimizationData(code);
91 }
92 
93 
94 void LCodeGen::Abort(const char* reason) {
95  info()->set_bailout_reason(reason);
96  status_ = ABORTED;
97 }
98 
99 
100 void LCodeGen::Comment(const char* format, ...) {
101  if (!FLAG_code_comments) return;
102  char buffer[4 * KB];
103  StringBuilder builder(buffer, ARRAY_SIZE(buffer));
104  va_list arguments;
105  va_start(arguments, format);
106  builder.AddFormattedList(format, arguments);
107  va_end(arguments);
108 
109  // Copy the string before recording it in the assembler to avoid
110  // issues when the stack allocated buffer goes out of scope.
111  size_t length = builder.position();
112  Vector<char> copy = Vector<char>::New(length + 1);
113  memcpy(copy.start(), builder.Finalize(), copy.length());
114  masm()->RecordComment(copy.start());
115 }
116 
117 
118 bool LCodeGen::GeneratePrologue() {
119  ASSERT(is_generating());
120 
122 
123 #ifdef DEBUG
124  if (strlen(FLAG_stop_at) > 0 &&
125  info_->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
126  __ stop("stop_at");
127  }
128 #endif
129 
130  // r1: Callee's JS function.
131  // cp: Callee's context.
132  // fp: Caller's frame pointer.
133  // lr: Caller's pc.
134 
135  // Strict mode functions and builtins need to replace the receiver
136  // with undefined when called as functions (without an explicit
137  // receiver object). r5 is zero for method calls and non-zero for
138  // function calls.
139  if (!info_->is_classic_mode() || info_->is_native()) {
140  Label ok;
141  __ cmp(r5, Operand(0));
142  __ b(eq, &ok);
143  int receiver_offset = scope()->num_parameters() * kPointerSize;
144  __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
145  __ str(r2, MemOperand(sp, receiver_offset));
146  __ bind(&ok);
147  }
148 
149  __ stm(db_w, sp, r1.bit() | cp.bit() | fp.bit() | lr.bit());
150  __ add(fp, sp, Operand(2 * kPointerSize)); // Adjust FP to point to saved FP.
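  // Editor's note: assuming the standard V8 ARM JS frame (kPointerSize == 4),
  // the stm/add above leaves the frame as:
  //   fp + 8 and up : receiver and incoming parameters (caller's frame)
  //   fp + 4        : caller's pc (saved lr)
  //   fp + 0        : caller's fp
  //   fp - 4        : context (cp)
  //   fp - 8        : callee's JS function (r1)
  // Spill slots reserved below grow downwards from fp - 12.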
151 
152  // Reserve space for the stack slots needed by the code.
153  int slots = GetStackSlotCount();
154  if (slots > 0) {
155  if (FLAG_debug_code) {
156  __ mov(r0, Operand(slots));
157  __ mov(r2, Operand(kSlotsZapValue));
158  Label loop;
159  __ bind(&loop);
160  __ push(r2);
161  __ sub(r0, r0, Operand(1), SetCC);
162  __ b(ne, &loop);
163  } else {
164  __ sub(sp, sp, Operand(slots * kPointerSize));
165  }
166  }
167 
168  // Possibly allocate a local context.
169  int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
170  if (heap_slots > 0) {
171  Comment(";;; Allocate local context");
172  // Argument to NewContext is the function, which is in r1.
173  __ push(r1);
174  if (heap_slots <= FastNewContextStub::kMaximumSlots) {
175  FastNewContextStub stub(heap_slots);
176  __ CallStub(&stub);
177  } else {
178  __ CallRuntime(Runtime::kNewFunctionContext, 1);
179  }
180  RecordSafepoint(Safepoint::kNoLazyDeopt);
181  // Context is returned in both r0 and cp. It replaces the context
182  // passed to us. It's saved in the stack and kept live in cp.
183  __ str(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
184  // Copy any necessary parameters into the context.
185  int num_parameters = scope()->num_parameters();
186  for (int i = 0; i < num_parameters; i++) {
187  Variable* var = scope()->parameter(i);
188  if (var->IsContextSlot()) {
189  int parameter_offset = StandardFrameConstants::kCallerSPOffset +
190  (num_parameters - 1 - i) * kPointerSize;
191  // Load parameter from stack.
192  __ ldr(r0, MemOperand(fp, parameter_offset));
193  // Store it in the context.
194  MemOperand target = ContextOperand(cp, var->index());
195  __ str(r0, target);
196  // Update the write barrier. This clobbers r3 and r0.
197  __ RecordWriteContextSlot(
198  cp, target.offset(), r0, r3, kLRHasBeenSaved, kSaveFPRegs);
199  }
200  }
201  Comment(";;; End allocate local context");
202  }
203 
204  // Trace the call.
205  if (FLAG_trace) {
206  __ CallRuntime(Runtime::kTraceEnter, 0);
207  }
208  return !is_aborted();
209 }
210 
211 
212 bool LCodeGen::GenerateBody() {
213  ASSERT(is_generating());
214  bool emit_instructions = true;
215  for (current_instruction_ = 0;
216  !is_aborted() && current_instruction_ < instructions_->length();
217  current_instruction_++) {
218  LInstruction* instr = instructions_->at(current_instruction_);
219  if (instr->IsLabel()) {
220  LLabel* label = LLabel::cast(instr);
221  emit_instructions = !label->HasReplacement();
222  }
223 
224  if (emit_instructions) {
225  Comment(";;; @%d: %s.", current_instruction_, instr->Mnemonic());
226  instr->CompileToNative(this);
227  }
228  }
229  EnsureSpaceForLazyDeopt();
230  return !is_aborted();
231 }
232 
233 
234 bool LCodeGen::GenerateDeferredCode() {
235  ASSERT(is_generating());
236  if (deferred_.length() > 0) {
237  for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
238  LDeferredCode* code = deferred_[i];
239  __ bind(code->entry());
240  Comment(";;; Deferred code @%d: %s.",
241  code->instruction_index(),
242  code->instr()->Mnemonic());
243  code->Generate();
244  __ jmp(code->exit());
245  }
246  }
247 
248  // Force constant pool emission at the end of the deferred code to make
249  // sure that no constant pools are emitted after.
250  masm()->CheckConstPool(true, false);
251 
252  return !is_aborted();
253 }
254 
255 
256 bool LCodeGen::GenerateDeoptJumpTable() {
257  // Check that the jump table is accessible from everywhere in the function
258  // code, i.e. that offsets to the table can be encoded in the 24bit signed
259  // immediate of a branch instruction.
260  // To simplify we consider the code size from the first instruction to the
261  // end of the jump table. We also don't consider the pc load delta.
262  // Each entry in the jump table generates one instruction and inlines one
263  // 32bit data after it.
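  // (An ARM branch immediate encodes a signed 24-bit word offset, i.e. a
  // reach of roughly +/-32MB of code, hence the is_int24 check below.)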
264  if (!is_int24((masm()->pc_offset() / Assembler::kInstrSize) +
265  deopt_jump_table_.length() * 2)) {
266  Abort("Generated code is too large");
267  }
268 
269  // Block the constant pool emission during the jump table emission.
270  __ BlockConstPoolFor(deopt_jump_table_.length());
271  __ RecordComment("[ Deoptimisation jump table");
272  Label table_start;
273  __ bind(&table_start);
274  for (int i = 0; i < deopt_jump_table_.length(); i++) {
275  __ bind(&deopt_jump_table_[i].label);
276  __ ldr(pc, MemOperand(pc, Assembler::kInstrSize - Assembler::kPcLoadDelta));
277  __ dd(reinterpret_cast<uint32_t>(deopt_jump_table_[i].address));
278  }
279  ASSERT(masm()->InstructionsGeneratedSince(&table_start) ==
280  deopt_jump_table_.length() * 2);
281  __ RecordComment("]");
282 
283  // The deoptimization jump table is the last part of the instruction
284  // sequence. Mark the generated code as done unless we bailed out.
285  if (!is_aborted()) status_ = DONE;
286  return !is_aborted();
287 }
288 
289 
290 bool LCodeGen::GenerateSafepointTable() {
291  ASSERT(is_done());
292  safepoints_.Emit(masm(), GetStackSlotCount());
293  return !is_aborted();
294 }
295 
296 
297 Register LCodeGen::ToRegister(int index) const {
298  return Register::FromAllocationIndex(index);
299 }
300 
301 
302 DoubleRegister LCodeGen::ToDoubleRegister(int index) const {
303  return DoubleRegister::FromAllocationIndex(index);
304 }
305 
306 
307 Register LCodeGen::ToRegister(LOperand* op) const {
308  ASSERT(op->IsRegister());
309  return ToRegister(op->index());
310 }
311 
312 
313 Register LCodeGen::EmitLoadRegister(LOperand* op, Register scratch) {
314  if (op->IsRegister()) {
315  return ToRegister(op->index());
316  } else if (op->IsConstantOperand()) {
317  LConstantOperand* const_op = LConstantOperand::cast(op);
318  HConstant* constant = chunk_->LookupConstant(const_op);
319  Handle<Object> literal = constant->handle();
320  Representation r = chunk_->LookupLiteralRepresentation(const_op);
321  if (r.IsInteger32()) {
322  ASSERT(literal->IsNumber());
323  __ mov(scratch, Operand(static_cast<int32_t>(literal->Number())));
324  } else if (r.IsDouble()) {
325  Abort("EmitLoadRegister: Unsupported double immediate.");
326  } else {
327  ASSERT(r.IsTagged());
328  if (literal->IsSmi()) {
329  __ mov(scratch, Operand(literal));
330  } else {
331  __ LoadHeapObject(scratch, Handle<HeapObject>::cast(literal));
332  }
333  }
334  return scratch;
335  } else if (op->IsStackSlot() || op->IsArgument()) {
336  __ ldr(scratch, ToMemOperand(op));
337  return scratch;
338  }
339  UNREACHABLE();
340  return scratch;
341 }
342 
343 
344 DoubleRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
345  ASSERT(op->IsDoubleRegister());
346  return ToDoubleRegister(op->index());
347 }
348 
349 
350 DoubleRegister LCodeGen::EmitLoadDoubleRegister(LOperand* op,
351  SwVfpRegister flt_scratch,
352  DoubleRegister dbl_scratch) {
353  if (op->IsDoubleRegister()) {
354  return ToDoubleRegister(op->index());
355  } else if (op->IsConstantOperand()) {
356  LConstantOperand* const_op = LConstantOperand::cast(op);
357  HConstant* constant = chunk_->LookupConstant(const_op);
358  Handle<Object> literal = constant->handle();
359  Representation r = chunk_->LookupLiteralRepresentation(const_op);
360  if (r.IsInteger32()) {
361  ASSERT(literal->IsNumber());
362  __ mov(ip, Operand(static_cast<int32_t>(literal->Number())));
363  __ vmov(flt_scratch, ip);
364  __ vcvt_f64_s32(dbl_scratch, flt_scratch);
365  return dbl_scratch;
366  } else if (r.IsDouble()) {
367  Abort("unsupported double immediate");
368  } else if (r.IsTagged()) {
369  Abort("unsupported tagged immediate");
370  }
371  } else if (op->IsStackSlot() || op->IsArgument()) {
372  // TODO(regis): Why is vldr not taking a MemOperand?
373  // __ vldr(dbl_scratch, ToMemOperand(op));
374  MemOperand mem_op = ToMemOperand(op);
375  __ vldr(dbl_scratch, mem_op.rn(), mem_op.offset());
376  return dbl_scratch;
377  }
378  UNREACHABLE();
379  return dbl_scratch;
380 }
381 
382 
383 Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
384  HConstant* constant = chunk_->LookupConstant(op);
385  ASSERT(chunk_->LookupLiteralRepresentation(op).IsTagged());
386  return constant->handle();
387 }
388 
389 
390 bool LCodeGen::IsInteger32(LConstantOperand* op) const {
391  return chunk_->LookupLiteralRepresentation(op).IsInteger32();
392 }
393 
394 
395 int LCodeGen::ToInteger32(LConstantOperand* op) const {
396  HConstant* constant = chunk_->LookupConstant(op);
397  ASSERT(chunk_->LookupLiteralRepresentation(op).IsInteger32());
398  ASSERT(constant->HasInteger32Value());
399  return constant->Integer32Value();
400 }
401 
402 
403 double LCodeGen::ToDouble(LConstantOperand* op) const {
404  HConstant* constant = chunk_->LookupConstant(op);
405  ASSERT(constant->HasDoubleValue());
406  return constant->DoubleValue();
407 }
408 
409 
410 Operand LCodeGen::ToOperand(LOperand* op) {
411  if (op->IsConstantOperand()) {
412  LConstantOperand* const_op = LConstantOperand::cast(op);
413  HConstant* constant = chunk()->LookupConstant(const_op);
414  Representation r = chunk_->LookupLiteralRepresentation(const_op);
415  if (r.IsInteger32()) {
416  ASSERT(constant->HasInteger32Value());
417  return Operand(constant->Integer32Value());
418  } else if (r.IsDouble()) {
419  Abort("ToOperand Unsupported double immediate.");
420  }
421  ASSERT(r.IsTagged());
422  return Operand(constant->handle());
423  } else if (op->IsRegister()) {
424  return Operand(ToRegister(op));
425  } else if (op->IsDoubleRegister()) {
426  Abort("ToOperand IsDoubleRegister unimplemented");
427  return Operand(0);
428  }
429  // Stack slots not implemented, use ToMemOperand instead.
430  UNREACHABLE();
431  return Operand(0);
432 }
433 
434 
435 MemOperand LCodeGen::ToMemOperand(LOperand* op) const {
436  ASSERT(!op->IsRegister());
437  ASSERT(!op->IsDoubleRegister());
438  ASSERT(op->IsStackSlot() || op->IsDoubleStackSlot());
439  int index = op->index();
440  if (index >= 0) {
441  // Local or spill slot. Skip the frame pointer, function, and
442  // context in the fixed part of the frame.
443  return MemOperand(fp, -(index + 3) * kPointerSize);
444  } else {
445  // Incoming parameter. Skip the return address.
446  return MemOperand(fp, -(index - 1) * kPointerSize);
447  }
448 }
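// Editor's note: with kPointerSize == 4 this maps spill slot 0 to
// MemOperand(fp, -12) (just below the saved function at fp - 8), spill
// slot 1 to fp - 16, and the incoming parameter with index -1 to fp + 8
// (just above the saved pc), matching the frame built in GeneratePrologue.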
449 
450 
451 MemOperand LCodeGen::ToHighMemOperand(LOperand* op) const {
452  ASSERT(op->IsDoubleStackSlot());
453  int index = op->index();
454  if (index >= 0) {
455  // Local or spill slot. Skip the frame pointer, function, context,
456  // and the first word of the double in the fixed part of the frame.
457  return MemOperand(fp, -(index + 3) * kPointerSize + kPointerSize);
458  } else {
459  // Incoming parameter. Skip the return address and the first word of
460  // the double.
461  return MemOperand(fp, -(index - 1) * kPointerSize + kPointerSize);
462  }
463 }
464 
465 
466 void LCodeGen::WriteTranslation(LEnvironment* environment,
467  Translation* translation,
468  int* arguments_index,
469  int* arguments_count) {
470  if (environment == NULL) return;
471 
472  // The translation includes one command per value in the environment.
473  int translation_size = environment->values()->length();
474  // The output frame height does not include the parameters.
475  int height = translation_size - environment->parameter_count();
476 
477  // Function parameters are arguments to the outermost environment. The
478  // arguments index points to the first element of a sequence of tagged
479  // values on the stack that represent the arguments. This needs to be
480  // kept in sync with the LArgumentsElements implementation.
481  *arguments_index = -environment->parameter_count();
482  *arguments_count = environment->parameter_count();
483 
484  WriteTranslation(environment->outer(),
485  translation,
486  arguments_index,
487  arguments_count);
488  int closure_id = *info()->closure() != *environment->closure()
489  ? DefineDeoptimizationLiteral(environment->closure())
490  : Translation::kSelfLiteralId;
491 
492  switch (environment->frame_type()) {
493  case JS_FUNCTION:
494  translation->BeginJSFrame(environment->ast_id(), closure_id, height);
495  break;
496  case JS_CONSTRUCT:
497  translation->BeginConstructStubFrame(closure_id, translation_size);
498  break;
499  case JS_GETTER:
500  ASSERT(translation_size == 1);
501  ASSERT(height == 0);
502  translation->BeginGetterStubFrame(closure_id);
503  break;
504  case JS_SETTER:
505  ASSERT(translation_size == 2);
506  ASSERT(height == 0);
507  translation->BeginSetterStubFrame(closure_id);
508  break;
509  case ARGUMENTS_ADAPTOR:
510  translation->BeginArgumentsAdaptorFrame(closure_id, translation_size);
511  break;
512  }
513 
514  // Inlined frames which push their arguments cause the index to be
515  // bumped and a new stack area to be used for materialization.
516  if (environment->entry() != NULL &&
517  environment->entry()->arguments_pushed()) {
518  *arguments_index = *arguments_index < 0
519  ? GetStackSlotCount()
520  : *arguments_index + *arguments_count;
521  *arguments_count = environment->entry()->arguments_count() + 1;
522  }
523 
524  for (int i = 0; i < translation_size; ++i) {
525  LOperand* value = environment->values()->at(i);
526  // spilled_registers_ and spilled_double_registers_ are either
527  // both NULL or both set.
528  if (environment->spilled_registers() != NULL && value != NULL) {
529  if (value->IsRegister() &&
530  environment->spilled_registers()[value->index()] != NULL) {
531  translation->MarkDuplicate();
532  AddToTranslation(translation,
533  environment->spilled_registers()[value->index()],
534  environment->HasTaggedValueAt(i),
535  environment->HasUint32ValueAt(i),
536  *arguments_index,
537  *arguments_count);
538  } else if (
539  value->IsDoubleRegister() &&
540  environment->spilled_double_registers()[value->index()] != NULL) {
541  translation->MarkDuplicate();
542  AddToTranslation(
543  translation,
544  environment->spilled_double_registers()[value->index()],
545  false,
546  false,
547  *arguments_index,
548  *arguments_count);
549  }
550  }
551 
552  AddToTranslation(translation,
553  value,
554  environment->HasTaggedValueAt(i),
555  environment->HasUint32ValueAt(i),
556  *arguments_index,
557  *arguments_count);
558  }
559 }
560 
561 
562 void LCodeGen::AddToTranslation(Translation* translation,
563  LOperand* op,
564  bool is_tagged,
565  bool is_uint32,
566  int arguments_index,
567  int arguments_count) {
568  if (op == NULL) {
569  // TODO(twuerthinger): Introduce marker operands to indicate that this value
570  // is not present and must be reconstructed from the deoptimizer. Currently
571  // this is only used for the arguments object.
572  translation->StoreArgumentsObject(arguments_index, arguments_count);
573  } else if (op->IsStackSlot()) {
574  if (is_tagged) {
575  translation->StoreStackSlot(op->index());
576  } else if (is_uint32) {
577  translation->StoreUint32StackSlot(op->index());
578  } else {
579  translation->StoreInt32StackSlot(op->index());
580  }
581  } else if (op->IsDoubleStackSlot()) {
582  translation->StoreDoubleStackSlot(op->index());
583  } else if (op->IsArgument()) {
584  ASSERT(is_tagged);
585  int src_index = GetStackSlotCount() + op->index();
586  translation->StoreStackSlot(src_index);
587  } else if (op->IsRegister()) {
588  Register reg = ToRegister(op);
589  if (is_tagged) {
590  translation->StoreRegister(reg);
591  } else if (is_uint32) {
592  translation->StoreUint32Register(reg);
593  } else {
594  translation->StoreInt32Register(reg);
595  }
596  } else if (op->IsDoubleRegister()) {
597  DoubleRegister reg = ToDoubleRegister(op);
598  translation->StoreDoubleRegister(reg);
599  } else if (op->IsConstantOperand()) {
600  HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op));
601  int src_index = DefineDeoptimizationLiteral(constant->handle());
602  translation->StoreLiteral(src_index);
603  } else {
604  UNREACHABLE();
605  }
606 }
607 
608 
609 void LCodeGen::CallCode(Handle<Code> code,
610  RelocInfo::Mode mode,
611  LInstruction* instr,
612  TargetAddressStorageMode storage_mode) {
613  CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT, storage_mode);
614 }
615 
616 
617 void LCodeGen::CallCodeGeneric(Handle<Code> code,
618  RelocInfo::Mode mode,
619  LInstruction* instr,
620  SafepointMode safepoint_mode,
621  TargetAddressStorageMode storage_mode) {
622  ASSERT(instr != NULL);
623  // Block literal pool emission to ensure nop indicating no inlined smi code
624  // is in the correct position.
625  Assembler::BlockConstPoolScope block_const_pool(masm());
626  LPointerMap* pointers = instr->pointer_map();
627  RecordPosition(pointers->position());
628  __ Call(code, mode, TypeFeedbackId::None(), al, storage_mode);
629  RecordSafepointWithLazyDeopt(instr, safepoint_mode);
630 
631  // Signal that we don't inline smi code before these stubs in the
632  // optimizing code generator.
633  if (code->kind() == Code::BINARY_OP_IC ||
634  code->kind() == Code::COMPARE_IC) {
635  __ nop();
636  }
637 }
638 
639 
640 void LCodeGen::CallRuntime(const Runtime::Function* function,
641  int num_arguments,
642  LInstruction* instr) {
643  ASSERT(instr != NULL);
644  LPointerMap* pointers = instr->pointer_map();
645  ASSERT(pointers != NULL);
646  RecordPosition(pointers->position());
647 
648  __ CallRuntime(function, num_arguments);
649  RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
650 }
651 
652 
653 void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
654  int argc,
655  LInstruction* instr) {
656  __ CallRuntimeSaveDoubles(id);
657  RecordSafepointWithRegisters(
658  instr->pointer_map(), argc, Safepoint::kNoLazyDeopt);
659 }
660 
661 
662 void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
663  Safepoint::DeoptMode mode) {
664  if (!environment->HasBeenRegistered()) {
665  // Physical stack frame layout:
666  // -x ............. -4 0 ..................................... y
667  // [incoming arguments] [spill slots] [pushed outgoing arguments]
668 
669  // Layout of the environment:
670  // 0 ..................................................... size-1
671  // [parameters] [locals] [expression stack including arguments]
672 
673  // Layout of the translation:
674  // 0 ........................................................ size - 1 + 4
675  // [expression stack including arguments] [locals] [4 words] [parameters]
676  // |>------------ translation_size ------------<|
677 
678  int frame_count = 0;
679  int jsframe_count = 0;
680  int args_index = 0;
681  int args_count = 0;
682  for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
683  ++frame_count;
684  if (e->frame_type() == JS_FUNCTION) {
685  ++jsframe_count;
686  }
687  }
688  Translation translation(&translations_, frame_count, jsframe_count, zone());
689  WriteTranslation(environment, &translation, &args_index, &args_count);
690  int deoptimization_index = deoptimizations_.length();
691  int pc_offset = masm()->pc_offset();
692  environment->Register(deoptimization_index,
693  translation.index(),
694  (mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
695  deoptimizations_.Add(environment, zone());
696  }
697 }
698 
699 
700 void LCodeGen::DeoptimizeIf(Condition cc, LEnvironment* environment) {
701  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
702  ASSERT(environment->HasBeenRegistered());
703  int id = environment->deoptimization_index();
704  Address entry = Deoptimizer::GetDeoptimizationEntry(id, Deoptimizer::EAGER);
705  if (entry == NULL) {
706  Abort("bailout was not prepared");
707  return;
708  }
709 
710  ASSERT(FLAG_deopt_every_n_times < 2); // Other values not supported on ARM.
711 
712  if (FLAG_deopt_every_n_times == 1 &&
713  info_->shared_info()->opt_count() == id) {
714  __ Jump(entry, RelocInfo::RUNTIME_ENTRY);
715  return;
716  }
717 
718  if (FLAG_trap_on_deopt) __ stop("trap_on_deopt", cc);
719 
720  if (cc == al) {
721  __ Jump(entry, RelocInfo::RUNTIME_ENTRY);
722  } else {
723  // We often have several deopts to the same entry, reuse the last
724  // jump entry if this is the case.
725  if (deopt_jump_table_.is_empty() ||
726  (deopt_jump_table_.last().address != entry)) {
727  deopt_jump_table_.Add(JumpTableEntry(entry), zone());
728  }
729  __ b(cc, &deopt_jump_table_.last().label);
730  }
731 }
732 
733 
734 void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
735  int length = deoptimizations_.length();
736  if (length == 0) return;
737  Handle<DeoptimizationInputData> data =
738  factory()->NewDeoptimizationInputData(length, TENURED);
739 
740  Handle<ByteArray> translations = translations_.CreateByteArray();
741  data->SetTranslationByteArray(*translations);
742  data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));
743 
744  Handle<FixedArray> literals =
745  factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
746  for (int i = 0; i < deoptimization_literals_.length(); i++) {
747  literals->set(i, *deoptimization_literals_[i]);
748  }
749  data->SetLiteralArray(*literals);
750 
751  data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id().ToInt()));
752  data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_));
753 
754  // Populate the deoptimization entries.
755  for (int i = 0; i < length; i++) {
756  LEnvironment* env = deoptimizations_[i];
757  data->SetAstId(i, env->ast_id());
758  data->SetTranslationIndex(i, Smi::FromInt(env->translation_index()));
759  data->SetArgumentsStackHeight(i,
760  Smi::FromInt(env->arguments_stack_height()));
761  data->SetPc(i, Smi::FromInt(env->pc_offset()));
762  }
763  code->set_deoptimization_data(*data);
764 }
765 
766 
767 int LCodeGen::DefineDeoptimizationLiteral(Handle<Object> literal) {
768  int result = deoptimization_literals_.length();
769  for (int i = 0; i < deoptimization_literals_.length(); ++i) {
770  if (deoptimization_literals_[i].is_identical_to(literal)) return i;
771  }
772  deoptimization_literals_.Add(literal, zone());
773  return result;
774 }
775 
776 
777 void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
778  ASSERT(deoptimization_literals_.length() == 0);
779 
780  const ZoneList<Handle<JSFunction> >* inlined_closures =
781  chunk()->inlined_closures();
782 
783  for (int i = 0, length = inlined_closures->length();
784  i < length;
785  i++) {
786  DefineDeoptimizationLiteral(inlined_closures->at(i));
787  }
788 
789  inlined_function_count_ = deoptimization_literals_.length();
790 }
791 
792 
793 void LCodeGen::RecordSafepointWithLazyDeopt(
794  LInstruction* instr, SafepointMode safepoint_mode) {
795  if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
796  RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt);
797  } else {
798  ASSERT(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
799  RecordSafepointWithRegisters(
800  instr->pointer_map(), 0, Safepoint::kLazyDeopt);
801  }
802 }
803 
804 
805 void LCodeGen::RecordSafepoint(
806  LPointerMap* pointers,
807  Safepoint::Kind kind,
808  int arguments,
809  Safepoint::DeoptMode deopt_mode) {
810  ASSERT(expected_safepoint_kind_ == kind);
811 
812  const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();
813  Safepoint safepoint = safepoints_.DefineSafepoint(masm(),
814  kind, arguments, deopt_mode);
815  for (int i = 0; i < operands->length(); i++) {
816  LOperand* pointer = operands->at(i);
817  if (pointer->IsStackSlot()) {
818  safepoint.DefinePointerSlot(pointer->index(), zone());
819  } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
820  safepoint.DefinePointerRegister(ToRegister(pointer), zone());
821  }
822  }
823  if (kind & Safepoint::kWithRegisters) {
824  // Register cp always contains a pointer to the context.
825  safepoint.DefinePointerRegister(cp, zone());
826  }
827 }
828 
829 
830 void LCodeGen::RecordSafepoint(LPointerMap* pointers,
831  Safepoint::DeoptMode deopt_mode) {
832  RecordSafepoint(pointers, Safepoint::kSimple, 0, deopt_mode);
833 }
834 
835 
836 void LCodeGen::RecordSafepoint(Safepoint::DeoptMode deopt_mode) {
837  LPointerMap empty_pointers(RelocInfo::kNoPosition, zone());
838  RecordSafepoint(&empty_pointers, deopt_mode);
839 }
840 
841 
842 void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
843  int arguments,
844  Safepoint::DeoptMode deopt_mode) {
845  RecordSafepoint(
846  pointers, Safepoint::kWithRegisters, arguments, deopt_mode);
847 }
848 
849 
850 void LCodeGen::RecordSafepointWithRegistersAndDoubles(
851  LPointerMap* pointers,
852  int arguments,
853  Safepoint::DeoptMode deopt_mode) {
854  RecordSafepoint(
855  pointers, Safepoint::kWithRegistersAndDoubles, arguments, deopt_mode);
856 }
857 
858 
859 void LCodeGen::RecordPosition(int position) {
860  if (position == RelocInfo::kNoPosition) return;
861  masm()->positions_recorder()->RecordPosition(position);
862 }
863 
864 
865 void LCodeGen::DoLabel(LLabel* label) {
866  if (label->is_loop_header()) {
867  Comment(";;; B%d - LOOP entry", label->block_id());
868  } else {
869  Comment(";;; B%d", label->block_id());
870  }
871  __ bind(label->label());
872  current_block_ = label->block_id();
873  DoGap(label);
874 }
875 
876 
877 void LCodeGen::DoParallelMove(LParallelMove* move) {
878  resolver_.Resolve(move);
879 }
880 
881 
882 void LCodeGen::DoGap(LGap* gap) {
883  for (int i = LGap::FIRST_INNER_POSITION;
884  i <= LGap::LAST_INNER_POSITION;
885  i++) {
886  LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i);
887  LParallelMove* move = gap->GetParallelMove(inner_pos);
888  if (move != NULL) DoParallelMove(move);
889  }
890 }
891 
892 
893 void LCodeGen::DoInstructionGap(LInstructionGap* instr) {
894  DoGap(instr);
895 }
896 
897 
898 void LCodeGen::DoParameter(LParameter* instr) {
899  // Nothing to do.
900 }
901 
902 
903 void LCodeGen::DoCallStub(LCallStub* instr) {
904  ASSERT(ToRegister(instr->result()).is(r0));
905  switch (instr->hydrogen()->major_key()) {
906  case CodeStub::RegExpConstructResult: {
907  RegExpConstructResultStub stub;
908  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
909  break;
910  }
911  case CodeStub::RegExpExec: {
912  RegExpExecStub stub;
913  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
914  break;
915  }
916  case CodeStub::SubString: {
917  SubStringStub stub;
918  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
919  break;
920  }
921  case CodeStub::NumberToString: {
922  NumberToStringStub stub;
923  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
924  break;
925  }
926  case CodeStub::StringAdd: {
927  StringAddStub stub(NO_STRING_ADD_FLAGS);
928  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
929  break;
930  }
931  case CodeStub::StringCompare: {
932  StringCompareStub stub;
933  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
934  break;
935  }
936  case CodeStub::TranscendentalCache: {
937  __ ldr(r0, MemOperand(sp, 0));
938  TranscendentalCacheStub stub(instr->transcendental_type(),
939  TranscendentalCacheStub::TAGGED);
940  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
941  break;
942  }
943  default:
944  UNREACHABLE();
945  }
946 }
947 
948 
949 void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
950  // Nothing to do.
951 }
952 
953 
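// DoModI picks one of three strategies: a bit mask when the divisor is a
// known power of two, an sdiv/mls instruction pair when the CPU supports
// SUDIV, and otherwise a VFP fallback that divides, truncates the quotient
// and multiplies back to recover the remainder.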
954 void LCodeGen::DoModI(LModI* instr) {
955  if (instr->hydrogen()->HasPowerOf2Divisor()) {
956  Register dividend = ToRegister(instr->left());
957  Register result = ToRegister(instr->result());
958 
959  int32_t divisor =
960  HConstant::cast(instr->hydrogen()->right())->Integer32Value();
961 
962  if (divisor < 0) divisor = -divisor;
963 
964  Label positive_dividend, done;
965  __ cmp(dividend, Operand(0));
966  __ b(pl, &positive_dividend);
967  __ rsb(result, dividend, Operand(0));
968  __ and_(result, result, Operand(divisor - 1), SetCC);
969  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
970  DeoptimizeIf(eq, instr->environment());
971  }
972  __ rsb(result, result, Operand(0));
973  __ b(&done);
974  __ bind(&positive_dividend);
975  __ and_(result, dividend, Operand(divisor - 1));
976  __ bind(&done);
977  return;
978  }
979 
980  // These registers hold untagged 32 bit values.
981  Register left = ToRegister(instr->left());
982  Register right = ToRegister(instr->right());
983  Register result = ToRegister(instr->result());
984  Label done;
985 
986  if (CpuFeatures::IsSupported(SUDIV)) {
987  CpuFeatures::Scope scope(SUDIV);
988  // Check for x % 0.
989  if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
990  __ cmp(right, Operand(0));
991  DeoptimizeIf(eq, instr->environment());
992  }
993 
994  // For r3 = r1 % r2; we can have the following ARM code
995  // sdiv r3, r1, r2
996  // mls r3, r3, r2, r1
997 
998  __ sdiv(result, left, right);
999  __ mls(result, result, right, left);
1000  __ cmp(result, Operand(0));
1001  __ b(ne, &done);
1002 
1003  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
1004  __ cmp(left, Operand(0));
1005  DeoptimizeIf(lt, instr->environment());
1006  }
1007  } else {
1008  Register scratch = scratch0();
1009  Register scratch2 = ToRegister(instr->temp());
1010  DwVfpRegister dividend = ToDoubleRegister(instr->temp2());
1011  DwVfpRegister divisor = ToDoubleRegister(instr->temp3());
1012  DwVfpRegister quotient = double_scratch0();
1013 
1014  ASSERT(!dividend.is(divisor));
1015  ASSERT(!dividend.is(quotient));
1016  ASSERT(!divisor.is(quotient));
1017  ASSERT(!scratch.is(left));
1018  ASSERT(!scratch.is(right));
1019  ASSERT(!scratch.is(result));
1020 
1021  Label vfp_modulo, both_positive, right_negative;
1022 
1023  // Check for x % 0.
1024  if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
1025  __ cmp(right, Operand(0));
1026  DeoptimizeIf(eq, instr->environment());
1027  }
1028 
1029  __ Move(result, left);
1030 
1031  // (0 % x) must yield 0 (if x is finite, which is the case here).
1032  __ cmp(left, Operand(0));
1033  __ b(eq, &done);
1034  // Preload right in a vfp register.
1035  __ vmov(divisor.low(), right);
1036  __ b(lt, &vfp_modulo);
1037 
1038  __ cmp(left, Operand(right));
1039  __ b(lt, &done);
1040 
1041  // Check for (positive) power of two on the right hand side.
1042  __ JumpIfNotPowerOfTwoOrZeroAndNeg(right,
1043  scratch,
1044  &right_negative,
1045  &both_positive);
1046  // Perform modulo operation (scratch contains right - 1).
1047  __ and_(result, scratch, Operand(left));
1048  __ b(&done);
1049 
1050  __ bind(&right_negative);
1051  // Negate right. The sign of the divisor does not matter.
1052  __ rsb(right, right, Operand(0));
1053 
1054  __ bind(&both_positive);
1055  const int kUnfolds = 3;
1056  // If the (nonnegative) left hand side is smaller than the right
1057  // hand side, the left hand side is the result.
1058  // Else reduce the left hand side by the right hand side a few times.
1059  __ mov(scratch, left);
1060  for (int i = 0; i < kUnfolds; i++) {
1061  // Check if the left hand side is less than
1062  // the right hand side.
1063  __ cmp(scratch, Operand(right));
1064  __ mov(result, scratch, LeaveCC, lt);
1065  __ b(lt, &done);
1066  // If not, reduce the left hand side by the right hand
1067  // side and check again.
1068  if (i < kUnfolds - 1) __ sub(scratch, scratch, right);
1069  }
1070 
1071  __ bind(&vfp_modulo);
1072  // Load the arguments in VFP registers.
1073  // The divisor value is preloaded before. Be careful that 'right'
1074  // is only live on entry.
1075  __ vmov(dividend.low(), left);
1076  // From here on don't use right as it may have been reallocated
1077  // (for example to scratch2).
1078  right = no_reg;
1079 
1080  __ vcvt_f64_s32(dividend, dividend.low());
1081  __ vcvt_f64_s32(divisor, divisor.low());
1082 
1083  // We do not care about the sign of the divisor.
1084  __ vabs(divisor, divisor);
1085  // Compute the quotient and round it to a 32bit integer.
1086  __ vdiv(quotient, dividend, divisor);
1087  __ vcvt_s32_f64(quotient.low(), quotient);
1088  __ vcvt_f64_s32(quotient, quotient.low());
1089 
1090  // Compute the remainder in result.
1091  DwVfpRegister double_scratch = dividend;
1092  __ vmul(double_scratch, divisor, quotient);
1093  __ vcvt_s32_f64(double_scratch.low(), double_scratch);
1094  __ vmov(scratch, double_scratch.low());
1095 
1096  if (!instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
1097  __ sub(result, left, scratch);
1098  } else {
1099  Label ok;
1100  // Check for -0.
1101  __ sub(scratch2, left, scratch, SetCC);
1102  __ b(ne, &ok);
1103  __ cmp(left, Operand(0));
1104  DeoptimizeIf(mi, instr->environment());
1105  __ bind(&ok);
1106  // Load the result and we are done.
1107  __ mov(result, scratch2);
1108  }
1109  }
1110  __ bind(&done);
1111 }
1112 
1113 
1114 void LCodeGen::EmitSignedIntegerDivisionByConstant(
1115  Register result,
1116  Register dividend,
1117  int32_t divisor,
1118  Register remainder,
1119  Register scratch,
1120  LEnvironment* environment) {
1121  ASSERT(!AreAliased(dividend, scratch, ip));
1122  ASSERT(LChunkBuilder::HasMagicNumberForDivisor(divisor));
1123 
1124  uint32_t divisor_abs = abs(divisor);
1125 
1126  int32_t power_of_2_factor =
1127  CompilerIntrinsics::CountTrailingZeros(divisor_abs);
1128 
1129  switch (divisor_abs) {
1130  case 0:
1131  DeoptimizeIf(al, environment);
1132  return;
1133 
1134  case 1:
1135  if (divisor > 0) {
1136  __ Move(result, dividend);
1137  } else {
1138  __ rsb(result, dividend, Operand(0), SetCC);
1139  DeoptimizeIf(vs, environment);
1140  }
1141  // Compute the remainder.
1142  __ mov(remainder, Operand(0));
1143  return;
1144 
1145  default:
1146  if (IsPowerOf2(divisor_abs)) {
1147  // Branch and condition free code for integer division by a power
1148  // of two.
1149  int32_t power = WhichPowerOf2(divisor_abs);
1150  if (power > 1) {
1151  __ mov(scratch, Operand(dividend, ASR, power - 1));
1152  }
1153  __ add(scratch, dividend, Operand(scratch, LSR, 32 - power));
1154  __ mov(result, Operand(scratch, ASR, power));
1155  // Negate if necessary.
1156  // We don't need to check for overflow because the case '-1' is
1157  // handled separately.
1158  if (divisor < 0) {
1159  ASSERT(divisor != -1);
1160  __ rsb(result, result, Operand(0));
1161  }
1162  // Compute the remainder.
1163  if (divisor > 0) {
1164  __ sub(remainder, dividend, Operand(result, LSL, power));
1165  } else {
1166  __ add(remainder, dividend, Operand(result, LSL, power));
1167  }
1168  return;
1169  } else {
1170  // Use magic numbers for a few specific divisors.
1171  // Details and proofs can be found in:
1172  // - Hacker's Delight, Henry S. Warren, Jr.
1173  // - The PowerPC Compiler Writer’s Guide
1174  // and probably many others.
1175  //
1176  // We handle
1177  // <divisor with magic numbers> * <power of 2>
1178  // but not
1179  // <divisor with magic numbers> * <other divisor with magic numbers>
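  // Editor's note, a worked example (constants as given in Hacker's
  // Delight): for divisor 5 the magic pair is M = 0x66666667, s = 1, so
  // the code below computes
  //   quotient  = ((dividend * M) >> 32) >> 1, plus the sign bit of dividend,
  //   remainder = dividend - quotient * 5.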
1180  DivMagicNumbers magic_numbers =
1181  DivMagicNumberFor(divisor_abs >> power_of_2_factor);
1182  // Branch and condition free code for integer division using a
1183  // magic number multiplication.
1184  const int32_t M = magic_numbers.M;
1185  const int32_t s = magic_numbers.s + power_of_2_factor;
1186 
1187  __ mov(ip, Operand(M));
1188  __ smull(ip, scratch, dividend, ip);
1189  if (M < 0) {
1190  __ add(scratch, scratch, Operand(dividend));
1191  }
1192  if (s > 0) {
1193  __ mov(scratch, Operand(scratch, ASR, s));
1194  }
1195  __ add(result, scratch, Operand(dividend, LSR, 31));
1196  if (divisor < 0) __ rsb(result, result, Operand(0));
1197  // Compute the remainder.
1198  __ mov(ip, Operand(divisor));
1199  // This sequence could be replaced with 'mls' when
1200  // it gets implemented.
1201  __ mul(scratch, result, ip);
1202  __ sub(remainder, dividend, scratch);
1203  }
1204  }
1205 }
1206 
1207 
1208 void LCodeGen::DoDivI(LDivI* instr) {
1209  class DeferredDivI: public LDeferredCode {
1210  public:
1211  DeferredDivI(LCodeGen* codegen, LDivI* instr)
1212  : LDeferredCode(codegen), instr_(instr) { }
1213  virtual void Generate() {
1214  codegen()->DoDeferredBinaryOpStub(instr_->pointer_map(),
1215  instr_->left(),
1216  instr_->right(),
1217  Token::DIV);
1218  }
1219  virtual LInstruction* instr() { return instr_; }
1220  private:
1221  LDivI* instr_;
1222  };
1223 
1224  const Register left = ToRegister(instr->left());
1225  const Register right = ToRegister(instr->right());
1226  const Register scratch = scratch0();
1227  const Register result = ToRegister(instr->result());
1228 
1229  // Check for x / 0.
1230  if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
1231  __ cmp(right, Operand(0));
1232  DeoptimizeIf(eq, instr->environment());
1233  }
1234 
1235  // Check for (0 / -x) that will produce negative zero.
1236  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
1237  Label left_not_zero;
1238  __ cmp(left, Operand(0));
1239  __ b(ne, &left_not_zero);
1240  __ cmp(right, Operand(0));
1241  DeoptimizeIf(mi, instr->environment());
1242  __ bind(&left_not_zero);
1243  }
1244 
1245  // Check for (-kMinInt / -1).
1246  if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
1247  Label left_not_min_int;
1248  __ cmp(left, Operand(kMinInt));
1249  __ b(ne, &left_not_min_int);
1250  __ cmp(right, Operand(-1));
1251  DeoptimizeIf(eq, instr->environment());
1252  __ bind(&left_not_min_int);
1253  }
1254 
1255  Label done, deoptimize;
1256  // Test for a few common cases first.
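  // Division by 1 is returned as-is. Division by 2 or 4 is handled inline
  // only when the dividend has its low bit(s) clear; the tst is executed
  // conditionally, only when the preceding cmp matched. Everything else
  // falls through to the deferred generic stub call below.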
1257  __ cmp(right, Operand(1));
1258  __ mov(result, left, LeaveCC, eq);
1259  __ b(eq, &done);
1260 
1261  __ cmp(right, Operand(2));
1262  __ tst(left, Operand(1), eq);
1263  __ mov(result, Operand(left, ASR, 1), LeaveCC, eq);
1264  __ b(eq, &done);
1265 
1266  __ cmp(right, Operand(4));
1267  __ tst(left, Operand(3), eq);
1268  __ mov(result, Operand(left, ASR, 2), LeaveCC, eq);
1269  __ b(eq, &done);
1270 
1271  // Call the stub. The numbers in r0 and r1 have
1272  // to be tagged to Smis. If that is not possible, deoptimize.
1273  DeferredDivI* deferred = new(zone()) DeferredDivI(this, instr);
1274 
1275  __ TrySmiTag(left, &deoptimize, scratch);
1276  __ TrySmiTag(right, &deoptimize, scratch);
1277 
1278  __ b(al, deferred->entry());
1279  __ bind(deferred->exit());
1280 
1281  // If the result in r0 is a Smi, untag it, else deoptimize.
1282  __ JumpIfNotSmi(result, &deoptimize);
1283  __ SmiUntag(result);
1284  __ b(&done);
1285 
1286  __ bind(&deoptimize);
1287  DeoptimizeIf(al, instr->environment());
1288  __ bind(&done);
1289 }
1290 
1291 
1292 void LCodeGen::DoMathFloorOfDiv(LMathFloorOfDiv* instr) {
1293  const Register result = ToRegister(instr->result());
1294  const Register left = ToRegister(instr->left());
1295  const Register remainder = ToRegister(instr->temp());
1296  const Register scratch = scratch0();
1297 
1298  // We only optimize this for division by constants, because the standard
1299  // integer division routine is usually slower than transitioning to VFP.
1300  // This could be optimized on processors with SDIV available.
1301  ASSERT(instr->right()->IsConstantOperand());
1302  int32_t divisor = ToInteger32(LConstantOperand::cast(instr->right()));
1303  if (divisor < 0) {
1304  __ cmp(left, Operand(0));
1305  DeoptimizeIf(eq, instr->environment());
1306  }
1307  EmitSignedIntegerDivisionByConstant(result,
1308  left,
1309  divisor,
1310  remainder,
1311  scratch,
1312  instr->environment());
1313  // We performed a truncating division. Correct the result if necessary.
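  // For example left = -7, divisor = 2: the truncating quotient is -3 with
  // remainder -1. The signs of remainder and divisor differ, so teq sets
  // the N flag and one is subtracted, giving the floor result -4.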
1314  __ cmp(remainder, Operand(0));
1315  __ teq(remainder, Operand(divisor), ne);
1316  __ sub(result, result, Operand(1), LeaveCC, mi);
1317 }
1318 
1319 
1320 void LCodeGen::DoDeferredBinaryOpStub(LPointerMap* pointer_map,
1321  LOperand* left_argument,
1322  LOperand* right_argument,
1323  Token::Value op) {
1324  Register left = ToRegister(left_argument);
1325  Register right = ToRegister(right_argument);
1326 
1327  PushSafepointRegistersScope scope(this, Safepoint::kWithRegistersAndDoubles);
1328  // Move left to r1 and right to r0 for the stub call.
1329  if (left.is(r1)) {
1330  __ Move(r0, right);
1331  } else if (left.is(r0) && right.is(r1)) {
1332  __ Swap(r0, r1, r2);
1333  } else if (left.is(r0)) {
1334  ASSERT(!right.is(r1));
1335  __ mov(r1, r0);
1336  __ mov(r0, right);
1337  } else {
1338  ASSERT(!left.is(r0) && !right.is(r0));
1339  __ mov(r0, right);
1340  __ mov(r1, left);
1341  }
1342  BinaryOpStub stub(op, OVERWRITE_LEFT);
1343  __ CallStub(&stub);
1344  RecordSafepointWithRegistersAndDoubles(pointer_map,
1345  0,
1346  Safepoint::kNoLazyDeopt);
1347  // Overwrite the stored value of r0 with the result of the stub.
1348  __ StoreToSafepointRegistersAndDoublesSlot(r0, r0);
1349 }
1350 
1351 
1352 void LCodeGen::DoMulI(LMulI* instr) {
1353  Register scratch = scratch0();
1354  Register result = ToRegister(instr->result());
1355  // Note that result may alias left.
1356  Register left = ToRegister(instr->left());
1357  LOperand* right_op = instr->right();
1358 
1359  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
1360  bool bailout_on_minus_zero =
1361  instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero);
1362 
1363  if (right_op->IsConstantOperand() && !can_overflow) {
1364  // Use optimized code for specific constants.
1365  int32_t constant = ToInteger32(LConstantOperand::cast(right_op));
1366 
1367  if (bailout_on_minus_zero && (constant < 0)) {
1368  // The case of a zero constant will be handled separately.
1369  // If the constant is negative and left is zero, the result should be -0.
1370  __ cmp(left, Operand(0));
1371  DeoptimizeIf(eq, instr->environment());
1372  }
1373 
1374  switch (constant) {
1375  case -1:
1376  __ rsb(result, left, Operand(0));
1377  break;
1378  case 0:
1379  if (bailout_on_minus_zero) {
1380  // If left is strictly negative and the constant is zero, the
1381  // result is -0. Deoptimize if required, otherwise return 0.
1382  __ cmp(left, Operand(0));
1383  DeoptimizeIf(mi, instr->environment());
1384  }
1385  __ mov(result, Operand(0));
1386  break;
1387  case 1:
1388  __ Move(result, left);
1389  break;
1390  default:
1391  // Multiplying by powers of two and powers of two plus or minus
1392  // one can be done faster with shifted operands.
1393  // For other constants we emit standard code.
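  // For example: x * 8 => x << 3,
  //              x * 9 => x + (x << 3),
  //              x * 7 => (x << 3) - x.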
1394  int32_t mask = constant >> 31;
1395  uint32_t constant_abs = (constant + mask) ^ mask;
1396 
1397  if (IsPowerOf2(constant_abs) ||
1398  IsPowerOf2(constant_abs - 1) ||
1399  IsPowerOf2(constant_abs + 1)) {
1400  if (IsPowerOf2(constant_abs)) {
1401  int32_t shift = WhichPowerOf2(constant_abs);
1402  __ mov(result, Operand(left, LSL, shift));
1403  } else if (IsPowerOf2(constant_abs - 1)) {
1404  int32_t shift = WhichPowerOf2(constant_abs - 1);
1405  __ add(result, left, Operand(left, LSL, shift));
1406  } else if (IsPowerOf2(constant_abs + 1)) {
1407  int32_t shift = WhichPowerOf2(constant_abs + 1);
1408  __ rsb(result, left, Operand(left, LSL, shift));
1409  }
1410 
1411  // Correct the sign of the result if the constant is negative.
1412  if (constant < 0) __ rsb(result, result, Operand(0));
1413 
1414  } else {
1415  // Generate standard code.
1416  __ mov(ip, Operand(constant));
1417  __ mul(result, left, ip);
1418  }
1419  }
1420 
1421  } else {
1422  Register right = EmitLoadRegister(right_op, scratch);
1423  if (bailout_on_minus_zero) {
1424  __ orr(ToRegister(instr->temp()), left, right);
1425  }
1426 
1427  if (can_overflow) {
1428  // scratch:result = left * right.
1429  __ smull(result, scratch, left, right);
1430  __ cmp(scratch, Operand(result, ASR, 31));
1431  DeoptimizeIf(ne, instr->environment());
1432  } else {
1433  __ mul(result, left, right);
1434  }
1435 
1436  if (bailout_on_minus_zero) {
1437  // Bail out if the result is supposed to be negative zero.
1438  Label done;
1439  __ cmp(result, Operand(0));
1440  __ b(ne, &done);
1441  __ cmp(ToRegister(instr->temp()), Operand(0));
1442  DeoptimizeIf(mi, instr->environment());
1443  __ bind(&done);
1444  }
1445  }
1446 }
1447 
1448 
1449 void LCodeGen::DoBitI(LBitI* instr) {
1450  LOperand* left_op = instr->left();
1451  LOperand* right_op = instr->right();
1452  ASSERT(left_op->IsRegister());
1453  Register left = ToRegister(left_op);
1454  Register result = ToRegister(instr->result());
1455  Operand right(no_reg);
1456 
1457  if (right_op->IsStackSlot() || right_op->IsArgument()) {
1458  right = Operand(EmitLoadRegister(right_op, ip));
1459  } else {
1460  ASSERT(right_op->IsRegister() || right_op->IsConstantOperand());
1461  right = ToOperand(right_op);
1462  }
1463 
1464  switch (instr->op()) {
1465  case Token::BIT_AND:
1466  __ and_(result, left, right);
1467  break;
1468  case Token::BIT_OR:
1469  __ orr(result, left, right);
1470  break;
1471  case Token::BIT_XOR:
1472  __ eor(result, left, right);
1473  break;
1474  default:
1475  UNREACHABLE();
1476  break;
1477  }
1478 }
1479 
1480 
1481 void LCodeGen::DoShiftI(LShiftI* instr) {
1482  // Both 'left' and 'right' are "used at start" (see LCodeGen::DoShift), so
1483  // result may alias either of them.
1484  LOperand* right_op = instr->right();
1485  Register left = ToRegister(instr->left());
1486  Register result = ToRegister(instr->result());
1487  Register scratch = scratch0();
1488  if (right_op->IsRegister()) {
1489  // Mask the right_op operand.
1490  __ and_(scratch, ToRegister(right_op), Operand(0x1F));
1491  switch (instr->op()) {
1492  case Token::SAR:
1493  __ mov(result, Operand(left, ASR, scratch));
1494  break;
1495  case Token::SHR:
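  // A logical shift right produces an unsigned value; if the sign bit of
  // the 32-bit result is set, the value cannot be represented as a signed
  // int32, so the code deoptimizes in that case.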
1496  if (instr->can_deopt()) {
1497  __ mov(result, Operand(left, LSR, scratch), SetCC);
1498  DeoptimizeIf(mi, instr->environment());
1499  } else {
1500  __ mov(result, Operand(left, LSR, scratch));
1501  }
1502  break;
1503  case Token::SHL:
1504  __ mov(result, Operand(left, LSL, scratch));
1505  break;
1506  default:
1507  UNREACHABLE();
1508  break;
1509  }
1510  } else {
1511  // Mask the right_op operand.
1512  int value = ToInteger32(LConstantOperand::cast(right_op));
1513  uint8_t shift_count = static_cast<uint8_t>(value & 0x1F);
1514  switch (instr->op()) {
1515  case Token::SAR:
1516  if (shift_count != 0) {
1517  __ mov(result, Operand(left, ASR, shift_count));
1518  } else {
1519  __ Move(result, left);
1520  }
1521  break;
1522  case Token::SHR:
1523  if (shift_count != 0) {
1524  __ mov(result, Operand(left, LSR, shift_count));
1525  } else {
1526  if (instr->can_deopt()) {
1527  __ tst(left, Operand(0x80000000));
1528  DeoptimizeIf(ne, instr->environment());
1529  }
1530  __ Move(result, left);
1531  }
1532  break;
1533  case Token::SHL:
1534  if (shift_count != 0) {
1535  __ mov(result, Operand(left, LSL, shift_count));
1536  } else {
1537  __ Move(result, left);
1538  }
1539  break;
1540  default:
1541  UNREACHABLE();
1542  break;
1543  }
1544  }
1545 }
1546 
1547 
1548 void LCodeGen::DoSubI(LSubI* instr) {
1549  LOperand* left = instr->left();
1550  LOperand* right = instr->right();
1551  LOperand* result = instr->result();
1552  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
1553  SBit set_cond = can_overflow ? SetCC : LeaveCC;
1554 
1555  if (right->IsStackSlot() || right->IsArgument()) {
1556  Register right_reg = EmitLoadRegister(right, ip);
1557  __ sub(ToRegister(result), ToRegister(left), Operand(right_reg), set_cond);
1558  } else {
1559  ASSERT(right->IsRegister() || right->IsConstantOperand());
1560  __ sub(ToRegister(result), ToRegister(left), ToOperand(right), set_cond);
1561  }
1562 
1563  if (can_overflow) {
1564  DeoptimizeIf(vs, instr->environment());
1565  }
1566 }
1567 
1568 
1569 void LCodeGen::DoConstantI(LConstantI* instr) {
1570  ASSERT(instr->result()->IsRegister());
1571  __ mov(ToRegister(instr->result()), Operand(instr->value()));
1572 }
1573 
1574 
1575 void LCodeGen::DoConstantD(LConstantD* instr) {
1576  ASSERT(instr->result()->IsDoubleRegister());
1577  DwVfpRegister result = ToDoubleRegister(instr->result());
1578  double v = instr->value();
1579  __ Vmov(result, v, scratch0());
1580 }
1581 
1582 
1583 void LCodeGen::DoConstantT(LConstantT* instr) {
1584  Handle<Object> value = instr->value();
1585  if (value->IsSmi()) {
1586  __ mov(ToRegister(instr->result()), Operand(value));
1587  } else {
1588  __ LoadHeapObject(ToRegister(instr->result()),
1589  Handle<HeapObject>::cast(value));
1590  }
1591 }
1592 
1593 
1594 void LCodeGen::DoJSArrayLength(LJSArrayLength* instr) {
1595  Register result = ToRegister(instr->result());
1596  Register array = ToRegister(instr->value());
1597  __ ldr(result, FieldMemOperand(array, JSArray::kLengthOffset));
1598 }
1599 
1600 
1601 void LCodeGen::DoFixedArrayBaseLength(LFixedArrayBaseLength* instr) {
1602  Register result = ToRegister(instr->result());
1603  Register array = ToRegister(instr->value());
1604  __ ldr(result, FieldMemOperand(array, FixedArrayBase::kLengthOffset));
1605 }
1606 
1607 
1608 void LCodeGen::DoMapEnumLength(LMapEnumLength* instr) {
1609  Register result = ToRegister(instr->result());
1610  Register map = ToRegister(instr->value());
1611  __ EnumLength(result, map);
1612 }
1613 
1614 
1615 void LCodeGen::DoElementsKind(LElementsKind* instr) {
1616  Register result = ToRegister(instr->result());
1617  Register input = ToRegister(instr->value());
1618 
1619  // Load map into |result|.
1620  __ ldr(result, FieldMemOperand(input, HeapObject::kMapOffset));
1621  // Load the map's "bit field 2" into |result|. We only need the first byte,
1622  // but the following bit field extraction takes care of that anyway.
1623  __ ldr(result, FieldMemOperand(result, Map::kBitField2Offset));
1624  // Retrieve elements_kind from bit field 2.
1625  __ ubfx(result, result, Map::kElementsKindShift, Map::kElementsKindBitCount);
1626 }
1627 
1628 
1629 void LCodeGen::DoValueOf(LValueOf* instr) {
1630  Register input = ToRegister(instr->value());
1631  Register result = ToRegister(instr->result());
1632  Register map = ToRegister(instr->temp());
1633  Label done;
1634 
1635  // If the object is a smi return the object.
1636  __ tst(input, Operand(kSmiTagMask));
1637  __ Move(result, input, eq);
1638  __ b(eq, &done);
1639 
1640  // If the object is not a value type, return the object.
1641  __ CompareObjectType(input, map, map, JS_VALUE_TYPE);
1642  __ Move(result, input, ne);
1643  __ b(ne, &done);
1644  __ ldr(result, FieldMemOperand(input, JSValue::kValueOffset));
1645 
1646  __ bind(&done);
1647 }
1648 
1649 
1650 void LCodeGen::DoDateField(LDateField* instr) {
1651  Register object = ToRegister(instr->date());
1652  Register result = ToRegister(instr->result());
1653  Register scratch = ToRegister(instr->temp());
1654  Smi* index = instr->index();
1655  Label runtime, done;
1656  ASSERT(object.is(result));
1657  ASSERT(object.is(r0));
1658  ASSERT(!scratch.is(scratch0()));
1659  ASSERT(!scratch.is(object));
1660 
1661  __ tst(object, Operand(kSmiTagMask));
1662  DeoptimizeIf(eq, instr->environment());
1663  __ CompareObjectType(object, scratch, scratch, JS_DATE_TYPE);
1664  DeoptimizeIf(ne, instr->environment());
1665 
1666  if (index->value() == 0) {
1667  __ ldr(result, FieldMemOperand(object, JSDate::kValueOffset));
1668  } else {
1669  if (index->value() < JSDate::kFirstUncachedField) {
1670  ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
1671  __ mov(scratch, Operand(stamp));
1672  __ ldr(scratch, MemOperand(scratch));
1673  __ ldr(scratch0(), FieldMemOperand(object, JSDate::kCacheStampOffset));
1674  __ cmp(scratch, scratch0());
1675  __ b(ne, &runtime);
1676  __ ldr(result, FieldMemOperand(object, JSDate::kValueOffset +
1677  kPointerSize * index->value()));
1678  __ jmp(&done);
1679  }
1680  __ bind(&runtime);
1681  __ PrepareCallCFunction(2, scratch);
1682  __ mov(r1, Operand(index));
1683  __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
1684  __ bind(&done);
1685  }
1686 }
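// [Editor's note — illustrative sketch, not part of the original file; the
// helper name below is hypothetical.] DoDateField relies on a per-isolate
// "date cache stamp": a field cached on a JSDate object is only trusted while
// the stamp recorded on the object matches the isolate's current stamp, which
// is bumped whenever time-zone or DST data changes. The comparison above is
// essentially:
//
//   bool CachedDateFieldIsValid(int object_stamp, int isolate_stamp) {
//     return object_stamp == isolate_stamp;  // otherwise fall back to runtime
//   }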
1687 
1688 
1689 void LCodeGen::DoBitNotI(LBitNotI* instr) {
1690  Register input = ToRegister(instr->value());
1691  Register result = ToRegister(instr->result());
1692  __ mvn(result, Operand(input));
1693 }
1694 
1695 
1696 void LCodeGen::DoThrow(LThrow* instr) {
1697  Register input_reg = EmitLoadRegister(instr->value(), ip);
1698  __ push(input_reg);
1699  CallRuntime(Runtime::kThrow, 1, instr);
1700 
1701  if (FLAG_debug_code) {
1702  __ stop("Unreachable code.");
1703  }
1704 }
1705 
1706 
1707 void LCodeGen::DoAddI(LAddI* instr) {
1708  LOperand* left = instr->left();
1709  LOperand* right = instr->right();
1710  LOperand* result = instr->result();
1711  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
1712  SBit set_cond = can_overflow ? SetCC : LeaveCC;
1713 
1714  if (right->IsStackSlot() || right->IsArgument()) {
1715  Register right_reg = EmitLoadRegister(right, ip);
1716  __ add(ToRegister(result), ToRegister(left), Operand(right_reg), set_cond);
1717  } else {
1718  ASSERT(right->IsRegister() || right->IsConstantOperand());
1719  __ add(ToRegister(result), ToRegister(left), ToOperand(right), set_cond);
1720  }
1721 
1722  if (can_overflow) {
1723  DeoptimizeIf(vs, instr->environment());
1724  }
1725 }
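// [Editor's note — illustrative sketch, not part of the original file.]
// DoAddI performs the add with SetCC and deoptimizes on 'vs', i.e. when the
// ARM V (signed overflow) flag ends up set. A portable C++ model of that
// "add, then bail out on signed overflow" test (GCC/Clang builtin):
static bool AddWouldDeoptimize(int left, int right, int* sum) {
  // Returns true exactly in the case where the generated code above would
  // call DeoptimizeIf(vs, ...).
  return __builtin_add_overflow(left, right, sum);
}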
1726 
1727 
1728 void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
1729  LOperand* left = instr->left();
1730  LOperand* right = instr->right();
1731  HMathMinMax::Operation operation = instr->hydrogen()->operation();
1732  Condition condition = (operation == HMathMinMax::kMathMin) ? le : ge;
1733  if (instr->hydrogen()->representation().IsInteger32()) {
1734  Register left_reg = ToRegister(left);
1735  Operand right_op = (right->IsRegister() || right->IsConstantOperand())
1736  ? ToOperand(right)
1737  : Operand(EmitLoadRegister(right, ip));
1738  Register result_reg = ToRegister(instr->result());
1739  __ cmp(left_reg, right_op);
1740  if (!result_reg.is(left_reg)) {
1741  __ mov(result_reg, left_reg, LeaveCC, condition);
1742  }
1743  __ mov(result_reg, right_op, LeaveCC, NegateCondition(condition));
1744  } else {
1745  ASSERT(instr->hydrogen()->representation().IsDouble());
1746  DoubleRegister left_reg = ToDoubleRegister(left);
1747  DoubleRegister right_reg = ToDoubleRegister(right);
1748  DoubleRegister result_reg = ToDoubleRegister(instr->result());
1749  Label check_nan_left, check_zero, return_left, return_right, done;
1750  __ VFPCompareAndSetFlags(left_reg, right_reg);
1751  __ b(vs, &check_nan_left);
1752  __ b(eq, &check_zero);
1753  __ b(condition, &return_left);
1754  __ b(al, &return_right);
1755 
1756  __ bind(&check_zero);
1757  __ VFPCompareAndSetFlags(left_reg, 0.0);
1758  __ b(ne, &return_left); // left == right != 0.
1759  // At this point, both left and right are either 0 or -0.
1760  if (operation == HMathMinMax::kMathMin) {
1761  // We could use a single 'vorr' instruction here if we had NEON support.
1762  __ vneg(left_reg, left_reg);
1763  __ vsub(result_reg, left_reg, right_reg);
1764  __ vneg(result_reg, result_reg);
1765  } else {
1766  // Since we operate on +0 and/or -0, vadd and vand have the same effect;
1767  // the decision for vadd is easy because vand is a NEON instruction.
1768  __ vadd(result_reg, left_reg, right_reg);
1769  }
1770  __ b(al, &done);
1771 
1772  __ bind(&check_nan_left);
1773  __ VFPCompareAndSetFlags(left_reg, left_reg);
1774  __ b(vs, &return_left); // left == NaN.
1775  __ bind(&return_right);
1776  if (!right_reg.is(result_reg)) {
1777  __ vmov(result_reg, right_reg);
1778  }
1779  __ b(al, &done);
1780 
1781  __ bind(&return_left);
1782  if (!left_reg.is(result_reg)) {
1783  __ vmov(result_reg, left_reg);
1784  }
1785  __ bind(&done);
1786  }
1787 }
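// [Editor's note — illustrative sketch, not part of the original file.]
// The vneg/vsub/vneg sequence above picks the correctly signed zero for min,
// and the plain vadd does the same for max, because IEEE-754 addition and
// subtraction of zeros resolve the sign without branching:
static double MinOfSignedZeros(double a, double b) {
  return -((-a) - b);  // min(+0, -0) == min(-0, +0) == -0
}
static double MaxOfSignedZeros(double a, double b) {
  return a + b;        // max(+0, -0) == max(-0, +0) == +0
}
// Both helpers are only meant for inputs that are +0.0 or -0.0, mirroring the
// check_zero path above.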
1788 
1789 
1790 void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
1791  DoubleRegister left = ToDoubleRegister(instr->left());
1792  DoubleRegister right = ToDoubleRegister(instr->right());
1793  DoubleRegister result = ToDoubleRegister(instr->result());
1794  switch (instr->op()) {
1795  case Token::ADD:
1796  __ vadd(result, left, right);
1797  break;
1798  case Token::SUB:
1799  __ vsub(result, left, right);
1800  break;
1801  case Token::MUL:
1802  __ vmul(result, left, right);
1803  break;
1804  case Token::DIV:
1805  __ vdiv(result, left, right);
1806  break;
1807  case Token::MOD: {
1808  // Save r0-r3 on the stack.
1809  __ stm(db_w, sp, r0.bit() | r1.bit() | r2.bit() | r3.bit());
1810 
1811  __ PrepareCallCFunction(0, 2, scratch0());
1812  __ SetCallCDoubleArguments(left, right);
1813  __ CallCFunction(
1814  ExternalReference::double_fp_operation(Token::MOD, isolate()),
1815  0, 2);
1816  // Move the result in the double result register.
1817  __ GetCFunctionDoubleResult(result);
1818 
1819  // Restore r0-r3.
1820  __ ldm(ia_w, sp, r0.bit() | r1.bit() | r2.bit() | r3.bit());
1821  break;
1822  }
1823  default:
1824  UNREACHABLE();
1825  break;
1826  }
1827 }
1828 
1829 
1830 void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
1831  ASSERT(ToRegister(instr->left()).is(r1));
1832  ASSERT(ToRegister(instr->right()).is(r0));
1833  ASSERT(ToRegister(instr->result()).is(r0));
1834 
1835  BinaryOpStub stub(instr->op(), NO_OVERWRITE);
1836  // Block literal pool emission to ensure that the nop indicating no inlined
1837  // smi code ends up in the correct position.

1838  Assembler::BlockConstPoolScope block_const_pool(masm());
1839  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
1840  __ nop(); // Signals no inlined code.
1841 }
1842 
1843 
1844 int LCodeGen::GetNextEmittedBlock(int block) {
1845  for (int i = block + 1; i < graph()->blocks()->length(); ++i) {
1846  LLabel* label = chunk_->GetLabel(i);
1847  if (!label->HasReplacement()) return i;
1848  }
1849  return -1;
1850 }
1851 
1852 
1853 void LCodeGen::EmitBranch(int left_block, int right_block, Condition cc) {
1854  int next_block = GetNextEmittedBlock(current_block_);
1855  right_block = chunk_->LookupDestination(right_block);
1856  left_block = chunk_->LookupDestination(left_block);
1857 
1858  if (right_block == left_block) {
1859  EmitGoto(left_block);
1860  } else if (left_block == next_block) {
1861  __ b(NegateCondition(cc), chunk_->GetAssemblyLabel(right_block));
1862  } else if (right_block == next_block) {
1863  __ b(cc, chunk_->GetAssemblyLabel(left_block));
1864  } else {
1865  __ b(cc, chunk_->GetAssemblyLabel(left_block));
1866  __ b(chunk_->GetAssemblyLabel(right_block));
1867  }
1868 }
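// [Editor's note] EmitBranch avoids redundant jumps: when both destinations
// resolve to the same block it defers to EmitGoto (which itself omits the
// jump when that block is emitted next); when the true target is the next
// emitted block it branches to the false target on the negated condition and
// falls through otherwise; when the false target is next it branches to the
// true target on the condition; only when neither target falls through does
// it emit a conditional branch followed by an unconditional one.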
1869 
1870 
1871 void LCodeGen::DoBranch(LBranch* instr) {
1872  int true_block = chunk_->LookupDestination(instr->true_block_id());
1873  int false_block = chunk_->LookupDestination(instr->false_block_id());
1874 
1875  Representation r = instr->hydrogen()->value()->representation();
1876  if (r.IsInteger32()) {
1877  Register reg = ToRegister(instr->value());
1878  __ cmp(reg, Operand(0));
1879  EmitBranch(true_block, false_block, ne);
1880  } else if (r.IsDouble()) {
1881  DoubleRegister reg = ToDoubleRegister(instr->value());
1882  Register scratch = scratch0();
1883 
1884  // Test the double value. Zero and NaN are false.
1885  __ VFPCompareAndLoadFlags(reg, 0.0, scratch);
1886  __ tst(scratch, Operand(kVFPZConditionFlagBit | kVFPVConditionFlagBit));
1887  EmitBranch(true_block, false_block, eq);
1888  } else {
1889  ASSERT(r.IsTagged());
1890  Register reg = ToRegister(instr->value());
1891  HType type = instr->hydrogen()->value()->type();
1892  if (type.IsBoolean()) {
1893  __ CompareRoot(reg, Heap::kTrueValueRootIndex);
1894  EmitBranch(true_block, false_block, eq);
1895  } else if (type.IsSmi()) {
1896  __ cmp(reg, Operand(0));
1897  EmitBranch(true_block, false_block, ne);
1898  } else {
1899  Label* true_label = chunk_->GetAssemblyLabel(true_block);
1900  Label* false_label = chunk_->GetAssemblyLabel(false_block);
1901 
1902  ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types();
1903  // Avoid deopts in the case where we've never executed this path before.
1904  if (expected.IsEmpty()) expected = ToBooleanStub::all_types();
1905 
1906  if (expected.Contains(ToBooleanStub::UNDEFINED)) {
1907  // undefined -> false.
1908  __ CompareRoot(reg, Heap::kUndefinedValueRootIndex);
1909  __ b(eq, false_label);
1910  }
1911  if (expected.Contains(ToBooleanStub::BOOLEAN)) {
1912  // Boolean -> its value.
1913  __ CompareRoot(reg, Heap::kTrueValueRootIndex);
1914  __ b(eq, true_label);
1915  __ CompareRoot(reg, Heap::kFalseValueRootIndex);
1916  __ b(eq, false_label);
1917  }
1918  if (expected.Contains(ToBooleanStub::NULL_TYPE)) {
1919  // 'null' -> false.
1920  __ CompareRoot(reg, Heap::kNullValueRootIndex);
1921  __ b(eq, false_label);
1922  }
1923 
1924  if (expected.Contains(ToBooleanStub::SMI)) {
1925  // Smis: 0 -> false, all other -> true.
1926  __ cmp(reg, Operand(0));
1927  __ b(eq, false_label);
1928  __ JumpIfSmi(reg, true_label);
1929  } else if (expected.NeedsMap()) {
1930  // If we need a map later and have a Smi -> deopt.
1931  __ tst(reg, Operand(kSmiTagMask));
1932  DeoptimizeIf(eq, instr->environment());
1933  }
1934 
1935  const Register map = scratch0();
1936  if (expected.NeedsMap()) {
1937  __ ldr(map, FieldMemOperand(reg, HeapObject::kMapOffset));
1938 
1939  if (expected.CanBeUndetectable()) {
1940  // Undetectable -> false.
1941  __ ldrb(ip, FieldMemOperand(map, Map::kBitFieldOffset));
1942  __ tst(ip, Operand(1 << Map::kIsUndetectable));
1943  __ b(ne, false_label);
1944  }
1945  }
1946 
1947  if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) {
1948  // spec object -> true.
1949  __ CompareInstanceType(map, ip, FIRST_SPEC_OBJECT_TYPE);
1950  __ b(ge, true_label);
1951  }
1952 
1953  if (expected.Contains(ToBooleanStub::STRING)) {
1954  // String value -> false iff empty.
1955  Label not_string;
1956  __ CompareInstanceType(map, ip, FIRST_NONSTRING_TYPE);
1957  __ b(ge, &not_string);
1958  __ ldr(ip, FieldMemOperand(reg, String::kLengthOffset));
1959  __ cmp(ip, Operand(0));
1960  __ b(ne, true_label);
1961  __ b(false_label);
1962  __ bind(&not_string);
1963  }
1964 
1965  if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
1966  // heap number -> false iff +0, -0, or NaN.
1967  DoubleRegister dbl_scratch = double_scratch0();
1968  Label not_heap_number;
1969  __ CompareRoot(map, Heap::kHeapNumberMapRootIndex);
1970  __ b(ne, &not_heap_number);
1971  __ vldr(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset));
1972  __ VFPCompareAndSetFlags(dbl_scratch, 0.0);
1973  __ b(vs, false_label); // NaN -> false.
1974  __ b(eq, false_label); // +0, -0 -> false.
1975  __ b(true_label);
1976  __ bind(&not_heap_number);
1977  }
1978 
1979  // We've seen something for the first time -> deopt.
1980  DeoptimizeIf(al, instr->environment());
1981  }
1982  }
1983 }
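// [Editor's note] The generic tagged path above mirrors the ECMAScript
// ToBoolean table, restricted to the input types the ToBooleanStub has
// recorded so far: undefined, null, false, the smi 0, the empty string and
// heap numbers that are +0, -0 or NaN branch to the false label; true,
// non-zero smis, spec objects (unless undetectable), non-empty strings and
// other heap numbers branch to the true label. A value of a kind never seen
// before falls through to the unconditional DeoptimizeIf so the stub can
// learn the new type on the next attempt.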
1984 
1985 
1986 void LCodeGen::EmitGoto(int block) {
1987  block = chunk_->LookupDestination(block);
1988  int next_block = GetNextEmittedBlock(current_block_);
1989  if (block != next_block) {
1990  __ jmp(chunk_->GetAssemblyLabel(block));
1991  }
1992 }
1993 
1994 
1995 void LCodeGen::DoGoto(LGoto* instr) {
1996  EmitGoto(instr->block_id());
1997 }
1998 
1999 
2000 Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
2001  Condition cond = kNoCondition;
2002  switch (op) {
2003  case Token::EQ:
2004  case Token::EQ_STRICT:
2005  cond = eq;
2006  break;
2007  case Token::LT:
2008  cond = is_unsigned ? lo : lt;
2009  break;
2010  case Token::GT:
2011  cond = is_unsigned ? hi : gt;
2012  break;
2013  case Token::LTE:
2014  cond = is_unsigned ? ls : le;
2015  break;
2016  case Token::GTE:
2017  cond = is_unsigned ? hs : ge;
2018  break;
2019  case Token::IN:
2020  case Token::INSTANCEOF:
2021  default:
2022  UNREACHABLE();
2023  }
2024  return cond;
2025 }
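// [Editor's note — illustrative sketch, not part of the original file.]
// The signed/unsigned split above matters because the same bit pattern
// orders differently under the two interpretations:
static bool LessThanSigned(int a, int b) { return a < b; }              // 'lt'
static bool LessThanUnsigned(unsigned a, unsigned b) { return a < b; }  // 'lo'
// LessThanSigned(-1, 1) is true, while the same bits compared unsigned,
// LessThanUnsigned(0xFFFFFFFFu, 1u), is false.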
2026 
2027 
2028 void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) {
2029  LOperand* left = instr->left();
2030  LOperand* right = instr->right();
2031  int false_block = chunk_->LookupDestination(instr->false_block_id());
2032  int true_block = chunk_->LookupDestination(instr->true_block_id());
2033  Condition cond = TokenToCondition(instr->op(), false);
2034 
2035  if (left->IsConstantOperand() && right->IsConstantOperand()) {
2036  // We can statically evaluate the comparison.
2037  double left_val = ToDouble(LConstantOperand::cast(left));
2038  double right_val = ToDouble(LConstantOperand::cast(right));
2039  int next_block =
2040  EvalComparison(instr->op(), left_val, right_val) ? true_block
2041  : false_block;
2042  EmitGoto(next_block);
2043  } else {
2044  if (instr->is_double()) {
2045  // Compare left and right operands as doubles and load the
2046  // resulting flags into the normal status register.
2047  __ VFPCompareAndSetFlags(ToDoubleRegister(left), ToDoubleRegister(right));
2048  // If a NaN is involved, i.e. the result is unordered (V set),
2049  // jump to false block label.
2050  __ b(vs, chunk_->GetAssemblyLabel(false_block));
2051  } else {
2052  if (right->IsConstantOperand()) {
2053  __ cmp(ToRegister(left),
2054  Operand(ToInteger32(LConstantOperand::cast(right))));
2055  } else if (left->IsConstantOperand()) {
2056  __ cmp(ToRegister(right),
2057  Operand(ToInteger32(LConstantOperand::cast(left))));
2058  // We transposed the operands. Reverse the condition.
2059  cond = ReverseCondition(cond);
2060  } else {
2061  __ cmp(ToRegister(left), ToRegister(right));
2062  }
2063  }
2064  EmitBranch(true_block, false_block, cond);
2065  }
2066 }
2067 
2068 
2069 void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
2070  Register left = ToRegister(instr->left());
2071  Register right = ToRegister(instr->right());
2072  int false_block = chunk_->LookupDestination(instr->false_block_id());
2073  int true_block = chunk_->LookupDestination(instr->true_block_id());
2074 
2075  __ cmp(left, Operand(right));
2076  EmitBranch(true_block, false_block, eq);
2077 }
2078 
2079 
2080 void LCodeGen::DoCmpConstantEqAndBranch(LCmpConstantEqAndBranch* instr) {
2081  Register left = ToRegister(instr->left());
2082  int true_block = chunk_->LookupDestination(instr->true_block_id());
2083  int false_block = chunk_->LookupDestination(instr->false_block_id());
2084 
2085  __ cmp(left, Operand(instr->hydrogen()->right()));
2086  EmitBranch(true_block, false_block, eq);
2087 }
2088 
2089 
2090 void LCodeGen::DoIsNilAndBranch(LIsNilAndBranch* instr) {
2091  Register scratch = scratch0();
2092  Register reg = ToRegister(instr->value());
2093  int false_block = chunk_->LookupDestination(instr->false_block_id());
2094 
2095  // If the expression is known to be untagged or a smi, then it's definitely
2096  // not null, and it can't be an undetectable object.
2097  if (instr->hydrogen()->representation().IsSpecialization() ||
2098  instr->hydrogen()->type().IsSmi()) {
2099  EmitGoto(false_block);
2100  return;
2101  }
2102 
2103  int true_block = chunk_->LookupDestination(instr->true_block_id());
2104  Heap::RootListIndex nil_value = instr->nil() == kNullValue ?
2105  Heap::kNullValueRootIndex :
2106  Heap::kUndefinedValueRootIndex;
2107  __ LoadRoot(ip, nil_value);
2108  __ cmp(reg, ip);
2109  if (instr->kind() == kStrictEquality) {
2110  EmitBranch(true_block, false_block, eq);
2111  } else {
2112  Heap::RootListIndex other_nil_value = instr->nil() == kNullValue ?
2113  Heap::kUndefinedValueRootIndex :
2114  Heap::kNullValueRootIndex;
2115  Label* true_label = chunk_->GetAssemblyLabel(true_block);
2116  Label* false_label = chunk_->GetAssemblyLabel(false_block);
2117  __ b(eq, true_label);
2118  __ LoadRoot(ip, other_nil_value);
2119  __ cmp(reg, ip);
2120  __ b(eq, true_label);
2121  __ JumpIfSmi(reg, false_label);
2122  // Check for undetectable objects by looking in the bit field in
2123  // the map. The object has already been smi checked.
2124  __ ldr(scratch, FieldMemOperand(reg, HeapObject::kMapOffset));
2125  __ ldrb(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
2126  __ tst(scratch, Operand(1 << Map::kIsUndetectable));
2127  EmitBranch(true_block, false_block, ne);
2128  }
2129 }
2130 
2131 
2132 Condition LCodeGen::EmitIsObject(Register input,
2133  Register temp1,
2134  Label* is_not_object,
2135  Label* is_object) {
2136  Register temp2 = scratch0();
2137  __ JumpIfSmi(input, is_not_object);
2138 
2139  __ LoadRoot(temp2, Heap::kNullValueRootIndex);
2140  __ cmp(input, temp2);
2141  __ b(eq, is_object);
2142 
2143  // Load map.
2144  __ ldr(temp1, FieldMemOperand(input, HeapObject::kMapOffset));
2145  // Undetectable objects behave like undefined.
2146  __ ldrb(temp2, FieldMemOperand(temp1, Map::kBitFieldOffset));
2147  __ tst(temp2, Operand(1 << Map::kIsUndetectable));
2148  __ b(ne, is_not_object);
2149 
2150  // Load instance type and check that it is in object type range.
2151  __ ldrb(temp2, FieldMemOperand(temp1, Map::kInstanceTypeOffset));
2152  __ cmp(temp2, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
2153  __ b(lt, is_not_object);
2154  __ cmp(temp2, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
2155  return le;
2156 }
2157 
2158 
2159 void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
2160  Register reg = ToRegister(instr->value());
2161  Register temp1 = ToRegister(instr->temp());
2162 
2163  int true_block = chunk_->LookupDestination(instr->true_block_id());
2164  int false_block = chunk_->LookupDestination(instr->false_block_id());
2165  Label* true_label = chunk_->GetAssemblyLabel(true_block);
2166  Label* false_label = chunk_->GetAssemblyLabel(false_block);
2167 
2168  Condition true_cond =
2169  EmitIsObject(reg, temp1, false_label, true_label);
2170 
2171  EmitBranch(true_block, false_block, true_cond);
2172 }
2173 
2174 
2175 Condition LCodeGen::EmitIsString(Register input,
2176  Register temp1,
2177  Label* is_not_string) {
2178  __ JumpIfSmi(input, is_not_string);
2179  __ CompareObjectType(input, temp1, temp1, FIRST_NONSTRING_TYPE);
2180 
2181  return lt;
2182 }
2183 
2184 
2185 void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
2186  Register reg = ToRegister(instr->value());
2187  Register temp1 = ToRegister(instr->temp());
2188 
2189  int true_block = chunk_->LookupDestination(instr->true_block_id());
2190  int false_block = chunk_->LookupDestination(instr->false_block_id());
2191  Label* false_label = chunk_->GetAssemblyLabel(false_block);
2192 
2193  Condition true_cond =
2194  EmitIsString(reg, temp1, false_label);
2195 
2196  EmitBranch(true_block, false_block, true_cond);
2197 }
2198 
2199 
2200 void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
2201  int true_block = chunk_->LookupDestination(instr->true_block_id());
2202  int false_block = chunk_->LookupDestination(instr->false_block_id());
2203 
2204  Register input_reg = EmitLoadRegister(instr->value(), ip);
2205  __ tst(input_reg, Operand(kSmiTagMask));
2206  EmitBranch(true_block, false_block, eq);
2207 }
2208 
2209 
2210 void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
2211  Register input = ToRegister(instr->value());
2212  Register temp = ToRegister(instr->temp());
2213 
2214  int true_block = chunk_->LookupDestination(instr->true_block_id());
2215  int false_block = chunk_->LookupDestination(instr->false_block_id());
2216 
2217  __ JumpIfSmi(input, chunk_->GetAssemblyLabel(false_block));
2218  __ ldr(temp, FieldMemOperand(input, HeapObject::kMapOffset));
2219  __ ldrb(temp, FieldMemOperand(temp, Map::kBitFieldOffset));
2220  __ tst(temp, Operand(1 << Map::kIsUndetectable));
2221  EmitBranch(true_block, false_block, ne);
2222 }
2223 
2224 
2225 static Condition ComputeCompareCondition(Token::Value op) {
2226  switch (op) {
2227  case Token::EQ_STRICT:
2228  case Token::EQ:
2229  return eq;
2230  case Token::LT:
2231  return lt;
2232  case Token::GT:
2233  return gt;
2234  case Token::LTE:
2235  return le;
2236  case Token::GTE:
2237  return ge;
2238  default:
2239  UNREACHABLE();
2240  return kNoCondition;
2241  }
2242 }
2243 
2244 
2245 void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
2246  Token::Value op = instr->op();
2247  int true_block = chunk_->LookupDestination(instr->true_block_id());
2248  int false_block = chunk_->LookupDestination(instr->false_block_id());
2249 
2250  Handle<Code> ic = CompareIC::GetUninitialized(op);
2251  CallCode(ic, RelocInfo::CODE_TARGET, instr);
2252  __ cmp(r0, Operand(0)); // This instruction also signals no smi code inlined.
2253 
2254  Condition condition = ComputeCompareCondition(op);
2255 
2256  EmitBranch(true_block, false_block, condition);
2257 }
2258 
2259 
2260 static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
2261  InstanceType from = instr->from();
2262  InstanceType to = instr->to();
2263  if (from == FIRST_TYPE) return to;
2264  ASSERT(from == to || to == LAST_TYPE);
2265  return from;
2266 }
2267 
2268 
2269 static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
2270  InstanceType from = instr->from();
2271  InstanceType to = instr->to();
2272  if (from == to) return eq;
2273  if (to == LAST_TYPE) return hs;
2274  if (from == FIRST_TYPE) return ls;
2275  UNREACHABLE();
2276  return eq;
2277 }
2278 
2279 
2280 void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
2281  Register scratch = scratch0();
2282  Register input = ToRegister(instr->value());
2283 
2284  int true_block = chunk_->LookupDestination(instr->true_block_id());
2285  int false_block = chunk_->LookupDestination(instr->false_block_id());
2286 
2287  Label* false_label = chunk_->GetAssemblyLabel(false_block);
2288 
2289  __ JumpIfSmi(input, false_label);
2290 
2291  __ CompareObjectType(input, scratch, scratch, TestType(instr->hydrogen()));
2292  EmitBranch(true_block, false_block, BranchCondition(instr->hydrogen()));
2293 }
2294 
2295 
2296 void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
2297  Register input = ToRegister(instr->value());
2298  Register result = ToRegister(instr->result());
2299 
2300  __ AssertString(input);
2301 
2302  __ ldr(result, FieldMemOperand(input, String::kHashFieldOffset));
2303  __ IndexFromHash(result, result);
2304 }
2305 
2306 
2307 void LCodeGen::DoHasCachedArrayIndexAndBranch(
2308  LHasCachedArrayIndexAndBranch* instr) {
2309  Register input = ToRegister(instr->value());
2310  Register scratch = scratch0();
2311 
2312  int true_block = chunk_->LookupDestination(instr->true_block_id());
2313  int false_block = chunk_->LookupDestination(instr->false_block_id());
2314 
2315  __ ldr(scratch,
2316  FieldMemOperand(input, String::kHashFieldOffset));
2317  __ tst(scratch, Operand(String::kContainsCachedArrayIndexMask));
2318  EmitBranch(true_block, false_block, eq);
2319 }
2320 
2321 
2322 // Branches to a label or falls through with the answer in flags. Trashes
2323 // the temp registers, but not the input.
2324 void LCodeGen::EmitClassOfTest(Label* is_true,
2325  Label* is_false,
2326  Handle<String>class_name,
2327  Register input,
2328  Register temp,
2329  Register temp2) {
2330  ASSERT(!input.is(temp));
2331  ASSERT(!input.is(temp2));
2332  ASSERT(!temp.is(temp2));
2333 
2334  __ JumpIfSmi(input, is_false);
2335 
2336  if (class_name->IsEqualTo(CStrVector("Function"))) {
2337  // Assuming the following assertions, we can use the same compares to test
2338  // for both being a function type and being in the object type range.
2339  STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
2340  STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
2341  FIRST_SPEC_OBJECT_TYPE + 1);
2342  STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
2343  LAST_SPEC_OBJECT_TYPE - 1);
2344  STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
2345  __ CompareObjectType(input, temp, temp2, FIRST_SPEC_OBJECT_TYPE);
2346  __ b(lt, is_false);
2347  __ b(eq, is_true);
2348  __ cmp(temp2, Operand(LAST_SPEC_OBJECT_TYPE));
2349  __ b(eq, is_true);
2350  } else {
2351  // Faster code path to avoid two compares: subtract lower bound from the
2352  // actual type and do a signed compare with the width of the type range.
2353  __ ldr(temp, FieldMemOperand(input, HeapObject::kMapOffset));
2354  __ ldrb(temp2, FieldMemOperand(temp, Map::kInstanceTypeOffset));
2355  __ sub(temp2, temp2, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
2356  __ cmp(temp2, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE -
2357  FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
2358  __ b(gt, is_false);
2359  }
2360 
2361  // Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range.
2362  // Check if the constructor in the map is a function.
2363  __ ldr(temp, FieldMemOperand(temp, Map::kConstructorOffset));
2364 
2365  // Objects with a non-function constructor have class 'Object'.
2366  __ CompareObjectType(temp, temp2, temp2, JS_FUNCTION_TYPE);
2367  if (class_name->IsEqualTo(CStrVector("Object"))) {
2368  __ b(ne, is_true);
2369  } else {
2370  __ b(ne, is_false);
2371  }
2372 
2373  // temp now contains the constructor function. Grab the
2374  // instance class name from there.
2375  __ ldr(temp, FieldMemOperand(temp, JSFunction::kSharedFunctionInfoOffset));
2376  __ ldr(temp, FieldMemOperand(temp,
2377  SharedFunctionInfo::kInstanceClassNameOffset));
2378  // The class name we are testing against is a symbol because it's a literal.
2379  // The name in the constructor is a symbol because of the way the context is
2380  // booted. This routine isn't expected to work for random API-created
2381  // classes and it doesn't have to because you can't access it with natives
2382  // syntax. Since both sides are symbols it is sufficient to use an identity
2383  // comparison.
2384  __ cmp(temp, Operand(class_name));
2385  // End with the answer in flags.
2386 }
2387 
2388 
2389 void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
2390  Register input = ToRegister(instr->value());
2391  Register temp = scratch0();
2392  Register temp2 = ToRegister(instr->temp());
2393  Handle<String> class_name = instr->hydrogen()->class_name();
2394 
2395  int true_block = chunk_->LookupDestination(instr->true_block_id());
2396  int false_block = chunk_->LookupDestination(instr->false_block_id());
2397 
2398  Label* true_label = chunk_->GetAssemblyLabel(true_block);
2399  Label* false_label = chunk_->GetAssemblyLabel(false_block);
2400 
2401  EmitClassOfTest(true_label, false_label, class_name, input, temp, temp2);
2402 
2403  EmitBranch(true_block, false_block, eq);
2404 }
2405 
2406 
2407 void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
2408  Register reg = ToRegister(instr->value());
2409  Register temp = ToRegister(instr->temp());
2410  int true_block = instr->true_block_id();
2411  int false_block = instr->false_block_id();
2412 
2413  __ ldr(temp, FieldMemOperand(reg, HeapObject::kMapOffset));
2414  __ cmp(temp, Operand(instr->map()));
2415  EmitBranch(true_block, false_block, eq);
2416 }
2417 
2418 
2419 void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
2420  ASSERT(ToRegister(instr->left()).is(r0)); // Object is in r0.
2421  ASSERT(ToRegister(instr->right()).is(r1)); // Function is in r1.
2422 
2423  InstanceofStub stub(InstanceofStub::kArgsInRegisters);
2424  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
2425 
2426  __ cmp(r0, Operand(0));
2427  __ mov(r0, Operand(factory()->false_value()), LeaveCC, ne);
2428  __ mov(r0, Operand(factory()->true_value()), LeaveCC, eq);
2429 }
2430 
2431 
2432 void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
2433  class DeferredInstanceOfKnownGlobal: public LDeferredCode {
2434  public:
2435  DeferredInstanceOfKnownGlobal(LCodeGen* codegen,
2436  LInstanceOfKnownGlobal* instr)
2437  : LDeferredCode(codegen), instr_(instr) { }
2438  virtual void Generate() {
2439  codegen()->DoDeferredInstanceOfKnownGlobal(instr_, &map_check_);
2440  }
2441  virtual LInstruction* instr() { return instr_; }
2442  Label* map_check() { return &map_check_; }
2443  private:
2444  LInstanceOfKnownGlobal* instr_;
2445  Label map_check_;
2446  };
2447 
2448  DeferredInstanceOfKnownGlobal* deferred;
2449  deferred = new(zone()) DeferredInstanceOfKnownGlobal(this, instr);
2450 
2451  Label done, false_result;
2452  Register object = ToRegister(instr->value());
2453  Register temp = ToRegister(instr->temp());
2454  Register result = ToRegister(instr->result());
2455 
2456  ASSERT(object.is(r0));
2457  ASSERT(result.is(r0));
2458 
2459  // A Smi is not instance of anything.
2460  __ JumpIfSmi(object, &false_result);
2461 
2462  // This is the inlined call site instanceof cache. The two occurrences of the
2463  // hole value will be patched to the last map/result pair generated by the
2464  // instanceof stub.
2465  Label cache_miss;
2466  Register map = temp;
2467  __ ldr(map, FieldMemOperand(object, HeapObject::kMapOffset));
2468  {
2469  // Block constant pool emission to ensure the positions of instructions are
2470  // as expected by the patcher. See InstanceofStub::Generate().
2471  Assembler::BlockConstPoolScope block_const_pool(masm());
2472  __ bind(deferred->map_check()); // Label for calculating code patching.
2473  // We use Factory::the_hole_value() on purpose instead of loading from the
2474  // root array to force relocation to be able to later patch with
2475  // the cached map.
2476  PredictableCodeSizeScope predictable(masm_);
2477  Handle<JSGlobalPropertyCell> cell =
2478  factory()->NewJSGlobalPropertyCell(factory()->the_hole_value());
2479  __ mov(ip, Operand(Handle<Object>(cell)));
2480  __ ldr(ip, FieldMemOperand(ip, JSGlobalPropertyCell::kValueOffset));
2481  __ cmp(map, Operand(ip));
2482  __ b(ne, &cache_miss);
2483  // We use Factory::the_hole_value() on purpose instead of loading from the
2484  // root array to force relocation to be able to later patch
2485  // with true or false.
2486  __ mov(result, Operand(factory()->the_hole_value()));
2487  }
2488  __ b(&done);
2489 
2490  // The inlined call site cache did not match. Check null and string before
2491  // calling the deferred code.
2492  __ bind(&cache_miss);
2493  // Null is not instance of anything.
2494  __ LoadRoot(ip, Heap::kNullValueRootIndex);
2495  __ cmp(object, Operand(ip));
2496  __ b(eq, &false_result);
2497 
2498  // String values are not instances of anything.
2499  Condition is_string = masm_->IsObjectStringType(object, temp);
2500  __ b(is_string, &false_result);
2501 
2502  // Go to the deferred code.
2503  __ b(deferred->entry());
2504 
2505  __ bind(&false_result);
2506  __ LoadRoot(result, Heap::kFalseValueRootIndex);
2507 
2508  // Here result has either true or false. Deferred code also produces true or
2509  // false object.
2510  __ bind(deferred->exit());
2511  __ bind(&done);
2512 }
2513 
2514 
2515 void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
2516  Label* map_check) {
2517  Register result = ToRegister(instr->result());
2518  ASSERT(result.is(r0));
2519 
2520  InstanceofStub::Flags flags = InstanceofStub::kNoFlags;
2521  flags = static_cast<InstanceofStub::Flags>(
2522  flags | InstanceofStub::kArgsInRegisters);
2523  flags = static_cast<InstanceofStub::Flags>(
2524  flags | InstanceofStub::kCallSiteInlineCheck);
2525  flags = static_cast<InstanceofStub::Flags>(
2526  flags | InstanceofStub::kReturnTrueFalseObject);
2527  InstanceofStub stub(flags);
2528 
2529  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
2530 
2531  // Get the temp register reserved by the instruction. This needs to be r4 as
2532  // its slot in the pushed safepoint register block is used to communicate the
2533  // offset to the location of the map check.
2534  Register temp = ToRegister(instr->temp());
2535  ASSERT(temp.is(r4));
2536  __ LoadHeapObject(InstanceofStub::right(), instr->function());
2537  static const int kAdditionalDelta = 5;
2538  // Make sure that the code size is predictable, since we use specific constant
2539  // offsets in the code to find embedded values.
2540  PredictableCodeSizeScope predictable(masm_);
2541  int delta = masm_->InstructionsGeneratedSince(map_check) + kAdditionalDelta;
2542  Label before_push_delta;
2543  __ bind(&before_push_delta);
2544  __ BlockConstPoolFor(kAdditionalDelta);
2545  __ mov(temp, Operand(delta * kPointerSize));
2546  // The mov above can generate one or two instructions. The delta was computed
2547  // for two instructions, so we need to pad here in case of one instruction.
2548  if (masm_->InstructionsGeneratedSince(&before_push_delta) != 2) {
2549  ASSERT_EQ(1, masm_->InstructionsGeneratedSince(&before_push_delta));
2550  __ nop();
2551  }
2552  __ StoreToSafepointRegisterSlot(temp, temp);
2553  CallCodeGeneric(stub.GetCode(),
2554  RelocInfo::CODE_TARGET,
2555  instr,
2556  RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
2557  LEnvironment* env = instr->GetDeferredLazyDeoptimizationEnvironment();
2558  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
2559  // Put the result value into the result register slot and
2560  // restore all registers.
2561  __ StoreToSafepointRegisterSlot(result, result);
2562 }
2563 
2564 
2565 void LCodeGen::DoCmpT(LCmpT* instr) {
2566  Token::Value op = instr->op();
2567 
2568  Handle<Code> ic = CompareIC::GetUninitialized(op);
2569  CallCode(ic, RelocInfo::CODE_TARGET, instr);
2570  __ cmp(r0, Operand(0)); // This instruction also signals no smi code inlined.
2571 
2572  Condition condition = ComputeCompareCondition(op);
2573  __ LoadRoot(ToRegister(instr->result()),
2574  Heap::kTrueValueRootIndex,
2575  condition);
2576  __ LoadRoot(ToRegister(instr->result()),
2577  Heap::kFalseValueRootIndex,
2578  NegateCondition(condition));
2579 }
2580 
2581 
2582 void LCodeGen::DoReturn(LReturn* instr) {
2583  if (FLAG_trace) {
2584  // Push the return value on the stack as the parameter.
2585  // Runtime::TraceExit returns its parameter in r0.
2586  __ push(r0);
2587  __ CallRuntime(Runtime::kTraceExit, 1);
2588  }
2589  int32_t sp_delta = (GetParameterCount() + 1) * kPointerSize;
2590  __ mov(sp, fp);
2591  __ ldm(ia_w, sp, fp.bit() | lr.bit());
2592  __ add(sp, sp, Operand(sp_delta));
2593  __ Jump(lr);
2594 }
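// [Editor's note — worked example.] With two declared parameters the code
// above computes sp_delta == (2 + 1) * kPointerSize: after the ldm restores
// the saved fp and lr, the extra "+ 1" slot pops the receiver along with the
// two parameters before jumping to the return address in lr.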
2595 
2596 
2597 void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
2598  Register result = ToRegister(instr->result());
2599  __ mov(ip, Operand(Handle<Object>(instr->hydrogen()->cell())));
2600  __ ldr(result, FieldMemOperand(ip, JSGlobalPropertyCell::kValueOffset));
2601  if (instr->hydrogen()->RequiresHoleCheck()) {
2602  __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
2603  __ cmp(result, ip);
2604  DeoptimizeIf(eq, instr->environment());
2605  }
2606 }
2607 
2608 
2609 void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
2610  ASSERT(ToRegister(instr->global_object()).is(r0));
2611  ASSERT(ToRegister(instr->result()).is(r0));
2612 
2613  __ mov(r2, Operand(instr->name()));
2614  RelocInfo::Mode mode = instr->for_typeof() ? RelocInfo::CODE_TARGET
2615  : RelocInfo::CODE_TARGET_CONTEXT;
2616  Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
2617  CallCode(ic, mode, instr);
2618 }
2619 
2620 
2621 void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
2622  Register value = ToRegister(instr->value());
2623  Register cell = scratch0();
2624 
2625  // Load the cell.
2626  __ mov(cell, Operand(instr->hydrogen()->cell()));
2627 
2628  // If the cell we are storing to contains the hole it could have
2629  // been deleted from the property dictionary. In that case, we need
2630  // to update the property details in the property dictionary to mark
2631  // it as no longer deleted.
2632  if (instr->hydrogen()->RequiresHoleCheck()) {
2633  // We use a temp to check the payload (CompareRoot might clobber ip).
2634  Register payload = ToRegister(instr->temp());
2635  __ ldr(payload, FieldMemOperand(cell, JSGlobalPropertyCell::kValueOffset));
2636  __ CompareRoot(payload, Heap::kTheHoleValueRootIndex);
2637  DeoptimizeIf(eq, instr->environment());
2638  }
2639 
2640  // Store the value.
2641  __ str(value, FieldMemOperand(cell, JSGlobalPropertyCell::kValueOffset));
2642  // Cells are always rescanned, so no write barrier here.
2643 }
2644 
2645 
2646 void LCodeGen::DoStoreGlobalGeneric(LStoreGlobalGeneric* instr) {
2647  ASSERT(ToRegister(instr->global_object()).is(r1));
2648  ASSERT(ToRegister(instr->value()).is(r0));
2649 
2650  __ mov(r2, Operand(instr->name()));
2651  Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
2652  ? isolate()->builtins()->StoreIC_Initialize_Strict()
2653  : isolate()->builtins()->StoreIC_Initialize();
2654  CallCode(ic, RelocInfo::CODE_TARGET_CONTEXT, instr);
2655 }
2656 
2657 
2658 void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
2659  Register context = ToRegister(instr->context());
2660  Register result = ToRegister(instr->result());
2661  __ ldr(result, ContextOperand(context, instr->slot_index()));
2662  if (instr->hydrogen()->RequiresHoleCheck()) {
2663  __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
2664  __ cmp(result, ip);
2665  if (instr->hydrogen()->DeoptimizesOnHole()) {
2666  DeoptimizeIf(eq, instr->environment());
2667  } else {
2668  __ mov(result, Operand(factory()->undefined_value()), LeaveCC, eq);
2669  }
2670  }
2671 }
2672 
2673 
2674 void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
2675  Register context = ToRegister(instr->context());
2676  Register value = ToRegister(instr->value());
2677  Register scratch = scratch0();
2678  MemOperand target = ContextOperand(context, instr->slot_index());
2679 
2680  Label skip_assignment;
2681 
2682  if (instr->hydrogen()->RequiresHoleCheck()) {
2683  __ ldr(scratch, target);
2684  __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
2685  __ cmp(scratch, ip);
2686  if (instr->hydrogen()->DeoptimizesOnHole()) {
2687  DeoptimizeIf(eq, instr->environment());
2688  } else {
2689  __ b(ne, &skip_assignment);
2690  }
2691  }
2692 
2693  __ str(value, target);
2694  if (instr->hydrogen()->NeedsWriteBarrier()) {
2695  HType type = instr->hydrogen()->value()->type();
2696  SmiCheck check_needed =
2697  type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
2698  __ RecordWriteContextSlot(context,
2699  target.offset(),
2700  value,
2701  scratch,
2702  GetLinkRegisterState(),
2703  kSaveFPRegs,
2704  EMIT_REMEMBERED_SET,
2705  check_needed);
2706  }
2707 
2708  __ bind(&skip_assignment);
2709 }
2710 
2711 
2712 void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
2713  Register object = ToRegister(instr->object());
2714  Register result = ToRegister(instr->result());
2715  if (instr->hydrogen()->is_in_object()) {
2716  __ ldr(result, FieldMemOperand(object, instr->hydrogen()->offset()));
2717  } else {
2718  __ ldr(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
2719  __ ldr(result, FieldMemOperand(result, instr->hydrogen()->offset()));
2720  }
2721 }
2722 
2723 
2724 void LCodeGen::EmitLoadFieldOrConstantFunction(Register result,
2725  Register object,
2726  Handle<Map> type,
2727  Handle<String> name,
2728  LEnvironment* env) {
2729  LookupResult lookup(isolate());
2730  type->LookupDescriptor(NULL, *name, &lookup);
2731  ASSERT(lookup.IsFound() || lookup.IsCacheable());
2732  if (lookup.IsField()) {
2733  int index = lookup.GetLocalFieldIndexFromMap(*type);
2734  int offset = index * kPointerSize;
2735  if (index < 0) {
2736  // Negative property indices are in-object properties, indexed
2737  // from the end of the fixed part of the object.
2738  __ ldr(result, FieldMemOperand(object, offset + type->instance_size()));
2739  } else {
2740  // Non-negative property indices are in the properties array.
2741  __ ldr(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
2742  __ ldr(result, FieldMemOperand(result, offset + FixedArray::kHeaderSize));
2743  }
2744  } else if (lookup.IsConstantFunction()) {
2745  Handle<JSFunction> function(lookup.GetConstantFunctionFromMap(*type));
2746  __ LoadHeapObject(result, function);
2747  } else {
2748  // Negative lookup.
2749  // Check prototypes.
2750  Handle<HeapObject> current(HeapObject::cast((*type)->prototype()));
2751  Heap* heap = type->GetHeap();
2752  while (*current != heap->null_value()) {
2753  __ LoadHeapObject(result, current);
2754  __ ldr(result, FieldMemOperand(result, HeapObject::kMapOffset));
2755  __ cmp(result, Operand(Handle<Map>(current->map())));
2756  DeoptimizeIf(ne, env);
2757  current =
2758  Handle<HeapObject>(HeapObject::cast(current->map()->prototype()));
2759  }
2760  __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
2761  }
2762 }
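// [Editor's note — illustrative sketch, not part of the original file.]
// The two field loads above follow V8's property-index convention: negative
// indices address in-object properties, counted back from the end of the
// object's fixed part, while non-negative indices address the out-of-line
// properties array:
static int FieldByteOffset(int index, int instance_size,
                           int pointer_size, int properties_header_size) {
  return index < 0
      ? instance_size + index * pointer_size            // in-object property
      : properties_header_size + index * pointer_size;  // properties array
}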
2763 
2764 
2765 void LCodeGen::DoLoadNamedFieldPolymorphic(LLoadNamedFieldPolymorphic* instr) {
2766  Register object = ToRegister(instr->object());
2767  Register result = ToRegister(instr->result());
2768  Register object_map = scratch0();
2769 
2770  int map_count = instr->hydrogen()->types()->length();
2771  bool need_generic = instr->hydrogen()->need_generic();
2772 
2773  if (map_count == 0 && !need_generic) {
2774  DeoptimizeIf(al, instr->environment());
2775  return;
2776  }
2777  Handle<String> name = instr->hydrogen()->name();
2778  Label done;
2779  __ ldr(object_map, FieldMemOperand(object, HeapObject::kMapOffset));
2780  for (int i = 0; i < map_count; ++i) {
2781  bool last = (i == map_count - 1);
2782  Handle<Map> map = instr->hydrogen()->types()->at(i);
2783  Label check_passed;
2784  __ CompareMap(
2785  object_map, map, &check_passed, ALLOW_ELEMENT_TRANSITION_MAPS);
2786  if (last && !need_generic) {
2787  DeoptimizeIf(ne, instr->environment());
2788  __ bind(&check_passed);
2789  EmitLoadFieldOrConstantFunction(
2790  result, object, map, name, instr->environment());
2791  } else {
2792  Label next;
2793  __ b(ne, &next);
2794  __ bind(&check_passed);
2795  EmitLoadFieldOrConstantFunction(
2796  result, object, map, name, instr->environment());
2797  __ b(&done);
2798  __ bind(&next);
2799  }
2800  }
2801  if (need_generic) {
2802  __ mov(r2, Operand(name));
2803  Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
2804  CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS);
2805  }
2806  __ bind(&done);
2807 }
2808 
2809 
2810 void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
2811  ASSERT(ToRegister(instr->object()).is(r0));
2812  ASSERT(ToRegister(instr->result()).is(r0));
2813 
2814  // Name is always in r2.
2815  __ mov(r2, Operand(instr->name()));
2816  Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
2817  CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS);
2818 }
2819 
2820 
2821 void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
2822  Register scratch = scratch0();
2823  Register function = ToRegister(instr->function());
2824  Register result = ToRegister(instr->result());
2825 
2826  // Check that the function really is a function. Load map into the
2827  // result register.
2828  __ CompareObjectType(function, result, scratch, JS_FUNCTION_TYPE);
2829  DeoptimizeIf(ne, instr->environment());
2830 
2831  // Make sure that the function has an instance prototype.
2832  Label non_instance;
2833  __ ldrb(scratch, FieldMemOperand(result, Map::kBitFieldOffset));
2834  __ tst(scratch, Operand(1 << Map::kHasNonInstancePrototype));
2835  __ b(ne, &non_instance);
2836 
2837  // Get the prototype or initial map from the function.
2838  __ ldr(result,
2839  FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
2840 
2841  // Check that the function has a prototype or an initial map.
2842  __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
2843  __ cmp(result, ip);
2844  DeoptimizeIf(eq, instr->environment());
2845 
2846  // If the function does not have an initial map, we're done.
2847  Label done;
2848  __ CompareObjectType(result, scratch, scratch, MAP_TYPE);
2849  __ b(ne, &done);
2850 
2851  // Get the prototype from the initial map.
2852  __ ldr(result, FieldMemOperand(result, Map::kPrototypeOffset));
2853  __ jmp(&done);
2854 
2855  // Non-instance prototype: Fetch prototype from constructor field
2856  // in initial map.
2857  __ bind(&non_instance);
2858  __ ldr(result, FieldMemOperand(result, Map::kConstructorOffset));
2859 
2860  // All done.
2861  __ bind(&done);
2862 }
2863 
2864 
2865 void LCodeGen::DoLoadElements(LLoadElements* instr) {
2866  Register result = ToRegister(instr->result());
2867  Register input = ToRegister(instr->object());
2868  Register scratch = scratch0();
2869 
2870  __ ldr(result, FieldMemOperand(input, JSObject::kElementsOffset));
2871  if (FLAG_debug_code) {
2872  Label done, fail;
2873  __ ldr(scratch, FieldMemOperand(result, HeapObject::kMapOffset));
2874  __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
2875  __ cmp(scratch, ip);
2876  __ b(eq, &done);
2877  __ LoadRoot(ip, Heap::kFixedCOWArrayMapRootIndex);
2878  __ cmp(scratch, ip);
2879  __ b(eq, &done);
2880  // |scratch| still contains |input|'s map.
2881  __ ldr(scratch, FieldMemOperand(scratch, Map::kBitField2Offset));
2882  __ ubfx(scratch, scratch, Map::kElementsKindShift,
2883  Map::kElementsKindBitCount);
2884  __ cmp(scratch, Operand(GetInitialFastElementsKind()));
2885  __ b(lt, &fail);
2886  __ cmp(scratch, Operand(TERMINAL_FAST_ELEMENTS_KIND));
2887  __ b(le, &done);
2888  __ cmp(scratch, Operand(FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND));
2889  __ b(lt, &fail);
2890  __ cmp(scratch, Operand(LAST_EXTERNAL_ARRAY_ELEMENTS_KIND));
2891  __ b(le, &done);
2892  __ bind(&fail);
2893  __ Abort("Check for fast or external elements failed.");
2894  __ bind(&done);
2895  }
2896 }
2897 
2898 
2899 void LCodeGen::DoLoadExternalArrayPointer(
2900  LLoadExternalArrayPointer* instr) {
2901  Register to_reg = ToRegister(instr->result());
2902  Register from_reg = ToRegister(instr->object());
2903  __ ldr(to_reg, FieldMemOperand(from_reg,
2904  ExternalArray::kExternalPointerOffset));
2905 }
2906 
2907 
2908 void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
2909  Register arguments = ToRegister(instr->arguments());
2910  Register length = ToRegister(instr->length());
2911  Register index = ToRegister(instr->index());
2912  Register result = ToRegister(instr->result());
2913  // There are two words between the frame pointer and the last argument.
2914  // Subtracting the index from the length accounts for one of them; add one more.
2915  __ sub(length, length, index);
2916  __ add(length, length, Operand(1));
2917  __ ldr(result, MemOperand(arguments, length, LSL, kPointerSizeLog2));
2918 }
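// [Editor's note — worked example.] The load above reads from
//   arguments + (length - index + 1) * kPointerSize,
// so with length == 4 and index == 0 (the first argument) it reads the word
// five slots above 'arguments', skipping the two words that separate the
// frame pointer from the last argument as described in the comment.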
2919 
2920 
2921 void LCodeGen::DoLoadKeyedFastElement(LLoadKeyedFastElement* instr) {
2922  Register elements = ToRegister(instr->elements());
2923  Register result = ToRegister(instr->result());
2924  Register scratch = scratch0();
2925  Register store_base = scratch;
2926  int offset = 0;
2927 
2928  if (instr->key()->IsConstantOperand()) {
2929  LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
2930  offset = FixedArray::OffsetOfElementAt(ToInteger32(const_operand) +
2931  instr->additional_index());
2932  store_base = elements;
2933  } else {
2934  Register key = EmitLoadRegister(instr->key(), scratch0());
2935  // Even though the HLoadKeyedFastElement instruction forces the input
2936  // representation for the key to be an integer, the input gets replaced
2937  // during bounds check elimination with the index argument to the bounds
2938  // check, which can be tagged, so that case must be handled here, too.
2939  if (instr->hydrogen()->key()->representation().IsTagged()) {
2940  __ add(scratch, elements,
2941  Operand(key, LSL, kPointerSizeLog2 - kSmiTagSize));
2942  } else {
2943  __ add(scratch, elements, Operand(key, LSL, kPointerSizeLog2));
2944  }
2945  offset = FixedArray::OffsetOfElementAt(instr->additional_index());
2946  }
2947  __ ldr(result, FieldMemOperand(store_base, offset));
2948 
2949  // Check for the hole value.
2950  if (instr->hydrogen()->RequiresHoleCheck()) {
2951  if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
2952  __ tst(result, Operand(kSmiTagMask));
2953  DeoptimizeIf(ne, instr->environment());
2954  } else {
2955  __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
2956  __ cmp(result, scratch);
2957  DeoptimizeIf(eq, instr->environment());
2958  }
2959  }
2960 }
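// [Editor's note — illustrative sketch, not part of the original file;
// assumes 32-bit smi tagging (tag size 1) and 4-byte pointers.]
// A tagged smi key already holds value << kSmiTagSize, so turning it into a
// byte offset only needs the remaining shift, which is why the tagged case
// above scales by kPointerSizeLog2 - kSmiTagSize:
static unsigned SmiKeyToByteOffset(unsigned smi_key) {
  const int kSmiTagSizeLocal = 1;       // assumption, matches 32-bit V8
  const int kPointerSizeLog2Local = 2;  // assumption, 4-byte pointers
  // (value << 1) << (2 - 1) == value << 2 == value * kPointerSize.
  return smi_key << (kPointerSizeLog2Local - kSmiTagSizeLocal);
}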
2961 
2962 
2963 void LCodeGen::DoLoadKeyedFastDoubleElement(
2964  LLoadKeyedFastDoubleElement* instr) {
2965  Register elements = ToRegister(instr->elements());
2966  bool key_is_constant = instr->key()->IsConstantOperand();
2967  Register key = no_reg;
2968  DwVfpRegister result = ToDoubleRegister(instr->result());
2969  Register scratch = scratch0();
2970 
2971  int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
2972  int shift_size = (instr->hydrogen()->key()->representation().IsTagged())
2973  ? (element_size_shift - kSmiTagSize) : element_size_shift;
2974  int constant_key = 0;
2975  if (key_is_constant) {
2976  constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
2977  if (constant_key & 0xF0000000) {
2978  Abort("array index constant value too big.");
2979  }
2980  } else {
2981  key = ToRegister(instr->key());
2982  }
2983 
2984  Operand operand = key_is_constant
2985  ? Operand(((constant_key + instr->additional_index()) <<
2986  element_size_shift) +
2987  FixedDoubleArray::kHeaderSize - kHeapObjectTag)
2988  : Operand(key, LSL, shift_size);
2989  __ add(elements, elements, operand);
2990  if (!key_is_constant) {
2991  __ add(elements, elements,
2992  Operand((FixedDoubleArray::kHeaderSize - kHeapObjectTag) +
2993  (instr->additional_index() << element_size_shift)));
2994  }
2995 
2996  if (instr->hydrogen()->RequiresHoleCheck()) {
2997  __ ldr(scratch, MemOperand(elements, sizeof(kHoleNanLower32)));
2998  __ cmp(scratch, Operand(kHoleNanUpper32));
2999  DeoptimizeIf(eq, instr->environment());
3000  }
3001 
3002  __ vldr(result, elements, 0);
3003 }
3004 
3005 
3006 MemOperand LCodeGen::PrepareKeyedOperand(Register key,
3007  Register base,
3008  bool key_is_constant,
3009  int constant_key,
3010  int element_size,
3011  int shift_size,
3012  int additional_index,
3013  int additional_offset) {
3014  if (additional_index != 0 && !key_is_constant) {
3015  additional_index *= 1 << (element_size - shift_size);
3016  __ add(scratch0(), key, Operand(additional_index));
3017  }
3018 
3019  if (key_is_constant) {
3020  return MemOperand(base,
3021  (constant_key << element_size) + additional_offset);
3022  }
3023 
3024  if (additional_index == 0) {
3025  if (shift_size >= 0) {
3026  return MemOperand(base, key, LSL, shift_size);
3027  } else {
3028  ASSERT_EQ(-1, shift_size);
3029  return MemOperand(base, key, LSR, 1);
3030  }
3031  }
3032 
3033  if (shift_size >= 0) {
3034  return MemOperand(base, scratch0(), LSL, shift_size);
3035  } else {
3036  ASSERT_EQ(-1, shift_size);
3037  return MemOperand(base, scratch0(), LSR, 1);
3038  }
3039 }
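// [Editor's note] Taken together, the cases above compute
//   base + (key << shift_size) + (additional_index << element_size)
// for a register key (a shift_size of -1 arises when a smi-tagged key indexes
// byte-sized elements and becomes a right shift by one to untag it), and
//   base + (constant_key << element_size) + additional_offset
// for a constant key; the specialized-array load below passes
// additional_offset as additional_index << element_size_shift, so the two
// forms address the same slot. The scratch register is only needed to fold
// additional_index into the key before the scaled addressing mode is used.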
3040 
3041 
3042 void LCodeGen::DoLoadKeyedSpecializedArrayElement(
3043  LLoadKeyedSpecializedArrayElement* instr) {
3044  Register external_pointer = ToRegister(instr->external_pointer());
3045  Register key = no_reg;
3046  ElementsKind elements_kind = instr->elements_kind();
3047  bool key_is_constant = instr->key()->IsConstantOperand();
3048  int constant_key = 0;
3049  if (key_is_constant) {
3050  constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
3051  if (constant_key & 0xF0000000) {
3052  Abort("array index constant value too big.");
3053  }
3054  } else {
3055  key = ToRegister(instr->key());
3056  }
3057  int element_size_shift = ElementsKindToShiftSize(elements_kind);
3058  int shift_size = (instr->hydrogen()->key()->representation().IsTagged())
3059  ? (element_size_shift - kSmiTagSize) : element_size_shift;
3060  int additional_offset = instr->additional_index() << element_size_shift;
3061 
3062  if (elements_kind == EXTERNAL_FLOAT_ELEMENTS ||
3063  elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
3064  CpuFeatures::Scope scope(VFP3);
3065  DwVfpRegister result = ToDoubleRegister(instr->result());
3066  Operand operand = key_is_constant
3067  ? Operand(constant_key << element_size_shift)
3068  : Operand(key, LSL, shift_size);
3069  __ add(scratch0(), external_pointer, operand);
3070  if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
3071  __ vldr(result.low(), scratch0(), additional_offset);
3072  __ vcvt_f64_f32(result, result.low());
3073  } else { // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS
3074  __ vldr(result, scratch0(), additional_offset);
3075  }
3076  } else {
3077  Register result = ToRegister(instr->result());
3078  MemOperand mem_operand = PrepareKeyedOperand(
3079  key, external_pointer, key_is_constant, constant_key,
3080  element_size_shift, shift_size,
3081  instr->additional_index(), additional_offset);
3082  switch (elements_kind) {
3083  case EXTERNAL_BYTE_ELEMENTS:
3084  __ ldrsb(result, mem_operand);
3085  break;
3086  case EXTERNAL_PIXEL_ELEMENTS:
3087  case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
3088  __ ldrb(result, mem_operand);
3089  break;
3090  case EXTERNAL_SHORT_ELEMENTS:
3091  __ ldrsh(result, mem_operand);
3092  break;
3093  case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
3094  __ ldrh(result, mem_operand);
3095  break;
3096  case EXTERNAL_INT_ELEMENTS:
3097  __ ldr(result, mem_operand);
3098  break;
3099  case EXTERNAL_UNSIGNED_INT_ELEMENTS:
3100  __ ldr(result, mem_operand);
3101  if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
3102  __ cmp(result, Operand(0x80000000));
3103  DeoptimizeIf(cs, instr->environment());
3104  }
3105  break;
3106  case EXTERNAL_FLOAT_ELEMENTS:
3107  case EXTERNAL_DOUBLE_ELEMENTS:
3108  case FAST_HOLEY_DOUBLE_ELEMENTS:
3109  case FAST_HOLEY_ELEMENTS:
3110  case FAST_HOLEY_SMI_ELEMENTS:
3111  case FAST_DOUBLE_ELEMENTS:
3112  case FAST_ELEMENTS:
3113  case FAST_SMI_ELEMENTS:
3114  case DICTIONARY_ELEMENTS:
3115  case NON_EXISTENT_ELEMENTS_KIND:
3116  UNREACHABLE();
3117  break;
3118  }
3119  }
3120 }
3121 
3122 
3123 void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
3124  ASSERT(ToRegister(instr->object()).is(r1));
3125  ASSERT(ToRegister(instr->key()).is(r0));
3126 
3127  Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
3128  CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS);
3129 }
3130 
3131 
3132 void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
3133  Register scratch = scratch0();
3134  Register result = ToRegister(instr->result());
3135 
3136  if (instr->hydrogen()->from_inlined()) {
3137  __ sub(result, sp, Operand(2 * kPointerSize));
3138  } else {
3139  // Check if the calling frame is an arguments adaptor frame.
3140  Label done, adapted;
3141  __ ldr(scratch, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
3142  __ ldr(result, MemOperand(scratch, StandardFrameConstants::kContextOffset));
3143  __ cmp(result, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
3144 
3145  // Result is the frame pointer for the frame if not adapted and for the real
3146  // frame below the adaptor frame if adapted.
3147  __ mov(result, fp, LeaveCC, ne);
3148  __ mov(result, scratch, LeaveCC, eq);
3149  }
3150 }
3151 
3152 
3153 void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
3154  Register elem = ToRegister(instr->elements());
3155  Register result = ToRegister(instr->result());
3156 
3157  Label done;
3158 
3159  // If there is no arguments adaptor frame, the number of arguments is fixed.
3160  __ cmp(fp, elem);
3161  __ mov(result, Operand(scope()->num_parameters()));
3162  __ b(eq, &done);
3163 
3164  // Arguments adaptor frame present. Get argument length from there.
3165  __ ldr(result, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
3166  __ ldr(result,
3167  MemOperand(result, ArgumentsAdaptorFrameConstants::kLengthOffset));
3168  __ SmiUntag(result);
3169 
3170  // Argument length is in result register.
3171  __ bind(&done);
3172 }
3173 
3174 
3175 void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
3176  Register receiver = ToRegister(instr->receiver());
3177  Register function = ToRegister(instr->function());
3178  Register scratch = scratch0();
3179 
3180  // If the receiver is null or undefined, we have to pass the global
3181  // object as a receiver to normal functions. Values have to be
3182  // passed unchanged to builtins and strict-mode functions.
3183  Label global_object, receiver_ok;
3184 
3185  // Do not transform the receiver to object for strict mode
3186  // functions.
3187  __ ldr(scratch,
3188  FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
3189  __ ldr(scratch,
3190  FieldMemOperand(scratch, SharedFunctionInfo::kCompilerHintsOffset));
3191  __ tst(scratch,
3192  Operand(1 << (SharedFunctionInfo::kStrictModeFunction + kSmiTagSize)));
3193  __ b(ne, &receiver_ok);
3194 
3195  // Do not transform the receiver to object for builtins.
3196  __ tst(scratch, Operand(1 << (SharedFunctionInfo::kNative + kSmiTagSize)));
3197  __ b(ne, &receiver_ok);
3198 
3199  // Normal function. Replace undefined or null with global receiver.
3200  __ LoadRoot(scratch, Heap::kNullValueRootIndex);
3201  __ cmp(receiver, scratch);
3202  __ b(eq, &global_object);
3203  __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
3204  __ cmp(receiver, scratch);
3205  __ b(eq, &global_object);
3206 
3207  // Deoptimize if the receiver is not a JS object.
3208  __ tst(receiver, Operand(kSmiTagMask));
3209  DeoptimizeIf(eq, instr->environment());
3210  __ CompareObjectType(receiver, scratch, scratch, FIRST_SPEC_OBJECT_TYPE);
3211  DeoptimizeIf(lt, instr->environment());
3212  __ jmp(&receiver_ok);
3213 
3214  __ bind(&global_object);
3215  __ ldr(receiver, GlobalObjectOperand());
3216  __ ldr(receiver,
3217  FieldMemOperand(receiver, GlobalObject::kGlobalReceiverOffset));
3218  __ bind(&receiver_ok);
3219 }
3220 
3221 
3222 void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
3223  Register receiver = ToRegister(instr->receiver());
3224  Register function = ToRegister(instr->function());
3225  Register length = ToRegister(instr->length());
3226  Register elements = ToRegister(instr->elements());
3227  Register scratch = scratch0();
3228  ASSERT(receiver.is(r0)); // Used for parameter count.
3229  ASSERT(function.is(r1)); // Required by InvokeFunction.
3230  ASSERT(ToRegister(instr->result()).is(r0));
3231 
3232  // Copy the arguments to this function possibly from the
3233  // adaptor frame below it.
3234  const uint32_t kArgumentsLimit = 1 * KB;
3235  __ cmp(length, Operand(kArgumentsLimit));
3236  DeoptimizeIf(hi, instr->environment());
3237 
3238  // Push the receiver and use the register to keep the original
3239  // number of arguments.
3240  __ push(receiver);
3241  __ mov(receiver, length);
3242  // The arguments are at a one pointer size offset from elements.
3243  __ add(elements, elements, Operand(1 * kPointerSize));
3244 
3245  // Loop through the arguments pushing them onto the execution
3246  // stack.
3247  Label invoke, loop;
3248  // length is a small non-negative integer, due to the test above.
3249  __ cmp(length, Operand(0));
3250  __ b(eq, &invoke);
3251  __ bind(&loop);
3252  __ ldr(scratch, MemOperand(elements, length, LSL, 2));
3253  __ push(scratch);
3254  __ sub(length, length, Operand(1), SetCC);
3255  __ b(ne, &loop);
3256 
3257  __ bind(&invoke);
3258  ASSERT(instr->HasPointerMap());
3259  LPointerMap* pointers = instr->pointer_map();
3260  RecordPosition(pointers->position());
3261  SafepointGenerator safepoint_generator(
3262  this, pointers, Safepoint::kLazyDeopt);
3263  // The number of arguments is stored in receiver which is r0, as expected
3264  // by InvokeFunction.
3265  ParameterCount actual(receiver);
3266  __ InvokeFunction(function, actual, CALL_FUNCTION,
3267  safepoint_generator, CALL_AS_METHOD);
3268  __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
3269 }
3270 
3271 
3272 void LCodeGen::DoPushArgument(LPushArgument* instr) {
3273  LOperand* argument = instr->value();
3274  if (argument->IsDoubleRegister() || argument->IsDoubleStackSlot()) {
3275  Abort("DoPushArgument not implemented for double type.");
3276  } else {
3277  Register argument_reg = EmitLoadRegister(argument, ip);
3278  __ push(argument_reg);
3279  }
3280 }
3281 
3282 
3283 void LCodeGen::DoDrop(LDrop* instr) {
3284  __ Drop(instr->count());
3285 }
3286 
3287 
3288 void LCodeGen::DoThisFunction(LThisFunction* instr) {
3289  Register result = ToRegister(instr->result());
3290  __ ldr(result, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
3291 }
3292 
3293 
3294 void LCodeGen::DoContext(LContext* instr) {
3295  Register result = ToRegister(instr->result());
3296  __ mov(result, cp);
3297 }
3298 
3299 
3300 void LCodeGen::DoOuterContext(LOuterContext* instr) {
3301  Register context = ToRegister(instr->context());
3302  Register result = ToRegister(instr->result());
3303  __ ldr(result,
3304  MemOperand(context, Context::SlotOffset(Context::PREVIOUS_INDEX)));
3305 }
3306 
3307 
3308 void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
3309  __ push(cp); // The context is the first argument.
3310  __ LoadHeapObject(scratch0(), instr->hydrogen()->pairs());
3311  __ push(scratch0());
3312  __ mov(scratch0(), Operand(Smi::FromInt(instr->hydrogen()->flags())));
3313  __ push(scratch0());
3314  CallRuntime(Runtime::kDeclareGlobals, 3, instr);
3315 }
3316 
3317 
3318 void LCodeGen::DoGlobalObject(LGlobalObject* instr) {
3319  Register result = ToRegister(instr->result());
3320  __ ldr(result, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
3321 }
3322 
3323 
3324 void LCodeGen::DoGlobalReceiver(LGlobalReceiver* instr) {
3325  Register global = ToRegister(instr->global_object());
3326  Register result = ToRegister(instr->result());
3327  __ ldr(result, FieldMemOperand(global, GlobalObject::kGlobalReceiverOffset));
3328 }
3329 
3330 
3331 void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
3332  int arity,
3333  LInstruction* instr,
3334  CallKind call_kind,
3335  R1State r1_state) {
3336  bool can_invoke_directly = !function->NeedsArgumentsAdaption() ||
3337  function->shared()->formal_parameter_count() == arity;
3338 
3339  LPointerMap* pointers = instr->pointer_map();
3340  RecordPosition(pointers->position());
3341 
3342  if (can_invoke_directly) {
3343  if (r1_state == R1_UNINITIALIZED) {
3344  __ LoadHeapObject(r1, function);
3345  }
3346 
3347  // Change context.
3348  __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
3349 
3350  // Set r0 to arguments count if adaption is not needed. Assumes that r0
3351  // is available to write to at this point.
3352  if (!function->NeedsArgumentsAdaption()) {
3353  __ mov(r0, Operand(arity));
3354  }
3355 
3356  // Invoke function.
3357  __ SetCallKind(r5, call_kind);
3358  __ ldr(ip, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
3359  __ Call(ip);
3360 
3361  // Set up deoptimization.
3362  RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
3363  } else {
3364  SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
3365  ParameterCount count(arity);
3366  __ InvokeFunction(function, count, CALL_FUNCTION, generator, call_kind);
3367  }
3368 
3369  // Restore context.
3370  __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
3371 }
3372 
3373 
3374 void LCodeGen::DoCallConstantFunction(LCallConstantFunction* instr) {
3375  ASSERT(ToRegister(instr->result()).is(r0));
3376  CallKnownFunction(instr->function(),
3377  instr->arity(),
3378  instr,
3379  CALL_AS_METHOD,
3380  R1_UNINITIALIZED);
3381 }
3382 
3383 
3384 void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr) {
3385  Register input = ToRegister(instr->value());
3386  Register result = ToRegister(instr->result());
3387  Register scratch = scratch0();
3388 
3389  // Deoptimize if not a heap number.
3390  __ ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
3391  __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
3392  __ cmp(scratch, Operand(ip));
3393  DeoptimizeIf(ne, instr->environment());
3394 
3395  Label done;
3396  Register exponent = scratch0();
3397  scratch = no_reg;
3398  __ ldr(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));
3399  // Check the sign of the argument. If the argument is positive, just
3400  // return it.
3401  __ tst(exponent, Operand(HeapNumber::kSignMask));
3402  // Move the input to the result if necessary.
3403  __ Move(result, input);
3404  __ b(eq, &done);
3405 
3406  // Input is negative. Reverse its sign.
3407  // Preserve the value of all registers.
3408  {
3409  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
3410 
3411  // Registers were saved at the safepoint, so we can use
3412  // many scratch registers.
3413  Register tmp1 = input.is(r1) ? r0 : r1;
3414  Register tmp2 = input.is(r2) ? r0 : r2;
3415  Register tmp3 = input.is(r3) ? r0 : r3;
3416  Register tmp4 = input.is(r4) ? r0 : r4;
3417 
3418  // exponent: floating point exponent value.
3419 
3420  Label allocated, slow;
3421  __ LoadRoot(tmp4, Heap::kHeapNumberMapRootIndex);
3422  __ AllocateHeapNumber(tmp1, tmp2, tmp3, tmp4, &slow);
3423  __ b(&allocated);
3424 
3425  // Slow case: Call the runtime system to do the number allocation.
3426  __ bind(&slow);
3427 
3428  CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
3429  // Set the pointer to the new heap number in tmp.
3430  if (!tmp1.is(r0)) __ mov(tmp1, Operand(r0));
3431  // Restore input_reg after call to runtime.
3432  __ LoadFromSafepointRegisterSlot(input, input);
3433  __ ldr(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));
3434 
3435  __ bind(&allocated);
3436  // exponent: floating point exponent value.
3437  // tmp1: allocated heap number.
3438  __ bic(exponent, exponent, Operand(HeapNumber::kSignMask));
3439  __ str(exponent, FieldMemOperand(tmp1, HeapNumber::kExponentOffset));
3440  __ ldr(tmp2, FieldMemOperand(input, HeapNumber::kMantissaOffset));
3441  __ str(tmp2, FieldMemOperand(tmp1, HeapNumber::kMantissaOffset));
3442 
3443  __ StoreToSafepointRegisterSlot(tmp1, result);
3444  }
3445 
3446  __ bind(&done);
3447 }
3448 
3449 
3450 void LCodeGen::EmitIntegerMathAbs(LUnaryMathOperation* instr) {
3451  Register input = ToRegister(instr->value());
3452  Register result = ToRegister(instr->result());
3453  __ cmp(input, Operand(0));
3454  __ Move(result, input, pl);
3455  // We can make rsb conditional because the previous cmp instruction
3456  // will clear the V (overflow) flag and rsb won't set this flag
3457  // if input is positive.
3458  __ rsb(result, input, Operand(0), SetCC, mi);
3459  // Deoptimize on overflow.
3460  DeoptimizeIf(vs, instr->environment());
3461 }
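
// Stand-alone sketch of the overflow case the code above guards against:
// negating the most negative 32-bit integer is not representable, which is
// exactly when the conditional rsb sets the V flag and the code deoptimizes
// instead of returning a wrong "absolute value". Int32Abs is an illustrative
// helper, not a V8 function.
#include <cstdint>
#include <optional>

static std::optional<int32_t> Int32Abs(int32_t input) {
  if (input >= 0) return input;                 // Move(result, input, pl)
  if (input == INT32_MIN) return std::nullopt;  // rsb would overflow -> deopt
  return -input;                                // rsb(result, input, 0, mi)
}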
3462 
3463 
3464 void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) {
3465  // Class for deferred case.
3466  class DeferredMathAbsTaggedHeapNumber: public LDeferredCode {
3467  public:
3468  DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen,
3469  LUnaryMathOperation* instr)
3470  : LDeferredCode(codegen), instr_(instr) { }
3471  virtual void Generate() {
3472  codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
3473  }
3474  virtual LInstruction* instr() { return instr_; }
3475  private:
3476  LUnaryMathOperation* instr_;
3477  };
3478 
3479  Representation r = instr->hydrogen()->value()->representation();
3480  if (r.IsDouble()) {
3481  DwVfpRegister input = ToDoubleRegister(instr->value());
3482  DwVfpRegister result = ToDoubleRegister(instr->result());
3483  __ vabs(result, input);
3484  } else if (r.IsInteger32()) {
3485  EmitIntegerMathAbs(instr);
3486  } else {
3487  // Representation is tagged.
3488  DeferredMathAbsTaggedHeapNumber* deferred =
3489  new(zone()) DeferredMathAbsTaggedHeapNumber(this, instr);
3490  Register input = ToRegister(instr->value());
3491  // Smi check.
3492  __ JumpIfNotSmi(input, deferred->entry());
3493  // If smi, handle it directly.
3494  EmitIntegerMathAbs(instr);
3495  __ bind(deferred->exit());
3496  }
3497 }
3498 
3499 
3500 void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) {
3501  DoubleRegister input = ToDoubleRegister(instr->value());
3502  Register result = ToRegister(instr->result());
3503  Register scratch = scratch0();
3504 
3505  __ EmitVFPTruncate(kRoundToMinusInf,
3506  result,
3507  input,
3508  scratch,
3509  double_scratch0());
3510  DeoptimizeIf(ne, instr->environment());
3511 
3512  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3513  // Test for -0.
3514  Label done;
3515  __ cmp(result, Operand(0));
3516  __ b(ne, &done);
3517  __ vmov(scratch, input.high());
3518  __ tst(scratch, Operand(HeapNumber::kSignMask));
3519  DeoptimizeIf(ne, instr->environment());
3520  __ bind(&done);
3521  }
3522 }
3523 
3524 
3525 void LCodeGen::DoMathRound(LUnaryMathOperation* instr) {
3526  DoubleRegister input = ToDoubleRegister(instr->value());
3527  Register result = ToRegister(instr->result());
3528  DwVfpRegister double_scratch1 = ToDoubleRegister(instr->temp());
3529  Register scratch = scratch0();
3530  Label done, check_sign_on_zero;
3531 
3532  // Extract exponent bits.
3533  __ vmov(result, input.high());
3534  __ ubfx(scratch,
3535  result,
3536  HeapNumber::kExponentShift,
3537  HeapNumber::kExponentBits);
3538 
3539  // If the number is in ]-0.5, +0.5[, the result is +/- 0.
3540  __ cmp(scratch, Operand(HeapNumber::kExponentBias - 2));
3541  __ mov(result, Operand(0), LeaveCC, le);
3542  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3543  __ b(le, &check_sign_on_zero);
3544  } else {
3545  __ b(le, &done);
3546  }
3547 
3548  // The following conversion will not work with numbers
3549  // outside of ]-2^32, 2^32[.
3550  __ cmp(scratch, Operand(HeapNumber::kExponentBias + 32));
3551  DeoptimizeIf(ge, instr->environment());
3552 
3553  __ Vmov(double_scratch0(), 0.5, scratch);
3554  __ vadd(double_scratch0(), input, double_scratch0());
3555 
3556  // Save the original sign for later comparison.
3557  __ and_(scratch, result, Operand(HeapNumber::kSignMask));
3558 
3559  // Check sign of the result: if the sign changed, the input
3560  // value was in [-0.5, 0[ and the result should be -0.
3561  __ vmov(result, double_scratch0().high());
3562  __ eor(result, result, Operand(scratch), SetCC);
3563  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3564  DeoptimizeIf(mi, instr->environment());
3565  } else {
3566  __ mov(result, Operand(0), LeaveCC, mi);
3567  __ b(mi, &done);
3568  }
3569 
3570  __ EmitVFPTruncate(kRoundToMinusInf,
3571  result,
3572  double_scratch0(),
3573  scratch,
3574  double_scratch1);
3575  DeoptimizeIf(ne, instr->environment());
3576 
3577  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3578  // Test for -0.
3579  __ cmp(result, Operand(0));
3580  __ b(ne, &done);
3581  __ bind(&check_sign_on_zero);
3582  __ vmov(scratch, input.high());
3583  __ tst(scratch, Operand(HeapNumber::kSignMask));
3584  DeoptimizeIf(ne, instr->environment());
3585  }
3586  __ bind(&done);
3587 }
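
// Simplified scalar model of the rounding strategy above (an illustrative
// sketch that ignores the deoptimizations for very large inputs): values
// whose biased exponent says |x| < 0.5 round straight to +/-0, everything
// else is rounded by adding 0.5 and truncating toward minus infinity, and a
// final check restores -0 for negative inputs that rounded to zero.
#include <cmath>
#include <cstdint>
#include <cstring>

static double MathRoundModel(double x) {
  uint64_t bits;
  std::memcpy(&bits, &x, sizeof(bits));
  int biased_exponent = static_cast<int>((bits >> 52) & 0x7FF);
  if (biased_exponent <= 1023 - 2) {                   // |x| < 0.5
    return std::signbit(x) ? -0.0 : 0.0;
  }
  double result = std::floor(x + 0.5);                 // round half toward +inf
  if (result == 0.0 && std::signbit(x)) return -0.0;   // e.g. x == -0.5
  return result;
}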
3588 
3589 
3590 void LCodeGen::DoMathSqrt(LUnaryMathOperation* instr) {
3591  DoubleRegister input = ToDoubleRegister(instr->value());
3592  DoubleRegister result = ToDoubleRegister(instr->result());
3593  __ vsqrt(result, input);
3594 }
3595 
3596 
3597 void LCodeGen::DoMathPowHalf(LUnaryMathOperation* instr) {
3598  DoubleRegister input = ToDoubleRegister(instr->value());
3599  DoubleRegister result = ToDoubleRegister(instr->result());
3600  DoubleRegister temp = ToDoubleRegister(instr->temp());
3601 
3602  // Note that according to ECMA-262 15.8.2.13:
3603  // Math.pow(-Infinity, 0.5) == Infinity
3604  // Math.sqrt(-Infinity) == NaN
3605  Label done;
3606  __ vmov(temp, -V8_INFINITY, scratch0());
3607  __ VFPCompareAndSetFlags(input, temp);
3608  __ vneg(result, temp, eq);
3609  __ b(&done, eq);
3610 
3611  // Add +0 to convert -0 to +0.
3612  __ vadd(result, input, kDoubleRegZero);
3613  __ vsqrt(result, result);
3614  __ bind(&done);
3615 }
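
// Scalar model of the two special cases handled above. ECMA-262 15.8.2.13
// requires Math.pow(-Infinity, 0.5) to be +Infinity even though a plain
// square root of -Infinity is NaN, and adding +0.0 first turns a -0.0 input
// into +0.0 so Math.pow(-0, 0.5) comes out as +0 rather than -0.
// PowHalfModel is an illustrative helper, not a V8 function.
#include <cmath>
#include <limits>

static double PowHalfModel(double x) {
  if (x == -std::numeric_limits<double>::infinity()) {
    return std::numeric_limits<double>::infinity();  // vneg of the -Infinity temp
  }
  return std::sqrt(x + 0.0);                         // -0.0 + 0.0 == +0.0
}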
3616 
3617 
3618 void LCodeGen::DoPower(LPower* instr) {
3619  Representation exponent_type = instr->hydrogen()->right()->representation();
3620  // Having marked this as a call, we can use any registers.
3621  // Just make sure that the input/output registers are the expected ones.
3622  ASSERT(!instr->right()->IsDoubleRegister() ||
3623  ToDoubleRegister(instr->right()).is(d2));
3624  ASSERT(!instr->right()->IsRegister() ||
3625  ToRegister(instr->right()).is(r2));
3626  ASSERT(ToDoubleRegister(instr->left()).is(d1));
3627  ASSERT(ToDoubleRegister(instr->result()).is(d3));
3628 
3629  if (exponent_type.IsTagged()) {
3630  Label no_deopt;
3631  __ JumpIfSmi(r2, &no_deopt);
3632  __ ldr(r7, FieldMemOperand(r2, HeapObject::kMapOffset));
3633  __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
3634  __ cmp(r7, Operand(ip));
3635  DeoptimizeIf(ne, instr->environment());
3636  __ bind(&no_deopt);
3637  MathPowStub stub(MathPowStub::TAGGED);
3638  __ CallStub(&stub);
3639  } else if (exponent_type.IsInteger32()) {
3640  MathPowStub stub(MathPowStub::INTEGER);
3641  __ CallStub(&stub);
3642  } else {
3643  ASSERT(exponent_type.IsDouble());
3644  MathPowStub stub(MathPowStub::DOUBLE);
3645  __ CallStub(&stub);
3646  }
3647 }
3648 
3649 
3650 void LCodeGen::DoRandom(LRandom* instr) {
3651  class DeferredDoRandom: public LDeferredCode {
3652  public:
3653  DeferredDoRandom(LCodeGen* codegen, LRandom* instr)
3654  : LDeferredCode(codegen), instr_(instr) { }
3655  virtual void Generate() { codegen()->DoDeferredRandom(instr_); }
3656  virtual LInstruction* instr() { return instr_; }
3657  private:
3658  LRandom* instr_;
3659  };
3660 
3661  DeferredDoRandom* deferred = new(zone()) DeferredDoRandom(this, instr);
3662 
3663  // Having marked this instruction as a call we can use any
3664  // registers.
3665  ASSERT(ToDoubleRegister(instr->result()).is(d7));
3666  ASSERT(ToRegister(instr->global_object()).is(r0));
3667 
3668  static const int kSeedSize = sizeof(uint32_t);
3669  STATIC_ASSERT(kPointerSize == kSeedSize);
3670 
3671  __ ldr(r0, FieldMemOperand(r0, GlobalObject::kNativeContextOffset));
3672  static const int kRandomSeedOffset =
3673  FixedArray::kHeaderSize + Context::RANDOM_SEED_INDEX * kPointerSize;
3674  __ ldr(r2, FieldMemOperand(r0, kRandomSeedOffset));
3675  // r2: FixedArray of the native context's random seeds
3676 
3677  // Load state[0].
3678  __ ldr(r1, FieldMemOperand(r2, ByteArray::kHeaderSize));
3679  __ cmp(r1, Operand(0));
3680  __ b(eq, deferred->entry());
3681  // Load state[1].
3682  __ ldr(r0, FieldMemOperand(r2, ByteArray::kHeaderSize + kSeedSize));
3683  // r1: state[0].
3684  // r0: state[1].
3685 
3686  // state[0] = 18273 * (state[0] & 0xFFFF) + (state[0] >> 16)
3687  __ and_(r3, r1, Operand(0xFFFF));
3688  __ mov(r4, Operand(18273));
3689  __ mul(r3, r3, r4);
3690  __ add(r1, r3, Operand(r1, LSR, 16));
3691  // Save state[0].
3692  __ str(r1, FieldMemOperand(r2, ByteArray::kHeaderSize));
3693 
3694  // state[1] = 36969 * (state[1] & 0xFFFF) + (state[1] >> 16)
3695  __ and_(r3, r0, Operand(0xFFFF));
3696  __ mov(r4, Operand(36969));
3697  __ mul(r3, r3, r4);
3698  __ add(r0, r3, Operand(r0, LSR, 16));
3699  // Save state[1].
3700  __ str(r0, FieldMemOperand(r2, ByteArray::kHeaderSize + kSeedSize));
3701 
3702  // Random bit pattern = (state[0] << 14) + (state[1] & 0x3FFFF)
3703  __ and_(r0, r0, Operand(0x3FFFF));
3704  __ add(r0, r0, Operand(r1, LSL, 14));
3705 
3706  __ bind(deferred->exit());
3707  // 0x41300000 is the top half of 1.0 x 2^20 as a double.
3708  // Create this constant using mov/orr to avoid PC relative load.
3709  __ mov(r1, Operand(0x41000000));
3710  __ orr(r1, r1, Operand(0x300000));
3711  // Move 0x41300000xxxxxxxx (x = random bits) to VFP.
3712  __ vmov(d7, r0, r1);
3713  // Move 0x4130000000000000 to VFP.
3714  __ mov(r0, Operand(0, RelocInfo::NONE));
3715  __ vmov(d8, r0, r1);
3716  // Subtract and store the result in the heap number.
3717  __ vsub(d7, d7, d8);
3718 }
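
// Stand-alone sketch of the sequence generated above: two 16-bit
// multiply-with-carry generators are advanced and combined into 32 random
// bits, which are then placed in the low word of a double whose high word is
// 0x41300000 (giving 2^20 + random_bits * 2^-32); subtracting 2^20 leaves a
// uniform value in [0, 1). NextRandomModel is illustrative, not V8 code.
#include <cstdint>
#include <cstring>

static double NextRandomModel(uint32_t* state0, uint32_t* state1) {
  // state[0] = 18273 * (state[0] & 0xFFFF) + (state[0] >> 16)
  *state0 = 18273u * (*state0 & 0xFFFFu) + (*state0 >> 16);
  // state[1] = 36969 * (state[1] & 0xFFFF) + (state[1] >> 16)
  *state1 = 36969u * (*state1 & 0xFFFFu) + (*state1 >> 16);
  uint32_t random_bits = (*state0 << 14) + (*state1 & 0x3FFFFu);

  uint64_t biased_bits =
      (static_cast<uint64_t>(0x41300000u) << 32) | random_bits;
  double biased;                       // 2^20 + random_bits * 2^-32
  std::memcpy(&biased, &biased_bits, sizeof(biased));
  return biased - 1048576.0;           // subtract 2^20, as the vsub does
}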
3719 
3720 
3721 void LCodeGen::DoDeferredRandom(LRandom* instr) {
3722  __ PrepareCallCFunction(1, scratch0());
3723  __ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1);
3724  // Return value is in r0.
3725 }
3726 
3727 
3728 void LCodeGen::DoMathLog(LUnaryMathOperation* instr) {
3729  ASSERT(ToDoubleRegister(instr->result()).is(d2));
3730  TranscendentalCacheStub stub(TranscendentalCache::LOG,
3731  TranscendentalCacheStub::UNTAGGED);
3732  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
3733 }
3734 
3735 
3736 void LCodeGen::DoMathTan(LUnaryMathOperation* instr) {
3737  ASSERT(ToDoubleRegister(instr->result()).is(d2));
3738  TranscendentalCacheStub stub(TranscendentalCache::TAN,
3739  TranscendentalCacheStub::UNTAGGED);
3740  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
3741 }
3742 
3743 
3744 void LCodeGen::DoMathCos(LUnaryMathOperation* instr) {
3745  ASSERT(ToDoubleRegister(instr->result()).is(d2));
3746  TranscendentalCacheStub stub(TranscendentalCache::COS,
3747  TranscendentalCacheStub::UNTAGGED);
3748  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
3749 }
3750 
3751 
3752 void LCodeGen::DoMathSin(LUnaryMathOperation* instr) {
3753  ASSERT(ToDoubleRegister(instr->result()).is(d2));
3754  TranscendentalCacheStub stub(TranscendentalCache::SIN,
3755  TranscendentalCacheStub::UNTAGGED);
3756  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
3757 }
3758 
3759 
3760 void LCodeGen::DoUnaryMathOperation(LUnaryMathOperation* instr) {
3761  switch (instr->op()) {
3762  case kMathAbs:
3763  DoMathAbs(instr);
3764  break;
3765  case kMathFloor:
3766  DoMathFloor(instr);
3767  break;
3768  case kMathRound:
3769  DoMathRound(instr);
3770  break;
3771  case kMathSqrt:
3772  DoMathSqrt(instr);
3773  break;
3774  case kMathPowHalf:
3775  DoMathPowHalf(instr);
3776  break;
3777  case kMathCos:
3778  DoMathCos(instr);
3779  break;
3780  case kMathSin:
3781  DoMathSin(instr);
3782  break;
3783  case kMathTan:
3784  DoMathTan(instr);
3785  break;
3786  case kMathLog:
3787  DoMathLog(instr);
3788  break;
3789  default:
3790  Abort("Unimplemented type of LUnaryMathOperation.");
3791  UNREACHABLE();
3792  }
3793 }
3794 
3795 
3796 void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
3797  ASSERT(ToRegister(instr->function()).is(r1));
3798  ASSERT(instr->HasPointerMap());
3799 
3800  if (instr->known_function().is_null()) {
3801  LPointerMap* pointers = instr->pointer_map();
3802  RecordPosition(pointers->position());
3803  SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
3804  ParameterCount count(instr->arity());
3805  __ InvokeFunction(r1, count, CALL_FUNCTION, generator, CALL_AS_METHOD);
3806  __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
3807  } else {
3808  CallKnownFunction(instr->known_function(),
3809  instr->arity(),
3810  instr,
3811  CALL_AS_METHOD,
3812  R1_CONTAINS_TARGET);
3813  }
3814 }
3815 
3816 
3817 void LCodeGen::DoCallKeyed(LCallKeyed* instr) {
3818  ASSERT(ToRegister(instr->result()).is(r0));
3819 
3820  int arity = instr->arity();
3821  Handle<Code> ic =
3822  isolate()->stub_cache()->ComputeKeyedCallInitialize(arity);
3823  CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS);
3824  __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
3825 }
3826 
3827 
3828 void LCodeGen::DoCallNamed(LCallNamed* instr) {
3829  ASSERT(ToRegister(instr->result()).is(r0));
3830 
3831  int arity = instr->arity();
3832  RelocInfo::Mode mode = RelocInfo::CODE_TARGET;
3833  Handle<Code> ic =
3834  isolate()->stub_cache()->ComputeCallInitialize(arity, mode);
3835  __ mov(r2, Operand(instr->name()));
3836  CallCode(ic, mode, instr, NEVER_INLINE_TARGET_ADDRESS);
3837  // Restore context register.
3838  __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
3839 }
3840 
3841 
3842 void LCodeGen::DoCallFunction(LCallFunction* instr) {
3843  ASSERT(ToRegister(instr->function()).is(r1));
3844  ASSERT(ToRegister(instr->result()).is(r0));
3845 
3846  int arity = instr->arity();
3847  CallFunctionStub stub(arity, NO_CALL_FUNCTION_FLAGS);
3848  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
3849  __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
3850 }
3851 
3852 
3853 void LCodeGen::DoCallGlobal(LCallGlobal* instr) {
3854  ASSERT(ToRegister(instr->result()).is(r0));
3855 
3856  int arity = instr->arity();
3857  RelocInfo::Mode mode = RelocInfo::CODE_TARGET_CONTEXT;
3858  Handle<Code> ic =
3859  isolate()->stub_cache()->ComputeCallInitialize(arity, mode);
3860  __ mov(r2, Operand(instr->name()));
3861  CallCode(ic, mode, instr, NEVER_INLINE_TARGET_ADDRESS);
3862  __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
3863 }
3864 
3865 
3866 void LCodeGen::DoCallKnownGlobal(LCallKnownGlobal* instr) {
3867  ASSERT(ToRegister(instr->result()).is(r0));
3868  CallKnownFunction(instr->target(),
3869  instr->arity(),
3870  instr,
3871  CALL_AS_FUNCTION,
3872  R1_UNINITIALIZED);
3873 }
3874 
3875 
3876 void LCodeGen::DoCallNew(LCallNew* instr) {
3877  ASSERT(ToRegister(instr->constructor()).is(r1));
3878  ASSERT(ToRegister(instr->result()).is(r0));
3879 
3880  CallConstructStub stub(NO_CALL_FUNCTION_FLAGS);
3881  __ mov(r0, Operand(instr->arity()));
3882  CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
3883 }
3884 
3885 
3886 void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
3887  CallRuntime(instr->function(), instr->arity(), instr);
3888 }
3889 
3890 
3891 void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
3892  Register object = ToRegister(instr->object());
3893  Register value = ToRegister(instr->value());
3894  Register scratch = scratch0();
3895  int offset = instr->offset();
3896 
3897  ASSERT(!object.is(value));
3898 
3899  if (!instr->transition().is_null()) {
3900  __ mov(scratch, Operand(instr->transition()));
3901  __ str(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
3902  if (instr->hydrogen()->NeedsWriteBarrierForMap()) {
3903  Register temp = ToRegister(instr->temp());
3904  // Update the write barrier for the map field.
3905  __ RecordWriteField(object,
3906  HeapObject::kMapOffset,
3907  scratch,
3908  temp,
3909  kLRHasBeenSaved,
3910  kSaveFPRegs,
3911  OMIT_REMEMBERED_SET,
3912  OMIT_SMI_CHECK);
3913  }
3914  }
3915 
3916  // Do the store.
3917  HType type = instr->hydrogen()->value()->type();
3918  SmiCheck check_needed =
3919  type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
3920  if (instr->is_in_object()) {
3921  __ str(value, FieldMemOperand(object, offset));
3922  if (instr->hydrogen()->NeedsWriteBarrier()) {
3923  // Update the write barrier for the object for in-object properties.
3924  __ RecordWriteField(object,
3925  offset,
3926  value,
3927  scratch,
3928  kLRHasBeenSaved,
3929  kSaveFPRegs,
3930  EMIT_REMEMBERED_SET,
3931  check_needed);
3932  }
3933  } else {
3934  __ ldr(scratch, FieldMemOperand(object, JSObject::kPropertiesOffset));
3935  __ str(value, FieldMemOperand(scratch, offset));
3936  if (instr->hydrogen()->NeedsWriteBarrier()) {
3937  // Update the write barrier for the properties array.
3938  // object is used as a scratch register.
3939  __ RecordWriteField(scratch,
3940  offset,
3941  value,
3942  object,
3943  kLRHasBeenSaved,
3944  kSaveFPRegs,
3945  EMIT_REMEMBERED_SET,
3946  check_needed);
3947  }
3948  }
3949 }
3950 
3951 
3952 void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
3953  ASSERT(ToRegister(instr->object()).is(r1));
3954  ASSERT(ToRegister(instr->value()).is(r0));
3955 
3956  // Name is always in r2.
3957  __ mov(r2, Operand(instr->name()));
3958  Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
3959  ? isolate()->builtins()->StoreIC_Initialize_Strict()
3960  : isolate()->builtins()->StoreIC_Initialize();
3961  CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS);
3962 }
3963 
3964 
3965 void LCodeGen::DeoptIfTaggedButNotSmi(LEnvironment* environment,
3966  HValue* value,
3967  LOperand* operand) {
3968  if (value->representation().IsTagged() && !value->type().IsSmi()) {
3969  if (operand->IsRegister()) {
3970  __ tst(ToRegister(operand), Operand(kSmiTagMask));
3971  } else {
3972  __ mov(ip, ToOperand(operand));
3973  __ tst(ip, Operand(kSmiTagMask));
3974  }
3975  DeoptimizeIf(ne, environment);
3976  }
3977 }
3978 
3979 
3980 void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
3981  DeoptIfTaggedButNotSmi(instr->environment(),
3982  instr->hydrogen()->length(),
3983  instr->length());
3984  DeoptIfTaggedButNotSmi(instr->environment(),
3985  instr->hydrogen()->index(),
3986  instr->index());
3987  if (instr->index()->IsConstantOperand()) {
3988  int constant_index =
3989  ToInteger32(LConstantOperand::cast(instr->index()));
3990  if (instr->hydrogen()->length()->representation().IsTagged()) {
3991  __ mov(ip, Operand(Smi::FromInt(constant_index)));
3992  } else {
3993  __ mov(ip, Operand(constant_index));
3994  }
3995  __ cmp(ip, ToRegister(instr->length()));
3996  } else {
3997  __ cmp(ToRegister(instr->index()), ToRegister(instr->length()));
3998  }
3999  DeoptimizeIf(hs, instr->environment());
4000 }
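
// Sketch of the comparison convention above: the generated code compares
// index against length and deoptimizes on "hs" (unsigned >=), so a negative
// index, which looks like a huge unsigned value, fails the check as well.
// IndexInBounds is an illustrative helper, not a V8 function.
#include <cstdint>

static bool IndexInBounds(int32_t index, int32_t length) {
  return static_cast<uint32_t>(index) < static_cast<uint32_t>(length);
}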
4001 
4002 
4003 void LCodeGen::DoStoreKeyedFastElement(LStoreKeyedFastElement* instr) {
4004  Register value = ToRegister(instr->value());
4005  Register elements = ToRegister(instr->object());
4006  Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) : no_reg;
4007  Register scratch = scratch0();
4008  Register store_base = scratch;
4009  int offset = 0;
4010 
4011  // Do the store.
4012  if (instr->key()->IsConstantOperand()) {
4013  ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
4014  LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
4015  offset = FixedArray::OffsetOfElementAt(ToInteger32(const_operand) +
4016  instr->additional_index());
4017  store_base = elements;
4018  } else {
4019  // Even though the HLoadKeyedFastElement instruction forces the input
4020  // representation for the key to be an integer, the input gets replaced
4021  // during bound check elimination with the index argument to the bounds
4022  // check, which can be tagged, so that case must be handled here, too.
4023  if (instr->hydrogen()->key()->representation().IsTagged()) {
4024  __ add(scratch, elements,
4025  Operand(key, LSL, kPointerSizeLog2 - kSmiTagSize));
4026  } else {
4027  __ add(scratch, elements, Operand(key, LSL, kPointerSizeLog2));
4028  }
4029  offset = FixedArray::OffsetOfElementAt(instr->additional_index());
4030  }
4031  __ str(value, FieldMemOperand(store_base, offset));
4032 
4033  if (instr->hydrogen()->NeedsWriteBarrier()) {
4034  HType type = instr->hydrogen()->value()->type();
4035  SmiCheck check_needed =
4036  type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
4037  // Compute address of modified element and store it into key register.
4038  __ add(key, store_base, Operand(offset - kHeapObjectTag));
4039  __ RecordWrite(elements,
4040  key,
4041  value,
4042  kLRHasBeenSaved,
4043  kSaveFPRegs,
4044  EMIT_REMEMBERED_SET,
4045  check_needed);
4046  }
4047 }
4048 
4049 
4050 void LCodeGen::DoStoreKeyedFastDoubleElement(
4051  LStoreKeyedFastDoubleElement* instr) {
4052  DwVfpRegister value = ToDoubleRegister(instr->value());
4053  Register elements = ToRegister(instr->elements());
4054  Register key = no_reg;
4055  Register scratch = scratch0();
4056  bool key_is_constant = instr->key()->IsConstantOperand();
4057  int constant_key = 0;
4058 
4059  // Calculate the effective address of the slot in the array to store the
4060  // double value.
4061  if (key_is_constant) {
4062  constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
4063  if (constant_key & 0xF0000000) {
4064  Abort("array index constant value too big.");
4065  }
4066  } else {
4067  key = ToRegister(instr->key());
4068  }
4069  int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
4070  int shift_size = (instr->hydrogen()->key()->representation().IsTagged())
4071  ? (element_size_shift - kSmiTagSize) : element_size_shift;
4072  Operand operand = key_is_constant
4073  ? Operand((constant_key << element_size_shift) +
4074  FixedDoubleArray::kHeaderSize - kHeapObjectTag)
4075  : Operand(key, LSL, shift_size);
4076  __ add(scratch, elements, operand);
4077  if (!key_is_constant) {
4078  __ add(scratch, scratch,
4079  Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
4080  }
4081 
4082  if (instr->NeedsCanonicalization()) {
4083  // Check for NaN. All NaNs must be canonicalized.
4084  __ VFPCompareAndSetFlags(value, value);
4085  // Only load canonical NaN if the comparison above set the overflow.
4086  __ Vmov(value,
4087  FixedDoubleArray::canonical_not_the_hole_nan_as_double(),
4088  no_reg, vs);
4089  }
4090 
4091  __ vstr(value, scratch, instr->additional_index() << element_size_shift);
4092 }
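
// Sketch of the canonicalization step above: VFPCompareAndSetFlags leaves the
// overflow flag set exactly when value is unordered with itself, i.e. a NaN,
// and in that case one fixed "canonical" NaN bit pattern is stored instead so
// that other NaN payloads (such as the hole marker) never appear in a fast
// double array. The bit pattern below is an assumed quiet-NaN example, not
// necessarily the exact constant V8 uses.
#include <cstdint>
#include <cstring>

static double CanonicalizeNaN(double value) {
  if (value != value) {                                // true only for NaN
    const uint64_t kCanonicalQuietNaN = 0x7FF8000000000000ull;
    std::memcpy(&value, &kCanonicalQuietNaN, sizeof(value));
  }
  return value;
}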
4093 
4094 
4095 void LCodeGen::DoStoreKeyedSpecializedArrayElement(
4096  LStoreKeyedSpecializedArrayElement* instr) {
4097 
4098  Register external_pointer = ToRegister(instr->external_pointer());
4099  Register key = no_reg;
4100  ElementsKind elements_kind = instr->elements_kind();
4101  bool key_is_constant = instr->key()->IsConstantOperand();
4102  int constant_key = 0;
4103  if (key_is_constant) {
4104  constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
4105  if (constant_key & 0xF0000000) {
4106  Abort("array index constant value too big.");
4107  }
4108  } else {
4109  key = ToRegister(instr->key());
4110  }
4111  int element_size_shift = ElementsKindToShiftSize(elements_kind);
4112  int shift_size = (instr->hydrogen()->key()->representation().IsTagged())
4113  ? (element_size_shift - kSmiTagSize) : element_size_shift;
4114  int additional_offset = instr->additional_index() << element_size_shift;
4115 
4116  if (elements_kind == EXTERNAL_FLOAT_ELEMENTS ||
4117  elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
4118  CpuFeatures::Scope scope(VFP3);
4119  DwVfpRegister value(ToDoubleRegister(instr->value()));
4120  Operand operand(key_is_constant
4121  ? Operand(constant_key << element_size_shift)
4122  : Operand(key, LSL, shift_size));
4123  __ add(scratch0(), external_pointer, operand);
4124  if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
4125  __ vcvt_f32_f64(double_scratch0().low(), value);
4126  __ vstr(double_scratch0().low(), scratch0(), additional_offset);
4127  } else { // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS
4128  __ vstr(value, scratch0(), additional_offset);
4129  }
4130  } else {
4131  Register value(ToRegister(instr->value()));
4132  MemOperand mem_operand = PrepareKeyedOperand(
4133  key, external_pointer, key_is_constant, constant_key,
4134  element_size_shift, shift_size,
4135  instr->additional_index(), additional_offset);
4136  switch (elements_kind) {
4137  case EXTERNAL_PIXEL_ELEMENTS:
4138  case EXTERNAL_BYTE_ELEMENTS:
4139  case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
4140  __ strb(value, mem_operand);
4141  break;
4142  case EXTERNAL_SHORT_ELEMENTS:
4143  case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
4144  __ strh(value, mem_operand);
4145  break;
4146  case EXTERNAL_INT_ELEMENTS:
4147  case EXTERNAL_UNSIGNED_INT_ELEMENTS:
4148  __ str(value, mem_operand);
4149  break;
4150  case EXTERNAL_FLOAT_ELEMENTS:
4151  case EXTERNAL_DOUBLE_ELEMENTS:
4152  case FAST_DOUBLE_ELEMENTS:
4153  case FAST_ELEMENTS:
4154  case FAST_SMI_ELEMENTS:
4155  case FAST_HOLEY_DOUBLE_ELEMENTS:
4156  case FAST_HOLEY_ELEMENTS:
4157  case FAST_HOLEY_SMI_ELEMENTS:
4158  case DICTIONARY_ELEMENTS:
4159  case NON_STRICT_ARGUMENTS_ELEMENTS:
4160  UNREACHABLE();
4161  break;
4162  }
4163  }
4164 }
4165 
4166 
4167 void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
4168  ASSERT(ToRegister(instr->object()).is(r2));
4169  ASSERT(ToRegister(instr->key()).is(r1));
4170  ASSERT(ToRegister(instr->value()).is(r0));
4171 
4172  Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
4173  ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
4174  : isolate()->builtins()->KeyedStoreIC_Initialize();
4175  CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS);
4176 }
4177 
4178 
4179 void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
4180  Register object_reg = ToRegister(instr->object());
4181  Register new_map_reg = ToRegister(instr->new_map_temp());
4182  Register scratch = scratch0();
4183 
4184  Handle<Map> from_map = instr->original_map();
4185  Handle<Map> to_map = instr->transitioned_map();
4186  ElementsKind from_kind = from_map->elements_kind();
4187  ElementsKind to_kind = to_map->elements_kind();
4188 
4189  Label not_applicable;
4190  __ ldr(scratch, FieldMemOperand(object_reg, HeapObject::kMapOffset));
4191  __ cmp(scratch, Operand(from_map));
4192  __ b(ne, &not_applicable);
4193  __ mov(new_map_reg, Operand(to_map));
4194 
4195  if (IsSimpleMapChangeTransition(from_kind, to_kind)) {
4196  __ str(new_map_reg, FieldMemOperand(object_reg, HeapObject::kMapOffset));
4197  // Write barrier.
4198  __ RecordWriteField(object_reg, HeapObject::kMapOffset, new_map_reg,
4199  scratch, kLRHasBeenSaved, kDontSaveFPRegs);
4200  } else if (IsFastSmiElementsKind(from_kind) &&
4201  IsFastDoubleElementsKind(to_kind)) {
4202  Register fixed_object_reg = ToRegister(instr->temp());
4203  ASSERT(fixed_object_reg.is(r2));
4204  ASSERT(new_map_reg.is(r3));
4205  __ mov(fixed_object_reg, object_reg);
4206  CallCode(isolate()->builtins()->TransitionElementsSmiToDouble(),
4207  RelocInfo::CODE_TARGET, instr);
4208  } else if (IsFastDoubleElementsKind(from_kind) &&
4209  IsFastObjectElementsKind(to_kind)) {
4210  Register fixed_object_reg = ToRegister(instr->temp());
4211  ASSERT(fixed_object_reg.is(r2));
4212  ASSERT(new_map_reg.is(r3));
4213  __ mov(fixed_object_reg, object_reg);
4214  CallCode(isolate()->builtins()->TransitionElementsDoubleToObject(),
4215  RelocInfo::CODE_TARGET, instr);
4216  } else {
4217  UNREACHABLE();
4218  }
4219  __ bind(&not_applicable);
4220 }
4221 
4222 
4223 void LCodeGen::DoStringAdd(LStringAdd* instr) {
4224  __ push(ToRegister(instr->left()));
4225  __ push(ToRegister(instr->right()));
4226  StringAddStub stub(NO_STRING_CHECK_IN_STUB);
4227  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
4228 }
4229 
4230 
4231 void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
4232  class DeferredStringCharCodeAt: public LDeferredCode {
4233  public:
4234  DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
4235  : LDeferredCode(codegen), instr_(instr) { }
4236  virtual void Generate() { codegen()->DoDeferredStringCharCodeAt(instr_); }
4237  virtual LInstruction* instr() { return instr_; }
4238  private:
4239  LStringCharCodeAt* instr_;
4240  };
4241 
4242  DeferredStringCharCodeAt* deferred =
4243  new(zone()) DeferredStringCharCodeAt(this, instr);
4244 
4245  StringCharLoadGenerator::Generate(masm(),
4246  ToRegister(instr->string()),
4247  ToRegister(instr->index()),
4248  ToRegister(instr->result()),
4249  deferred->entry());
4250  __ bind(deferred->exit());
4251 }
4252 
4253 
4254 void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
4255  Register string = ToRegister(instr->string());
4256  Register result = ToRegister(instr->result());
4257  Register scratch = scratch0();
4258 
4259  // TODO(3095996): Get rid of this. For now, we need to make the
4260  // result register contain a valid pointer because it is already
4261  // contained in the register pointer map.
4262  __ mov(result, Operand(0));
4263 
4264  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
4265  __ push(string);
4266  // Push the index as a smi. This is safe because of the checks in
4267  // DoStringCharCodeAt above.
4268  if (instr->index()->IsConstantOperand()) {
4269  int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
4270  __ mov(scratch, Operand(Smi::FromInt(const_index)));
4271  __ push(scratch);
4272  } else {
4273  Register index = ToRegister(instr->index());
4274  __ SmiTag(index);
4275  __ push(index);
4276  }
4277  CallRuntimeFromDeferred(Runtime::kStringCharCodeAt, 2, instr);
4278  __ AssertSmi(r0);
4279  __ SmiUntag(r0);
4280  __ StoreToSafepointRegisterSlot(r0, result);
4281 }
4282 
4283 
4284 void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
4285  class DeferredStringCharFromCode: public LDeferredCode {
4286  public:
4287  DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
4288  : LDeferredCode(codegen), instr_(instr) { }
4289  virtual void Generate() { codegen()->DoDeferredStringCharFromCode(instr_); }
4290  virtual LInstruction* instr() { return instr_; }
4291  private:
4292  LStringCharFromCode* instr_;
4293  };
4294 
4295  DeferredStringCharFromCode* deferred =
4296  new(zone()) DeferredStringCharFromCode(this, instr);
4297 
4298  ASSERT(instr->hydrogen()->value()->representation().IsInteger32());
4299  Register char_code = ToRegister(instr->char_code());
4300  Register result = ToRegister(instr->result());
4301  ASSERT(!char_code.is(result));
4302 
4303  __ cmp(char_code, Operand(String::kMaxAsciiCharCode));
4304  __ b(hi, deferred->entry());
4305  __ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex);
4306  __ add(result, result, Operand(char_code, LSL, kPointerSizeLog2));
4307  __ ldr(result, FieldMemOperand(result, FixedArray::kHeaderSize));
4308  __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
4309  __ cmp(result, ip);
4310  __ b(eq, deferred->entry());
4311  __ bind(deferred->exit());
4312 }
4313 
4314 
4315 void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
4316  Register char_code = ToRegister(instr->char_code());
4317  Register result = ToRegister(instr->result());
4318 
4319  // TODO(3095996): Get rid of this. For now, we need to make the
4320  // result register contain a valid pointer because it is already
4321  // contained in the register pointer map.
4322  __ mov(result, Operand(0));
4323 
4324  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
4325  __ SmiTag(char_code);
4326  __ push(char_code);
4327  CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr);
4328  __ StoreToSafepointRegisterSlot(r0, result);
4329 }
4330 
4331 
4332 void LCodeGen::DoStringLength(LStringLength* instr) {
4333  Register string = ToRegister(instr->string());
4334  Register result = ToRegister(instr->result());
4335  __ ldr(result, FieldMemOperand(string, String::kLengthOffset));
4336 }
4337 
4338 
4339 void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
4340  LOperand* input = instr->value();
4341  ASSERT(input->IsRegister() || input->IsStackSlot());
4342  LOperand* output = instr->result();
4343  ASSERT(output->IsDoubleRegister());
4344  SwVfpRegister single_scratch = double_scratch0().low();
4345  if (input->IsStackSlot()) {
4346  Register scratch = scratch0();
4347  __ ldr(scratch, ToMemOperand(input));
4348  __ vmov(single_scratch, scratch);
4349  } else {
4350  __ vmov(single_scratch, ToRegister(input));
4351  }
4352  __ vcvt_f64_s32(ToDoubleRegister(output), single_scratch);
4353 }
4354 
4355 
4356 void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
4357  LOperand* input = instr->value();
4358  LOperand* output = instr->result();
4359 
4360  SwVfpRegister flt_scratch = double_scratch0().low();
4361  __ vmov(flt_scratch, ToRegister(input));
4362  __ vcvt_f64_u32(ToDoubleRegister(output), flt_scratch);
4363 }
4364 
4365 
4366 void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
4367  class DeferredNumberTagI: public LDeferredCode {
4368  public:
4369  DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr)
4370  : LDeferredCode(codegen), instr_(instr) { }
4371  virtual void Generate() {
4372  codegen()->DoDeferredNumberTagI(instr_,
4373  instr_->value(),
4374  SIGNED_INT32);
4375  }
4376  virtual LInstruction* instr() { return instr_; }
4377  private:
4378  LNumberTagI* instr_;
4379  };
4380 
4381  Register src = ToRegister(instr->value());
4382  Register dst = ToRegister(instr->result());
4383 
4384  DeferredNumberTagI* deferred = new(zone()) DeferredNumberTagI(this, instr);
4385  __ SmiTag(dst, src, SetCC);
4386  __ b(vs, deferred->entry());
4387  __ bind(deferred->exit());
4388 }
4389 
4390 
4391 void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
4392  class DeferredNumberTagU: public LDeferredCode {
4393  public:
4394  DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr)
4395  : LDeferredCode(codegen), instr_(instr) { }
4396  virtual void Generate() {
4397  codegen()->DoDeferredNumberTagI(instr_,
4398  instr_->value(),
4399  UNSIGNED_INT32);
4400  }
4401  virtual LInstruction* instr() { return instr_; }
4402  private:
4403  LNumberTagU* instr_;
4404  };
4405 
4406  LOperand* input = instr->value();
4407  ASSERT(input->IsRegister() && input->Equals(instr->result()));
4408  Register reg = ToRegister(input);
4409 
4410  DeferredNumberTagU* deferred = new(zone()) DeferredNumberTagU(this, instr);
4411  __ cmp(reg, Operand(Smi::kMaxValue));
4412  __ b(hi, deferred->entry());
4413  __ SmiTag(reg, reg);
4414  __ bind(deferred->exit());
4415 }
4416 
4417 
4418 void LCodeGen::DoDeferredNumberTagI(LInstruction* instr,
4419  LOperand* value,
4420  IntegerSignedness signedness) {
4421  Label slow;
4422  Register src = ToRegister(value);
4423  Register dst = ToRegister(instr->result());
4424  DoubleRegister dbl_scratch = double_scratch0();
4425  SwVfpRegister flt_scratch = dbl_scratch.low();
4426 
4427  // Preserve the value of all registers.
4428  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
4429 
4430  Label done;
4431  if (signedness == SIGNED_INT32) {
4432  // There was overflow, so bits 30 and 31 of the original integer
4433  // disagree. Try to allocate a heap number in new space and store
4434  // the value in there. If that fails, call the runtime system.
4435  if (dst.is(src)) {
4436  __ SmiUntag(src, dst);
4437  __ eor(src, src, Operand(0x80000000));
4438  }
4439  __ vmov(flt_scratch, src);
4440  __ vcvt_f64_s32(dbl_scratch, flt_scratch);
4441  } else {
4442  __ vmov(flt_scratch, src);
4443  __ vcvt_f64_u32(dbl_scratch, flt_scratch);
4444  }
4445 
4446  if (FLAG_inline_new) {
4447  __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
4448  __ AllocateHeapNumber(r5, r3, r4, r6, &slow, DONT_TAG_RESULT);
4449  __ Move(dst, r5);
4450  __ b(&done);
4451  }
4452 
4453  // Slow case: Call the runtime system to do the number allocation.
4454  __ bind(&slow);
4455 
4456  // TODO(3095996): Put a valid pointer value in the stack slot where the result
4457  // register is stored, as this register is in the pointer map, but contains an
4458  // integer value.
4459  __ mov(ip, Operand(0));
4460  __ StoreToSafepointRegisterSlot(ip, dst);
4461  CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
4462  __ Move(dst, r0);
4463  __ sub(dst, dst, Operand(kHeapObjectTag));
4464 
4465  // Done. Put the value in dbl_scratch into the value of the allocated heap
4466  // number.
4467  __ bind(&done);
4468  __ vstr(dbl_scratch, dst, HeapNumber::kValueOffset);
4469  __ add(dst, dst, Operand(kHeapObjectTag));
4470  __ StoreToSafepointRegisterSlot(dst, dst);
4471 }
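
// Stand-alone sketch of the overflow this deferred path recovers from: a smi
// on ARM is the 32-bit integer shifted left by one, so tagging overflows
// exactly when bits 30 and 31 of the original value disagree. Untagging the
// overflowed result and flipping bit 31 (the eor with 0x80000000 above)
// restores the original integer, which is then stored in a heap number.
// These helpers are illustrative, not V8 functions.
#include <cstdint>

static bool SmiTagOverflows(int32_t value) {
  int32_t tagged = static_cast<int32_t>(static_cast<uint32_t>(value) << 1);
  return (tagged >> 1) != value;        // bits 30 and 31 disagreed
}

static int32_t RecoverOriginalAfterOverflow(int32_t overflowed_tagged) {
  int32_t untagged = overflowed_tagged >> 1;            // SmiUntag
  return untagged ^ static_cast<int32_t>(0x80000000u);  // restore lost sign bit
}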
4472 
4473 
4474 void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
4475  class DeferredNumberTagD: public LDeferredCode {
4476  public:
4477  DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
4478  : LDeferredCode(codegen), instr_(instr) { }
4479  virtual void Generate() { codegen()->DoDeferredNumberTagD(instr_); }
4480  virtual LInstruction* instr() { return instr_; }
4481  private:
4482  LNumberTagD* instr_;
4483  };
4484 
4485  DoubleRegister input_reg = ToDoubleRegister(instr->value());
4486  Register scratch = scratch0();
4487  Register reg = ToRegister(instr->result());
4488  Register temp1 = ToRegister(instr->temp());
4489  Register temp2 = ToRegister(instr->temp2());
4490 
4491  DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr);
4492  if (FLAG_inline_new) {
4493  __ LoadRoot(scratch, Heap::kHeapNumberMapRootIndex);
4494  // We want the untagged address first for performance
4495  __ AllocateHeapNumber(reg, temp1, temp2, scratch, deferred->entry(),
4496  DONT_TAG_RESULT);
4497  } else {
4498  __ jmp(deferred->entry());
4499  }
4500  __ bind(deferred->exit());
4501  __ vstr(input_reg, reg, HeapNumber::kValueOffset);
4502  // Now that we have finished with the object's real address tag it
4503  __ add(reg, reg, Operand(kHeapObjectTag));
4504 }
4505 
4506 
4507 void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
4508  // TODO(3095996): Get rid of this. For now, we need to make the
4509  // result register contain a valid pointer because it is already
4510  // contained in the register pointer map.
4511  Register reg = ToRegister(instr->result());
4512  __ mov(reg, Operand(0));
4513 
4514  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
4515  CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
4516  __ sub(r0, r0, Operand(kHeapObjectTag));
4517  __ StoreToSafepointRegisterSlot(r0, reg);
4518 }
4519 
4520 
4521 void LCodeGen::DoSmiTag(LSmiTag* instr) {
4522  ASSERT(!instr->hydrogen_value()->CheckFlag(HValue::kCanOverflow));
4523  __ SmiTag(ToRegister(instr->result()), ToRegister(instr->value()));
4524 }
4525 
4526 
4527 void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
4528  Register input = ToRegister(instr->value());
4529  Register result = ToRegister(instr->result());
4530  if (instr->needs_check()) {
4531  STATIC_ASSERT(kHeapObjectTag == 1);
4532  // If the input is a HeapObject, SmiUntag will set the carry flag.
4533  __ SmiUntag(result, input, SetCC);
4534  DeoptimizeIf(cs, instr->environment());
4535  } else {
4536  __ SmiUntag(result, input);
4537  }
4538 }
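
// Sketch of the check above: with kHeapObjectTag == 1 the low bit of a tagged
// value is clear for smis and set for heap-object pointers, so the single
// SmiUntag(..., SetCC) both untags and moves the tag bit into the carry flag,
// doubling as the type check. TryUntagSmi is an illustrative helper.
#include <cstdint>

static bool TryUntagSmi(uint32_t tagged, int32_t* untagged) {
  bool was_heap_object = (tagged & 1u) != 0;  // this bit becomes the carry flag
  *untagged = static_cast<int32_t>(tagged) >> 1;
  return !was_heap_object;                    // carry set => deoptimize
}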
4539 
4540 
4541 void LCodeGen::EmitNumberUntagD(Register input_reg,
4542  DoubleRegister result_reg,
4543  bool deoptimize_on_undefined,
4544  bool deoptimize_on_minus_zero,
4545  LEnvironment* env) {
4546  Register scratch = scratch0();
4547  SwVfpRegister flt_scratch = double_scratch0().low();
4548  ASSERT(!result_reg.is(double_scratch0()));
4549 
4550  Label load_smi, heap_number, done;
4551 
4552  // Smi check.
4553  __ UntagAndJumpIfSmi(scratch, input_reg, &load_smi);
4554 
4555  // Heap number map check.
4556  __ ldr(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
4557  __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
4558  __ cmp(scratch, Operand(ip));
4559  if (deoptimize_on_undefined) {
4560  DeoptimizeIf(ne, env);
4561  } else {
4562  Label heap_number;
4563  __ b(eq, &heap_number);
4564 
4565  __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
4566  __ cmp(input_reg, Operand(ip));
4567  DeoptimizeIf(ne, env);
4568 
4569  // Convert undefined to NaN.
4570  __ LoadRoot(ip, Heap::kNanValueRootIndex);
4571  __ sub(ip, ip, Operand(kHeapObjectTag));
4572  __ vldr(result_reg, ip, HeapNumber::kValueOffset);
4573  __ jmp(&done);
4574 
4575  __ bind(&heap_number);
4576  }
4577  // Heap number to double register conversion.
4578  __ sub(ip, input_reg, Operand(kHeapObjectTag));
4579  __ vldr(result_reg, ip, HeapNumber::kValueOffset);
4580  if (deoptimize_on_minus_zero) {
4581  __ vmov(ip, result_reg.low());
4582  __ cmp(ip, Operand(0));
4583  __ b(ne, &done);
4584  __ vmov(ip, result_reg.high());
4585  __ cmp(ip, Operand(HeapNumber::kSignMask));
4586  DeoptimizeIf(eq, env);
4587  }
4588  __ jmp(&done);
4589 
4590  // Smi to double register conversion
4591  __ bind(&load_smi);
4592  // scratch: untagged value of input_reg
4593  __ vmov(flt_scratch, scratch);
4594  __ vcvt_f64_s32(result_reg, flt_scratch);
4595  __ bind(&done);
4596 }
4597 
4598 
4599 void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
4600  Register input_reg = ToRegister(instr->value());
4601  Register scratch1 = scratch0();
4602  Register scratch2 = ToRegister(instr->temp());
4603  DwVfpRegister double_scratch = double_scratch0();
4604  DwVfpRegister double_scratch2 = ToDoubleRegister(instr->temp3());
4605 
4606  ASSERT(!scratch1.is(input_reg) && !scratch1.is(scratch2));
4607  ASSERT(!scratch2.is(input_reg) && !scratch2.is(scratch1));
4608 
4609  Label done;
4610 
4611  // The input was optimistically untagged; revert it.
4612  // The carry flag is set when we reach this deferred code as we just executed
4613  // SmiUntag(heap_object, SetCC)
4614  STATIC_ASSERT(kHeapObjectTag == 1);
4615  __ adc(input_reg, input_reg, Operand(input_reg));
4616 
4617  // Heap number map check.
4618  __ ldr(scratch1, FieldMemOperand(input_reg, HeapObject::kMapOffset));
4619  __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
4620  __ cmp(scratch1, Operand(ip));
4621 
4622  if (instr->truncating()) {
4623  Register scratch3 = ToRegister(instr->temp2());
4624  SwVfpRegister single_scratch = double_scratch.low();
4625  ASSERT(!scratch3.is(input_reg) &&
4626  !scratch3.is(scratch1) &&
4627  !scratch3.is(scratch2));
4628  // Performs a truncating conversion of a floating point number as used by
4629  // the JS bitwise operations.
4630  Label heap_number;
4631  __ b(eq, &heap_number);
4632  // Check for undefined. Undefined is converted to zero for truncating
4633  // conversions.
4634  __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
4635  __ cmp(input_reg, Operand(ip));
4636  DeoptimizeIf(ne, instr->environment());
4637  __ mov(input_reg, Operand(0));
4638  __ b(&done);
4639 
4640  __ bind(&heap_number);
4641  __ sub(scratch1, input_reg, Operand(kHeapObjectTag));
4642  __ vldr(double_scratch2, scratch1, HeapNumber::kValueOffset);
4643 
4644  __ EmitECMATruncate(input_reg,
4645  double_scratch2,
4646  single_scratch,
4647  scratch1,
4648  scratch2,
4649  scratch3);
4650 
4651  } else {
4652  CpuFeatures::Scope scope(VFP3);
4653  // Deoptimize if we don't have a heap number.
4654  DeoptimizeIf(ne, instr->environment());
4655 
4656  __ sub(ip, input_reg, Operand(kHeapObjectTag));
4657  __ vldr(double_scratch, ip, HeapNumber::kValueOffset);
4658  __ EmitVFPTruncate(kRoundToZero,
4659  input_reg,
4660  double_scratch,
4661  scratch1,
4662  double_scratch2,
4663  kCheckForInexactConversion);
4664  DeoptimizeIf(ne, instr->environment());
4665 
4666  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
4667  __ cmp(input_reg, Operand(0));
4668  __ b(ne, &done);
4669  __ vmov(scratch1, double_scratch.high());
4670  __ tst(scratch1, Operand(HeapNumber::kSignMask));
4671  DeoptimizeIf(ne, instr->environment());
4672  }
4673  }
4674  __ bind(&done);
4675 }
4676 
4677 
4678 void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
4679  class DeferredTaggedToI: public LDeferredCode {
4680  public:
4681  DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
4682  : LDeferredCode(codegen), instr_(instr) { }
4683  virtual void Generate() { codegen()->DoDeferredTaggedToI(instr_); }
4684  virtual LInstruction* instr() { return instr_; }
4685  private:
4686  LTaggedToI* instr_;
4687  };
4688 
4689  LOperand* input = instr->value();
4690  ASSERT(input->IsRegister());
4691  ASSERT(input->Equals(instr->result()));
4692 
4693  Register input_reg = ToRegister(input);
4694 
4695  DeferredTaggedToI* deferred = new(zone()) DeferredTaggedToI(this, instr);
4696 
4697  // Optimistically untag the input.
4698  // If the input is a HeapObject, SmiUntag will set the carry flag.
4699  __ SmiUntag(input_reg, SetCC);
4700  // Branch to deferred code if the input was tagged.
4701  // The deferred code will take care of restoring the tag.
4702  __ b(cs, deferred->entry());
4703  __ bind(deferred->exit());
4704 }
4705 
4706 
4707 void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
4708  LOperand* input = instr->value();
4709  ASSERT(input->IsRegister());
4710  LOperand* result = instr->result();
4711  ASSERT(result->IsDoubleRegister());
4712 
4713  Register input_reg = ToRegister(input);
4714  DoubleRegister result_reg = ToDoubleRegister(result);
4715 
4716  EmitNumberUntagD(input_reg, result_reg,
4717  instr->hydrogen()->deoptimize_on_undefined(),
4718  instr->hydrogen()->deoptimize_on_minus_zero(),
4719  instr->environment());
4720 }
4721 
4722 
4723 void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
4724  Register result_reg = ToRegister(instr->result());
4725  Register scratch1 = scratch0();
4726  Register scratch2 = ToRegister(instr->temp());
4727  DwVfpRegister double_input = ToDoubleRegister(instr->value());
4728 
4729  Label done;
4730 
4731  if (instr->truncating()) {
4732  Register scratch3 = ToRegister(instr->temp2());
4733  SwVfpRegister single_scratch = double_scratch0().low();
4734  __ EmitECMATruncate(result_reg,
4735  double_input,
4736  single_scratch,
4737  scratch1,
4738  scratch2,
4739  scratch3);
4740  } else {
4741  DwVfpRegister double_scratch = double_scratch0();
4742  __ EmitVFPTruncate(kRoundToMinusInf,
4743  result_reg,
4744  double_input,
4745  scratch1,
4746  double_scratch,
4747  kCheckForInexactConversion);
4748 
4749  // Deoptimize if we had a vfp invalid exception,
4750  // including inexact operation.
4751  DeoptimizeIf(ne, instr->environment());
4752  }
4753  __ bind(&done);
4754 }
4755 
4756 
4757 void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
4758  LOperand* input = instr->value();
4759  __ tst(ToRegister(input), Operand(kSmiTagMask));
4760  DeoptimizeIf(ne, instr->environment());
4761 }
4762 
4763 
4764 void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
4765  LOperand* input = instr->value();
4766  __ tst(ToRegister(input), Operand(kSmiTagMask));
4767  DeoptimizeIf(eq, instr->environment());
4768 }
4769 
4770 
4771 void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
4772  Register input = ToRegister(instr->value());
4773  Register scratch = scratch0();
4774 
4775  __ ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
4776  __ ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
4777 
4778  if (instr->hydrogen()->is_interval_check()) {
4779  InstanceType first;
4780  InstanceType last;
4781  instr->hydrogen()->GetCheckInterval(&first, &last);
4782 
4783  __ cmp(scratch, Operand(first));
4784 
4785  // If there is only one type in the interval check for equality.
4786  if (first == last) {
4787  DeoptimizeIf(ne, instr->environment());
4788  } else {
4789  DeoptimizeIf(lo, instr->environment());
4790  // Omit check for the last type.
4791  if (last != LAST_TYPE) {
4792  __ cmp(scratch, Operand(last));
4793  DeoptimizeIf(hi, instr->environment());
4794  }
4795  }
4796  } else {
4797  uint8_t mask;
4798  uint8_t tag;
4799  instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);
4800 
4801  if (IsPowerOf2(mask)) {
4802  ASSERT(tag == 0 || IsPowerOf2(tag));
4803  __ tst(scratch, Operand(mask));
4804  DeoptimizeIf(tag == 0 ? ne : eq, instr->environment());
4805  } else {
4806  __ and_(scratch, scratch, Operand(mask));
4807  __ cmp(scratch, Operand(tag));
4808  DeoptimizeIf(ne, instr->environment());
4809  }
4810  }
4811 }
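
// Sketch of the fast path above: when the instance-type mask is a single bit
// and the expected tag is either zero or that same bit, one tst (an AND that
// only sets flags) replaces the and_/cmp pair. InstanceTypeMatches is an
// illustrative helper, not a V8 function.
#include <cstdint>

static bool InstanceTypeMatches(uint8_t type, uint8_t mask, uint8_t tag) {
  bool mask_is_single_bit = mask != 0 && (mask & (mask - 1)) == 0;
  if (mask_is_single_bit && (tag == 0 || tag == mask)) {
    return ((type & mask) != 0) == (tag != 0);  // tst + conditional deopt
  }
  return (type & mask) == tag;                  // and_ + cmp + deopt on ne
}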
4812 
4813 
4814 void LCodeGen::DoCheckFunction(LCheckFunction* instr) {
4815  Register reg = ToRegister(instr->value());
4816  Handle<JSFunction> target = instr->hydrogen()->target();
4817  if (isolate()->heap()->InNewSpace(*target)) {
4818  Register reg = ToRegister(instr->value());
4819  Handle<JSGlobalPropertyCell> cell =
4820  isolate()->factory()->NewJSGlobalPropertyCell(target);
4821  __ mov(ip, Operand(Handle<Object>(cell)));
4822  __ ldr(ip, FieldMemOperand(ip, JSGlobalPropertyCell::kValueOffset));
4823  __ cmp(reg, ip);
4824  } else {
4825  __ cmp(reg, Operand(target));
4826  }
4827  DeoptimizeIf(ne, instr->environment());
4828 }
4829 
4830 
4831 void LCodeGen::DoCheckMapCommon(Register reg,
4832  Register scratch,
4833  Handle<Map> map,
4834  CompareMapMode mode,
4835  LEnvironment* env) {
4836  Label success;
4837  __ CompareMap(reg, scratch, map, &success, mode);
4838  DeoptimizeIf(ne, env);
4839  __ bind(&success);
4840 }
4841 
4842 
4843 void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
4844  Register scratch = scratch0();
4845  LOperand* input = instr->value();
4846  ASSERT(input->IsRegister());
4847  Register reg = ToRegister(input);
4848 
4849  Label success;
4850  SmallMapList* map_set = instr->hydrogen()->map_set();
4851  for (int i = 0; i < map_set->length() - 1; i++) {
4852  Handle<Map> map = map_set->at(i);
4853  __ CompareMap(reg, scratch, map, &success, REQUIRE_EXACT_MAP);
4854  __ b(eq, &success);
4855  }
4856  Handle<Map> map = map_set->last();
4857  DoCheckMapCommon(reg, scratch, map, REQUIRE_EXACT_MAP, instr->environment());
4858  __ bind(&success);
4859 }
4860 
4861 
4862 void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
4863  DoubleRegister value_reg = ToDoubleRegister(instr->unclamped());
4864  Register result_reg = ToRegister(instr->result());
4865  DoubleRegister temp_reg = ToDoubleRegister(instr->temp());
4866  __ ClampDoubleToUint8(result_reg, value_reg, temp_reg);
4867 }
4868 
4869 
4870 void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
4871  Register unclamped_reg = ToRegister(instr->unclamped());
4872  Register result_reg = ToRegister(instr->result());
4873  __ ClampUint8(result_reg, unclamped_reg);
4874 }
4875 
4876 
4877 void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
4878  Register scratch = scratch0();
4879  Register input_reg = ToRegister(instr->unclamped());
4880  Register result_reg = ToRegister(instr->result());
4881  DoubleRegister temp_reg = ToDoubleRegister(instr->temp());
4882  Label is_smi, done, heap_number;
4883 
4884  // Both smi and heap number cases are handled.
4885  __ UntagAndJumpIfSmi(result_reg, input_reg, &is_smi);
4886 
4887  // Check for heap number
4888  __ ldr(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
4889  __ cmp(scratch, Operand(factory()->heap_number_map()));
4890  __ b(eq, &heap_number);
4891 
4892  // Check for undefined. Undefined is converted to zero for clamping
4893  // conversions.
4894  __ cmp(input_reg, Operand(factory()->undefined_value()));
4895  DeoptimizeIf(ne, instr->environment());
4896  __ mov(result_reg, Operand(0));
4897  __ jmp(&done);
4898 
4899  // Heap number
4900  __ bind(&heap_number);
4901  __ vldr(double_scratch0(), FieldMemOperand(input_reg,
4902  HeapNumber::kValueOffset));
4903  __ ClampDoubleToUint8(result_reg, double_scratch0(), temp_reg);
4904  __ jmp(&done);
4905 
4906  // smi
4907  __ bind(&is_smi);
4908  __ ClampUint8(result_reg, result_reg);
4909 
4910  __ bind(&done);
4911 }
4912 
4913 
4914 void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
4915  Register temp1 = ToRegister(instr->temp());
4916  Register temp2 = ToRegister(instr->temp2());
4917 
4918  Handle<JSObject> holder = instr->holder();
4919  Handle<JSObject> current_prototype = instr->prototype();
4920 
4921  // Load prototype object.
4922  __ LoadHeapObject(temp1, current_prototype);
4923 
4924  // Check prototype maps up to the holder.
4925  while (!current_prototype.is_identical_to(holder)) {
4926  DoCheckMapCommon(temp1, temp2,
4927  Handle<Map>(current_prototype->map()),
4928  ALLOW_ELEMENT_TRANSITION_MAPS, instr->environment());
4929  current_prototype =
4930  Handle<JSObject>(JSObject::cast(current_prototype->GetPrototype()));
4931  // Load next prototype object.
4932  __ LoadHeapObject(temp1, current_prototype);
4933  }
4934 
4935  // Check the holder map.
4936  DoCheckMapCommon(temp1, temp2,
4937  Handle<Map>(current_prototype->map()),
4938  ALLOW_ELEMENT_TRANSITION_MAPS, instr->environment());
4939  DeoptimizeIf(ne, instr->environment());
4940 }
4941 
4942 
4943 void LCodeGen::DoAllocateObject(LAllocateObject* instr) {
4944  class DeferredAllocateObject: public LDeferredCode {
4945  public:
4946  DeferredAllocateObject(LCodeGen* codegen, LAllocateObject* instr)
4947  : LDeferredCode(codegen), instr_(instr) { }
4948  virtual void Generate() { codegen()->DoDeferredAllocateObject(instr_); }
4949  virtual LInstruction* instr() { return instr_; }
4950  private:
4951  LAllocateObject* instr_;
4952  };
4953 
4954  DeferredAllocateObject* deferred =
4955  new(zone()) DeferredAllocateObject(this, instr);
4956 
4957  Register result = ToRegister(instr->result());
4958  Register scratch = ToRegister(instr->temp());
4959  Register scratch2 = ToRegister(instr->temp2());
4960  Handle<JSFunction> constructor = instr->hydrogen()->constructor();
4961  Handle<Map> initial_map(constructor->initial_map());
4962  int instance_size = initial_map->instance_size();
4963  ASSERT(initial_map->pre_allocated_property_fields() +
4964  initial_map->unused_property_fields() -
4965  initial_map->inobject_properties() == 0);
4966 
4967  // Allocate memory for the object. The initial map might change when
4968  // the constructor's prototype changes, but instance size and property
4969  // counts remain unchanged (if slack tracking finished).
4970  ASSERT(!constructor->shared()->IsInobjectSlackTrackingInProgress());
4971  __ AllocateInNewSpace(instance_size,
4972  result,
4973  scratch,
4974  scratch2,
4975  deferred->entry(),
4976  TAG_OBJECT);
4977 
4978  __ bind(deferred->exit());
4979  if (FLAG_debug_code) {
4980  Label is_in_new_space;
4981  __ JumpIfInNewSpace(result, scratch, &is_in_new_space);
4982  __ Abort("Allocated object is not in new-space");
4983  __ bind(&is_in_new_space);
4984  }
4985 
4986  // Load the initial map.
4987  Register map = scratch;
4988  __ LoadHeapObject(map, constructor);
4989  __ ldr(map, FieldMemOperand(map, JSFunction::kPrototypeOrInitialMapOffset));
4990 
4991  // Initialize map and fields of the newly allocated object.
4992  ASSERT(initial_map->instance_type() == JS_OBJECT_TYPE);
4993  __ str(map, FieldMemOperand(result, JSObject::kMapOffset));
4994  __ LoadRoot(scratch, Heap::kEmptyFixedArrayRootIndex);
4995  __ str(scratch, FieldMemOperand(result, JSObject::kElementsOffset));
4996  __ str(scratch, FieldMemOperand(result, JSObject::kPropertiesOffset));
4997  if (initial_map->inobject_properties() != 0) {
4998  __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
4999  for (int i = 0; i < initial_map->inobject_properties(); i++) {
5000  int property_offset = JSObject::kHeaderSize + i * kPointerSize;
5001  __ str(scratch, FieldMemOperand(result, property_offset));
5002  }
5003  }
5004 }
5005 
5006 
5007 void LCodeGen::DoDeferredAllocateObject(LAllocateObject* instr) {
5008  Register result = ToRegister(instr->result());
5009  Handle<JSFunction> constructor = instr->hydrogen()->constructor();
5010  Handle<Map> initial_map(constructor->initial_map());
5011  int instance_size = initial_map->instance_size();
5012 
5013  // TODO(3095996): Get rid of this. For now, we need to make the
5014  // result register contain a valid pointer because it is already
5015  // contained in the register pointer map.
5016  __ mov(result, Operand(0));
5017 
5018  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
5019  __ mov(r0, Operand(Smi::FromInt(instance_size)));
5020  __ push(r0);
5021  CallRuntimeFromDeferred(Runtime::kAllocateInNewSpace, 1, instr);
5022  __ StoreToSafepointRegisterSlot(r0, result);
5023 }
5024 
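DoAllocateObject and DoDeferredAllocateObject above follow the pattern used for every deferred-code instruction in this file: a fast inline path (here, bump-pointer allocation in new space) with a branch to deferred code that calls the runtime and rejoins at deferred->exit(). A rough standalone illustration of that control flow; NewSpace, TryAllocateInNewSpace and RuntimeAllocate are hypothetical stand-ins, not V8 APIs:

#include <cstddef>
#include <cstdint>
#include <cstdlib>

// Hypothetical bump-pointer area standing in for the new space.
struct NewSpace {
  uint8_t* top;
  uint8_t* limit;
};

// Fast path: returning nullptr corresponds to branching to deferred->entry().
static void* TryAllocateInNewSpace(NewSpace* space, std::size_t size) {
  if (space->top + size > space->limit) return nullptr;
  void* result = space->top;
  space->top += size;
  return result;
}

// Slow-path stand-in for the Runtime::kAllocateInNewSpace call.
static void* RuntimeAllocate(std::size_t size) { return std::malloc(size); }

static void* AllocateObject(NewSpace* space, std::size_t instance_size) {
  void* result = TryAllocateInNewSpace(space, instance_size);
  if (result == nullptr) {
    result = RuntimeAllocate(instance_size);  // Rejoin at deferred->exit().
  }
  return result;
}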
5025 
5026 void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
5027  Handle<FixedArray> literals(instr->environment()->closure()->literals());
5028  ElementsKind boilerplate_elements_kind =
5029  instr->hydrogen()->boilerplate_elements_kind();
5030 
5031  // Deopt if the array literal boilerplate ElementsKind is of a type different
5032  // than the expected one. The check isn't necessary if the boilerplate has
5033  // already been converted to TERMINAL_FAST_ELEMENTS_KIND.
5034  if (CanTransitionToMoreGeneralFastElementsKind(
5035  boilerplate_elements_kind, true)) {
5036  __ LoadHeapObject(r1, instr->hydrogen()->boilerplate_object());
5037  // Load map into r2.
5038  __ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
5039  // Load the map's "bit field 2".
5040  __ ldrb(r2, FieldMemOperand(r2, Map::kBitField2Offset));
5041  // Retrieve elements_kind from bit field 2.
5042  __ ubfx(r2, r2, Map::kElementsKindShift, Map::kElementsKindBitCount);
5043  __ cmp(r2, Operand(boilerplate_elements_kind));
5044  DeoptimizeIf(ne, instr->environment());
5045  }
5046 
5047  // Set up the parameters to the stub/runtime call.
5048  __ LoadHeapObject(r3, literals);
5049  __ mov(r2, Operand(Smi::FromInt(instr->hydrogen()->literal_index())));
5050  // Boilerplate already exists, constant elements are never accessed.
5051  // Pass an empty fixed array.
5052  __ mov(r1, Operand(isolate()->factory()->empty_fixed_array()));
5053  __ Push(r3, r2, r1);
5054 
5055  // Pick the right runtime function or stub to call.
5056  int length = instr->hydrogen()->length();
5057  if (instr->hydrogen()->IsCopyOnWrite()) {
5058  ASSERT(instr->hydrogen()->depth() == 1);
5059  FastCloneShallowArrayStub::Mode mode =
5060  FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS;
5061  FastCloneShallowArrayStub stub(mode, length);
5062  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
5063  } else if (instr->hydrogen()->depth() > 1) {
5064  CallRuntime(Runtime::kCreateArrayLiteral, 3, instr);
5065  } else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) {
5066  CallRuntime(Runtime::kCreateArrayLiteralShallow, 3, instr);
5067  } else {
5068  FastCloneShallowArrayStub::Mode mode =
5069  boilerplate_elements_kind == FAST_DOUBLE_ELEMENTS
5070  ? FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS
5071  : FastCloneShallowArrayStub::CLONE_ELEMENTS;
5072  FastCloneShallowArrayStub stub(mode, length);
5073  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
5074  }
5075 }
5076 
5077 
5078 void LCodeGen::EmitDeepCopy(Handle<JSObject> object,
5079  Register result,
5080  Register source,
5081  int* offset) {
5082  ASSERT(!source.is(r2));
5083  ASSERT(!result.is(r2));
5084 
5085  // Only elements backing stores for non-COW arrays need to be copied.
5086  Handle<FixedArrayBase> elements(object->elements());
5087  bool has_elements = elements->length() > 0 &&
5088  elements->map() != isolate()->heap()->fixed_cow_array_map();
5089 
5090  // Increase the offset so that subsequent objects end up right after
5091  // this object and its backing store.
5092  int object_offset = *offset;
5093  int object_size = object->map()->instance_size();
5094  int elements_offset = *offset + object_size;
5095  int elements_size = has_elements ? elements->Size() : 0;
5096  *offset += object_size + elements_size;
5097 
5098  // Copy object header.
5099  ASSERT(object->properties()->length() == 0);
5100  int inobject_properties = object->map()->inobject_properties();
5101  int header_size = object_size - inobject_properties * kPointerSize;
5102  for (int i = 0; i < header_size; i += kPointerSize) {
5103  if (has_elements && i == JSObject::kElementsOffset) {
5104  __ add(r2, result, Operand(elements_offset));
5105  } else {
5106  __ ldr(r2, FieldMemOperand(source, i));
5107  }
5108  __ str(r2, FieldMemOperand(result, object_offset + i));
5109  }
5110 
5111  // Copy in-object properties.
5112  for (int i = 0; i < inobject_properties; i++) {
5113  int total_offset = object_offset + object->GetInObjectPropertyOffset(i);
5114  Handle<Object> value = Handle<Object>(object->InObjectPropertyAt(i));
5115  if (value->IsJSObject()) {
5116  Handle<JSObject> value_object = Handle<JSObject>::cast(value);
5117  __ add(r2, result, Operand(*offset));
5118  __ str(r2, FieldMemOperand(result, total_offset));
5119  __ LoadHeapObject(source, value_object);
5120  EmitDeepCopy(value_object, result, source, offset);
5121  } else if (value->IsHeapObject()) {
5122  __ LoadHeapObject(r2, Handle<HeapObject>::cast(value));
5123  __ str(r2, FieldMemOperand(result, total_offset));
5124  } else {
5125  __ mov(r2, Operand(value));
5126  __ str(r2, FieldMemOperand(result, total_offset));
5127  }
5128  }
5129 
5130  if (has_elements) {
5131  // Copy elements backing store header.
5132  __ LoadHeapObject(source, elements);
5133  for (int i = 0; i < FixedArray::kHeaderSize; i += kPointerSize) {
5134  __ ldr(r2, FieldMemOperand(source, i));
5135  __ str(r2, FieldMemOperand(result, elements_offset + i));
5136  }
5137 
5138  // Copy elements backing store content.
5139  int elements_length = has_elements ? elements->length() : 0;
5140  if (elements->IsFixedDoubleArray()) {
5141  Handle<FixedDoubleArray> double_array =
5142  Handle<FixedDoubleArray>::cast(elements);
5143  for (int i = 0; i < elements_length; i++) {
5144  int64_t value = double_array->get_representation(i);
5145  // We only support little endian mode...
5146  int32_t value_low = static_cast<int32_t>(value & 0xFFFFFFFF);
5147  int32_t value_high = static_cast<int32_t>(value >> 32);
5148  int total_offset =
5149  elements_offset + FixedDoubleArray::OffsetOfElementAt(i);
5150  __ mov(r2, Operand(value_low));
5151  __ str(r2, FieldMemOperand(result, total_offset));
5152  __ mov(r2, Operand(value_high));
5153  __ str(r2, FieldMemOperand(result, total_offset + 4));
5154  }
5155  } else if (elements->IsFixedArray()) {
5156  Handle<FixedArray> fast_elements = Handle<FixedArray>::cast(elements);
5157  for (int i = 0; i < elements_length; i++) {
5158  int total_offset = elements_offset + FixedArray::OffsetOfElementAt(i);
5159  Handle<Object> value(fast_elements->get(i));
5160  if (value->IsJSObject()) {
5161  Handle<JSObject> value_object = Handle<JSObject>::cast(value);
5162  __ add(r2, result, Operand(*offset));
5163  __ str(r2, FieldMemOperand(result, total_offset));
5164  __ LoadHeapObject(source, value_object);
5165  EmitDeepCopy(value_object, result, source, offset);
5166  } else if (value->IsHeapObject()) {
5167  __ LoadHeapObject(r2, Handle<HeapObject>::cast(value));
5168  __ str(r2, FieldMemOperand(result, total_offset));
5169  } else {
5170  __ mov(r2, Operand(value));
5171  __ str(r2, FieldMemOperand(result, total_offset));
5172  }
5173  }
5174  } else {
5175  UNREACHABLE();
5176  }
5177  }
5178 }
5179 
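EmitDeepCopy above flattens a boilerplate object, its elements backing store, and any nested object literals into one contiguous allocation, threading a single *offset cursor through the recursion: the object goes at the current offset, its elements directly after it, and every nested object after everything planned so far. A simplified standalone sketch of that bookkeeping (LiteralNode and its sizes are illustrative, not V8's instance sizes):

#include <vector>

// Illustrative literal node: object size, optional elements store size,
// and nested object-valued properties.
struct LiteralNode {
  int object_size;
  int elements_size;  // 0 when there is no backing store to copy.
  std::vector<const LiteralNode*> children;
};

// Mirrors the *offset threading in EmitDeepCopy.
static void PlanDeepCopy(const LiteralNode* node, int* offset) {
  int object_offset = *offset;
  int elements_offset = object_offset + node->object_size;
  *offset += node->object_size + node->elements_size;
  (void)elements_offset;  // Written into the copied elements pointer field.
  for (const LiteralNode* child : node->children) {
    // The child's property slot is filled with result + *offset before the
    // recursive copy, like the add(r2, result, Operand(*offset)) above.
    PlanDeepCopy(child, offset);
  }
}

DoFastLiteral below relies on this cursor ending exactly at total_size(), which is what the ASSERT_EQ(size, offset) after the copy checks.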
5180 
5181 void LCodeGen::DoFastLiteral(LFastLiteral* instr) {
5182  int size = instr->hydrogen()->total_size();
5183  ElementsKind boilerplate_elements_kind =
5184  instr->hydrogen()->boilerplate()->GetElementsKind();
5185 
5186  // Deopt if the array literal boilerplate ElementsKind is of a type different
5187  // than the expected one. The check isn't necessary if the boilerplate has
5188  // already been converted to TERMINAL_FAST_ELEMENTS_KIND.
5189  if (CanTransitionToMoreGeneralFastElementsKind(
5190  boilerplate_elements_kind, true)) {
5191  __ LoadHeapObject(r1, instr->hydrogen()->boilerplate());
5192  // Load map into r2.
5193  __ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
5194  // Load the map's "bit field 2".
5195  __ ldrb(r2, FieldMemOperand(r2, Map::kBitField2Offset));
5196  // Retrieve elements_kind from bit field 2.
5197  __ ubfx(r2, r2, Map::kElementsKindShift, Map::kElementsKindBitCount);
5198  __ cmp(r2, Operand(boilerplate_elements_kind));
5199  DeoptimizeIf(ne, instr->environment());
5200  }
5201 
5202  // Allocate all objects that are part of the literal in one big
5203  // allocation. This avoids multiple limit checks.
5204  Label allocated, runtime_allocate;
5205  __ AllocateInNewSpace(size, r0, r2, r3, &runtime_allocate, TAG_OBJECT);
5206  __ jmp(&allocated);
5207 
5208  __ bind(&runtime_allocate);
5209  __ mov(r0, Operand(Smi::FromInt(size)));
5210  __ push(r0);
5211  CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
5212 
5213  __ bind(&allocated);
5214  int offset = 0;
5215  __ LoadHeapObject(r1, instr->hydrogen()->boilerplate());
5216  EmitDeepCopy(instr->hydrogen()->boilerplate(), r0, r1, &offset);
5217  ASSERT_EQ(size, offset);
5218 }
5219 
5220 
5221 void LCodeGen::DoObjectLiteral(LObjectLiteral* instr) {
5222  Handle<FixedArray> literals(instr->environment()->closure()->literals());
5223  Handle<FixedArray> constant_properties =
5224  instr->hydrogen()->constant_properties();
5225 
5226  // Set up the parameters to the stub/runtime call.
5227  __ LoadHeapObject(r4, literals);
5228  __ mov(r3, Operand(Smi::FromInt(instr->hydrogen()->literal_index())));
5229  __ mov(r2, Operand(constant_properties));
5230  int flags = instr->hydrogen()->fast_elements()
5231  ? ObjectLiteral::kFastElements
5232  : ObjectLiteral::kNoFlags;
5233  __ mov(r1, Operand(Smi::FromInt(flags)));
5234  __ Push(r4, r3, r2, r1);
5235 
5236  // Pick the right runtime function or stub to call.
5237  int properties_count = constant_properties->length() / 2;
5238  if (instr->hydrogen()->depth() > 1) {
5239  CallRuntime(Runtime::kCreateObjectLiteral, 4, instr);
5240  } else if (flags != ObjectLiteral::kFastElements ||
5241  properties_count > FastCloneShallowObjectStub::kMaximumClonedProperties) {
5242  CallRuntime(Runtime::kCreateObjectLiteralShallow, 4, instr);
5243  } else {
5244  FastCloneShallowObjectStub stub(properties_count);
5245  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
5246  }
5247 }
5248 
5249 
5250 void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
5251  ASSERT(ToRegister(instr->value()).is(r0));
5252  __ push(r0);
5253  CallRuntime(Runtime::kToFastProperties, 1, instr);
5254 }
5255 
5256 
5257 void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
5258  Label materialized;
5259  // Registers will be used as follows:
5260  // r7 = literals array.
5261  // r1 = regexp literal.
5262  // r0 = regexp literal clone.
5263  // r2 and r4-r6 are used as temporaries.
5264  int literal_offset =
5265  FixedArray::OffsetOfElementAt(instr->hydrogen()->literal_index());
5266  __ LoadHeapObject(r7, instr->hydrogen()->literals());
5267  __ ldr(r1, FieldMemOperand(r7, literal_offset));
5268  __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
5269  __ cmp(r1, ip);
5270  __ b(ne, &materialized);
5271 
5272  // Create regexp literal using runtime function
5273  // Result will be in r0.
5274  __ mov(r6, Operand(Smi::FromInt(instr->hydrogen()->literal_index())));
5275  __ mov(r5, Operand(instr->hydrogen()->pattern()));
5276  __ mov(r4, Operand(instr->hydrogen()->flags()));
5277  __ Push(r7, r6, r5, r4);
5278  CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr);
5279  __ mov(r1, r0);
5280 
5281  __ bind(&materialized);
5282  int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
5283  Label allocated, runtime_allocate;
5284 
5285  __ AllocateInNewSpace(size, r0, r2, r3, &runtime_allocate, TAG_OBJECT);
5286  __ jmp(&allocated);
5287 
5288  __ bind(&runtime_allocate);
5289  __ mov(r0, Operand(Smi::FromInt(size)));
5290  __ Push(r1, r0);
5291  CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
5292  __ pop(r1);
5293 
5294  __ bind(&allocated);
5295  // Copy the content into the newly allocated memory.
5296  // (Unroll copy loop once for better throughput).
5297  for (int i = 0; i < size - kPointerSize; i += 2 * kPointerSize) {
5298  __ ldr(r3, FieldMemOperand(r1, i));
5299  __ ldr(r2, FieldMemOperand(r1, i + kPointerSize));
5300  __ str(r3, FieldMemOperand(r0, i));
5301  __ str(r2, FieldMemOperand(r0, i + kPointerSize));
5302  }
5303  if ((size % (2 * kPointerSize)) != 0) {
5304  __ ldr(r3, FieldMemOperand(r1, size - kPointerSize));
5305  __ str(r3, FieldMemOperand(r0, size - kPointerSize));
5306  }
5307 }
5308 
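The copy loop at the end of DoRegExpLiteral above moves the freshly allocated regexp object word by word, two words per iteration, with a one-word tail when the object size is not a multiple of 2 * kPointerSize. The same unrolling over plain word arrays (a sketch, not the emitted ARM code):

#include <cstddef>
#include <cstdint>

// Copies size_in_bytes (a multiple of the word size) two words at a time,
// mirroring the unrolled ldr/str pairs in DoRegExpLiteral.
static void CopyWordsUnrolled(uintptr_t* dst, const uintptr_t* src,
                              std::size_t size_in_bytes) {
  std::size_t words = size_in_bytes / sizeof(uintptr_t);
  std::size_t i = 0;
  for (; i + 1 < words; i += 2) {  // Main loop: two words per iteration.
    dst[i] = src[i];
    dst[i + 1] = src[i + 1];
  }
  if (i < words) {                 // Odd word count: copy the last word.
    dst[i] = src[i];
  }
}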
5309 
5310 void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
5311  // Use the fast case closure allocation code that allocates in new
5312  // space for nested functions that don't need literals cloning.
5313  Handle<SharedFunctionInfo> shared_info = instr->shared_info();
5314  bool pretenure = instr->hydrogen()->pretenure();
5315  if (!pretenure && shared_info->num_literals() == 0) {
5316  FastNewClosureStub stub(shared_info->language_mode());
5317  __ mov(r1, Operand(shared_info));
5318  __ push(r1);
5319  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
5320  } else {
5321  __ mov(r2, Operand(shared_info));
5322  __ mov(r1, Operand(pretenure
5323  ? factory()->true_value()
5324  : factory()->false_value()));
5325  __ Push(cp, r2, r1);
5326  CallRuntime(Runtime::kNewClosure, 3, instr);
5327  }
5328 }
5329 
5330 
5331 void LCodeGen::DoTypeof(LTypeof* instr) {
5332  Register input = ToRegister(instr->value());
5333  __ push(input);
5334  CallRuntime(Runtime::kTypeof, 1, instr);
5335 }
5336 
5337 
5338 void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
5339  Register input = ToRegister(instr->value());
5340  int true_block = chunk_->LookupDestination(instr->true_block_id());
5341  int false_block = chunk_->LookupDestination(instr->false_block_id());
5342  Label* true_label = chunk_->GetAssemblyLabel(true_block);
5343  Label* false_label = chunk_->GetAssemblyLabel(false_block);
5344 
5345  Condition final_branch_condition = EmitTypeofIs(true_label,
5346  false_label,
5347  input,
5348  instr->type_literal());
5349  if (final_branch_condition != kNoCondition) {
5350  EmitBranch(true_block, false_block, final_branch_condition);
5351  }
5352 }
5353 
5354 
5355 Condition LCodeGen::EmitTypeofIs(Label* true_label,
5356  Label* false_label,
5357  Register input,
5358  Handle<String> type_name) {
5359  Condition final_branch_condition = kNoCondition;
5360  Register scratch = scratch0();
5361  if (type_name->Equals(heap()->number_symbol())) {
5362  __ JumpIfSmi(input, true_label);
5363  __ ldr(input, FieldMemOperand(input, HeapObject::kMapOffset));
5364  __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
5365  __ cmp(input, Operand(ip));
5366  final_branch_condition = eq;
5367 
5368  } else if (type_name->Equals(heap()->string_symbol())) {
5369  __ JumpIfSmi(input, false_label);
5370  __ CompareObjectType(input, input, scratch, FIRST_NONSTRING_TYPE);
5371  __ b(ge, false_label);
5372  __ ldrb(ip, FieldMemOperand(input, Map::kBitFieldOffset));
5373  __ tst(ip, Operand(1 << Map::kIsUndetectable));
5374  final_branch_condition = eq;
5375 
5376  } else if (type_name->Equals(heap()->boolean_symbol())) {
5377  __ CompareRoot(input, Heap::kTrueValueRootIndex);
5378  __ b(eq, true_label);
5379  __ CompareRoot(input, Heap::kFalseValueRootIndex);
5380  final_branch_condition = eq;
5381 
5382  } else if (FLAG_harmony_typeof && type_name->Equals(heap()->null_symbol())) {
5383  __ CompareRoot(input, Heap::kNullValueRootIndex);
5384  final_branch_condition = eq;
5385 
5386  } else if (type_name->Equals(heap()->undefined_symbol())) {
5387  __ CompareRoot(input, Heap::kUndefinedValueRootIndex);
5388  __ b(eq, true_label);
5389  __ JumpIfSmi(input, false_label);
5390  // Check for undetectable objects => true.
5391  __ ldr(input, FieldMemOperand(input, HeapObject::kMapOffset));
5392  __ ldrb(ip, FieldMemOperand(input, Map::kBitFieldOffset));
5393  __ tst(ip, Operand(1 << Map::kIsUndetectable));
5394  final_branch_condition = ne;
5395 
5396  } else if (type_name->Equals(heap()->function_symbol())) {
5397  STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
5398  __ JumpIfSmi(input, false_label);
5399  __ CompareObjectType(input, scratch, input, JS_FUNCTION_TYPE);
5400  __ b(eq, true_label);
5401  __ cmp(input, Operand(JS_FUNCTION_PROXY_TYPE));
5402  final_branch_condition = eq;
5403 
5404  } else if (type_name->Equals(heap()->object_symbol())) {
5405  __ JumpIfSmi(input, false_label);
5406  if (!FLAG_harmony_typeof) {
5407  __ CompareRoot(input, Heap::kNullValueRootIndex);
5408  __ b(eq, true_label);
5409  }
5410  __ CompareObjectType(input, input, scratch,
5411  FIRST_NONCALLABLE_SPEC_OBJECT_TYPE);
5412  __ b(lt, false_label);
5413  __ CompareInstanceType(input, scratch, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
5414  __ b(gt, false_label);
5415  // Check for undetectable objects => false.
5416  __ ldrb(ip, FieldMemOperand(input, Map::kBitFieldOffset));
5417  __ tst(ip, Operand(1 << Map::kIsUndetectable));
5418  final_branch_condition = eq;
5419 
5420  } else {
5421  __ b(false_label);
5422  }
5423 
5424  return final_branch_condition;
5425 }
5426 
5427 
5428 void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
5429  Register temp1 = ToRegister(instr->temp());
5430  int true_block = chunk_->LookupDestination(instr->true_block_id());
5431  int false_block = chunk_->LookupDestination(instr->false_block_id());
5432 
5433  EmitIsConstructCall(temp1, scratch0());
5434  EmitBranch(true_block, false_block, eq);
5435 }
5436 
5437 
5438 void LCodeGen::EmitIsConstructCall(Register temp1, Register temp2) {
5439  ASSERT(!temp1.is(temp2));
5440  // Get the frame pointer for the calling frame.
5441  __ ldr(temp1, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
5442 
5443  // Skip the arguments adaptor frame if it exists.
5444  Label check_frame_marker;
5445  __ ldr(temp2, MemOperand(temp1, StandardFrameConstants::kContextOffset));
5446  __ cmp(temp2, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
5447  __ b(ne, &check_frame_marker);
5448  __ ldr(temp1, MemOperand(temp1, StandardFrameConstants::kCallerFPOffset));
5449 
5450  // Check the marker in the calling frame.
5451  __ bind(&check_frame_marker);
5452  __ ldr(temp1, MemOperand(temp1, StandardFrameConstants::kMarkerOffset));
5453  __ cmp(temp1, Operand(Smi::FromInt(StackFrame::CONSTRUCT)));
5454 }
5455 
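EmitIsConstructCall above walks one frame up from the current frame pointer, skips an arguments-adaptor frame if one sits in between, and then compares the frame marker slot against StackFrame::CONSTRUCT. The same walk over a hypothetical linked frame record (field names and sentinel values are illustrative, not StandardFrameConstants):

#include <cstdint>

// Illustrative frame record: caller frame pointer plus the two slots that
// EmitIsConstructCall inspects.
struct Frame {
  const Frame* caller_fp;
  intptr_t context_slot;  // Holds an adaptor sentinel for adaptor frames.
  intptr_t marker_slot;   // Holds the frame type marker.
};

static const intptr_t kArgumentsAdaptorSentinel = 1;  // Stand-in value.
static const intptr_t kConstructMarker = 2;           // Stand-in value.

static bool IsConstructCall(const Frame* current) {
  const Frame* calling = current->caller_fp;
  // Skip the arguments adaptor frame if it exists.
  if (calling->context_slot == kArgumentsAdaptorSentinel) {
    calling = calling->caller_fp;
  }
  // Check the marker in the calling frame.
  return calling->marker_slot == kConstructMarker;
}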
5456 
5457 void LCodeGen::EnsureSpaceForLazyDeopt() {
5458  // Ensure that we have enough space after the previous lazy-bailout
5459  // instruction for patching the code here.
5460  int current_pc = masm()->pc_offset();
5461  int patch_size = Deoptimizer::patch_size();
5462  if (current_pc < last_lazy_deopt_pc_ + patch_size) {
5463  // Block literal pool emission for duration of padding.
5464  Assembler::BlockConstPoolScope block_const_pool(masm());
5465  int padding_size = last_lazy_deopt_pc_ + patch_size - current_pc;
5466  ASSERT_EQ(0, padding_size % Assembler::kInstrSize);
5467  while (padding_size > 0) {
5468  __ nop();
5469  padding_size -= Assembler::kInstrSize;
5470  }
5471  }
5472  last_lazy_deopt_pc_ = masm()->pc_offset();
5473 }
5474 
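EnsureSpaceForLazyDeopt above keeps consecutive lazy-deopt patch sites at least patch_size() bytes apart by padding with nops; the padding is simply the shortfall between the current pc offset and the previous patch site plus the patch size. A standalone sketch of that computation (the constants are illustrative, not the real Deoptimizer::patch_size() or Assembler::kInstrSize values):

// Illustrative sizes only.
static const int kPatchSizeInBytes = 3 * 4;  // Room for the patched call sequence.
static const int kInstrSizeInBytes = 4;      // One ARM instruction.

// Number of nops to emit so a full patch fits between the last lazy-deopt
// site and the current pc; assumes the shortfall is instruction-aligned,
// as the ASSERT in EnsureSpaceForLazyDeopt checks.
static int LazyDeoptPaddingInstructions(int current_pc, int last_lazy_deopt_pc) {
  if (current_pc >= last_lazy_deopt_pc + kPatchSizeInBytes) return 0;
  int padding_bytes = last_lazy_deopt_pc + kPatchSizeInBytes - current_pc;
  return padding_bytes / kInstrSizeInBytes;
}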
5475 
5476 void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
5477  EnsureSpaceForLazyDeopt();
5478  ASSERT(instr->HasEnvironment());
5479  LEnvironment* env = instr->environment();
5480  RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
5481  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
5482 }
5483 
5484 
5485 void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
5486  DeoptimizeIf(al, instr->environment());
5487 }
5488 
5489 
5490 void LCodeGen::DoDeleteProperty(LDeleteProperty* instr) {
5491  Register object = ToRegister(instr->object());
5492  Register key = ToRegister(instr->key());
5493  Register strict = scratch0();
5494  __ mov(strict, Operand(Smi::FromInt(strict_mode_flag())));
5495  __ Push(object, key, strict);
5496  ASSERT(instr->HasPointerMap());
5497  LPointerMap* pointers = instr->pointer_map();
5498  RecordPosition(pointers->position());
5499  SafepointGenerator safepoint_generator(
5500  this, pointers, Safepoint::kLazyDeopt);
5501  __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION, safepoint_generator);
5502 }
5503 
5504 
5505 void LCodeGen::DoIn(LIn* instr) {
5506  Register obj = ToRegister(instr->object());
5507  Register key = ToRegister(instr->key());
5508  __ Push(key, obj);
5509  ASSERT(instr->HasPointerMap());
5510  LPointerMap* pointers = instr->pointer_map();
5511  RecordPosition(pointers->position());
5512  SafepointGenerator safepoint_generator(this, pointers, Safepoint::kLazyDeopt);
5513  __ InvokeBuiltin(Builtins::IN, CALL_FUNCTION, safepoint_generator);
5514 }
5515 
5516 
5517 void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
5518  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
5519  __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
5520  RecordSafepointWithLazyDeopt(
5521  instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
5522  ASSERT(instr->HasEnvironment());
5523  LEnvironment* env = instr->environment();
5524  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
5525 }
5526 
5527 
5528 void LCodeGen::DoStackCheck(LStackCheck* instr) {
5529  class DeferredStackCheck: public LDeferredCode {
5530  public:
5531  DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
5532  : LDeferredCode(codegen), instr_(instr) { }
5533  virtual void Generate() { codegen()->DoDeferredStackCheck(instr_); }
5534  virtual LInstruction* instr() { return instr_; }
5535  private:
5536  LStackCheck* instr_;
5537  };
5538 
5539  ASSERT(instr->HasEnvironment());
5540  LEnvironment* env = instr->environment();
5541  // There is no LLazyBailout instruction for stack-checks. We have to
5542  // prepare for lazy deoptimization explicitly here.
5543  if (instr->hydrogen()->is_function_entry()) {
5544  // Perform stack overflow check.
5545  Label done;
5546  __ LoadRoot(ip, Heap::kStackLimitRootIndex);
5547  __ cmp(sp, Operand(ip));
5548  __ b(hs, &done);
5549  StackCheckStub stub;
5550  PredictableCodeSizeScope predictable(masm_);
5551  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
5552  EnsureSpaceForLazyDeopt();
5553  __ bind(&done);
5554  RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
5555  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
5556  } else {
5557  ASSERT(instr->hydrogen()->is_backwards_branch());
5558  // Perform stack overflow check if this goto needs it before jumping.
5559  DeferredStackCheck* deferred_stack_check =
5560  new(zone()) DeferredStackCheck(this, instr);
5561  __ LoadRoot(ip, Heap::kStackLimitRootIndex);
5562  __ cmp(sp, Operand(ip));
5563  __ b(lo, deferred_stack_check->entry());
5564  EnsureSpaceForLazyDeopt();
5565  __ bind(instr->done_label());
5566  deferred_stack_check->SetExit(instr->done_label());
5567  RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
5568  // Don't record a deoptimization index for the safepoint here.
5569  // This will be done explicitly when emitting call and the safepoint in
5570  // the deferred code.
5571  }
5572 }
5573 
5574 
5575 void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
5576  // This is a pseudo-instruction that ensures that the environment here is
5577  // properly registered for deoptimization and records the assembler's PC
5578  // offset.
5579  LEnvironment* environment = instr->environment();
5580  environment->SetSpilledRegisters(instr->SpilledRegisterArray(),
5581  instr->SpilledDoubleRegisterArray());
5582 
5583  // If the environment were already registered, we would have no way of
5584  // backpatching it with the spill slot operands.
5585  ASSERT(!environment->HasBeenRegistered());
5586  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
5587  ASSERT(osr_pc_offset_ == -1);
5588  osr_pc_offset_ = masm()->pc_offset();
5589 }
5590 
5591 
5592 void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
5593  __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
5594  __ cmp(r0, ip);
5595  DeoptimizeIf(eq, instr->environment());
5596 
5597  Register null_value = r5;
5598  __ LoadRoot(null_value, Heap::kNullValueRootIndex);
5599  __ cmp(r0, null_value);
5600  DeoptimizeIf(eq, instr->environment());
5601 
5602  __ tst(r0, Operand(kSmiTagMask));
5603  DeoptimizeIf(eq, instr->environment());
5604 
5605  STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
5606  __ CompareObjectType(r0, r1, r1, LAST_JS_PROXY_TYPE);
5607  DeoptimizeIf(le, instr->environment());
5608 
5609  Label use_cache, call_runtime;
5610  __ CheckEnumCache(null_value, &call_runtime);
5611 
5612  __ ldr(r0, FieldMemOperand(r0, HeapObject::kMapOffset));
5613  __ b(&use_cache);
5614 
5615  // Get the set of properties to enumerate.
5616  __ bind(&call_runtime);
5617  __ push(r0);
5618  CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr);
5619 
5620  __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
5621  __ LoadRoot(ip, Heap::kMetaMapRootIndex);
5622  __ cmp(r1, ip);
5623  DeoptimizeIf(ne, instr->environment());
5624  __ bind(&use_cache);
5625 }
5626 
5627 
5628 void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
5629  Register map = ToRegister(instr->map());
5630  Register result = ToRegister(instr->result());
5631  Label load_cache, done;
5632  __ EnumLength(result, map);
5633  __ cmp(result, Operand(Smi::FromInt(0)));
5634  __ b(ne, &load_cache);
5635  __ mov(result, Operand(isolate()->factory()->empty_fixed_array()));
5636  __ jmp(&done);
5637 
5638  __ bind(&load_cache);
5639  __ LoadInstanceDescriptors(map, result);
5640  __ ldr(result,
5641  FieldMemOperand(result, DescriptorArray::kEnumCacheOffset));
5642  __ ldr(result,
5643  FieldMemOperand(result, FixedArray::SizeFor(instr->idx())));
5644  __ cmp(result, Operand(0));
5645  DeoptimizeIf(eq, instr->environment());
5646 
5647  __ bind(&done);
5648 }
5649 
5650 
5651 void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
5652  Register object = ToRegister(instr->value());
5653  Register map = ToRegister(instr->map());
5654  __ ldr(scratch0(), FieldMemOperand(object, HeapObject::kMapOffset));
5655  __ cmp(map, scratch0());
5656  DeoptimizeIf(ne, instr->environment());
5657 }
5658 
5659 
5660 void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
5661  Register object = ToRegister(instr->object());
5662  Register index = ToRegister(instr->index());
5663  Register result = ToRegister(instr->result());
5664  Register scratch = scratch0();
5665 
5666  Label out_of_object, done;
5667  __ cmp(index, Operand(0));
5668  __ b(lt, &out_of_object);
5669 
5670  STATIC_ASSERT(kPointerSizeLog2 > kSmiTagSize);
5671  __ add(scratch, object, Operand(index, LSL, kPointerSizeLog2 - kSmiTagSize));
5672  __ ldr(result, FieldMemOperand(scratch, JSObject::kHeaderSize));
5673 
5674  __ b(&done);
5675 
5676  __ bind(&out_of_object);
5677  __ ldr(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
5678  // Index is equal to negated out of object property index plus 1.
5679  __ sub(scratch, result, Operand(index, LSL, kPointerSizeLog2 - kSmiTagSize));
5680  __ ldr(result, FieldMemOperand(scratch,
5681  FixedArray::kHeaderSize - kPointerSize));
5682  __ bind(&done);
5683 }
5684 
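DoLoadFieldByIndex above treats the incoming (smi) index as signed: a non-negative index selects an in-object field just past the header, while a negative index encodes out-of-object field p as -(p + 1) and reads it from the properties backing store. The addressing rule in plain C++, with a hypothetical flattened layout standing in for JSObject and its FixedArray of properties:

#include <cstdint>

// Illustrative layout: a few header words followed by in-object fields,
// plus a separate out-of-object properties array.
struct FakeObject {
  static const int kHeaderWords = 2;
  uintptr_t words[16];       // Header followed by in-object fields.
  uintptr_t properties[16];  // Out-of-object backing store.
};

// index >= 0: in-object field number. index < 0: out-of-object field p,
// encoded as -(p + 1), matching the comment in DoLoadFieldByIndex.
static uintptr_t LoadFieldByIndex(const FakeObject* obj, int index) {
  if (index >= 0) {
    return obj->words[FakeObject::kHeaderWords + index];
  }
  int property_index = -index - 1;
  return obj->properties[property_index];
}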
5685 
5686 #undef __
5687 
5688 } } // namespace v8::internal