                     Safepoint::DeoptMode mode)
    codegen_->RecordSafepoint(pointers_, deopt_mode_);
  Safepoint::DeoptMode deopt_mode_;
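
// Code generation proceeds in phases: prologue, instruction body, deferred
// code, the deoptimization jump table and, last, the safepoint table; see the
// call chain in GenerateCode() below.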
bool LCodeGen::GenerateCode() {
  HPhase phase("Z_Code generation", chunk());
  CpuFeatures::Scope scope1(VFP3);
  CpuFeatures::Scope scope2(ARMv7);
  CodeStub::GenerateFPStubs();
  return GeneratePrologue() &&
         GenerateDeferredCode() &&
         GenerateDeoptJumpTable() &&
         GenerateSafepointTable();
void LCodeGen::FinishCode(Handle<Code> code) {
  code->set_stack_slots(GetStackSlotCount());
  code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
  PopulateDeoptimizationData(code);


void LCodeGen::Abort(const char* reason) {
  info()->set_bailout_reason(reason);


void LCodeGen::Comment(const char* format, ...) {
  if (!FLAG_code_comments) return;
  StringBuilder builder(buffer, ARRAY_SIZE(buffer));
  va_start(arguments, format);
  builder.AddFormattedList(format, arguments);
  size_t length = builder.position();
  memcpy(copy.start(), builder.Finalize(), copy.length());
  masm()->RecordComment(copy.start());
bool LCodeGen::GeneratePrologue() {
  if (strlen(FLAG_stop_at) > 0 &&
      info_->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {

  if (!info_->is_classic_mode() || info_->is_native()) {
    __ cmp(r5, Operand(0));
    int receiver_offset = scope()->num_parameters() * kPointerSize;
    __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);

  int slots = GetStackSlotCount();
  if (FLAG_debug_code) {
    __ mov(r0, Operand(slots));

  if (heap_slots > 0) {
    Comment(";;; Allocate local context");
    FastNewContextStub stub(heap_slots);
      __ CallRuntime(Runtime::kNewFunctionContext, 1);
    RecordSafepoint(Safepoint::kNoLazyDeopt);
    int num_parameters = scope()->num_parameters();
    for (int i = 0; i < num_parameters; i++) {
      Variable* var = scope()->parameter(i);
      if (var->IsContextSlot()) {
        __ RecordWriteContextSlot(
    Comment(";;; End allocate local context");

    __ CallRuntime(Runtime::kTraceEnter, 0);
  return !is_aborted();
bool LCodeGen::GenerateBody() {
  bool emit_instructions = true;
  for (current_instruction_ = 0;
       !is_aborted() && current_instruction_ < instructions_->length();
       current_instruction_++) {
    LInstruction* instr = instructions_->at(current_instruction_);
    if (instr->IsLabel()) {
      emit_instructions = !label->HasReplacement();
    if (emit_instructions) {
      Comment(";;; @%d: %s.", current_instruction_, instr->Mnemonic());
      instr->CompileToNative(this);
  EnsureSpaceForLazyDeopt();
  return !is_aborted();
bool LCodeGen::GenerateDeferredCode() {
  if (deferred_.length() > 0) {
    for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
      LDeferredCode* code = deferred_[i];
      __ bind(code->entry());
      Comment(";;; Deferred code @%d: %s.",
              code->instruction_index(),
              code->instr()->Mnemonic());
      __ jmp(code->exit());
  masm()->CheckConstPool(true, false);
  return !is_aborted();
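
// Each deoptimization jump table entry occupies two words of generated code:
// a jump instruction (elided in this excerpt) followed by the raw deoptimizer
// entry address emitted with dd(); the ASSERT on
// deopt_jump_table_.length() * 2 below relies on that layout.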
bool LCodeGen::GenerateDeoptJumpTable() {
      deopt_jump_table_.length() * 2)) {
    Abort("Generated code is too large");
  __ BlockConstPoolFor(deopt_jump_table_.length());
  __ RecordComment("[ Deoptimisation jump table");
  __ bind(&table_start);
  for (int i = 0; i < deopt_jump_table_.length(); i++) {
    __ bind(&deopt_jump_table_[i].label);
    __ dd(reinterpret_cast<uint32_t>(deopt_jump_table_[i].address));
  ASSERT(masm()->InstructionsGeneratedSince(&table_start) ==
         deopt_jump_table_.length() * 2);
  __ RecordComment("]");

  if (!is_aborted()) status_ = DONE;
  return !is_aborted();


bool LCodeGen::GenerateSafepointTable() {
  safepoints_.Emit(masm(), GetStackSlotCount());
  return !is_aborted();
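
// Operand materialization helpers: EmitLoadRegister and EmitLoadDoubleRegister
// turn an LOperand (register, constant or stack slot) into a usable machine
// register, moving constants into the provided scratch register when needed.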
Register LCodeGen::EmitLoadRegister(LOperand* op, Register scratch) {
  if (op->IsRegister()) {
  } else if (op->IsConstantOperand()) {
    HConstant* constant = chunk_->LookupConstant(const_op);
    Handle<Object> literal = constant->handle();
    Representation r = chunk_->LookupLiteralRepresentation(const_op);
    if (r.IsInteger32()) {
      ASSERT(literal->IsNumber());
      __ mov(scratch, Operand(static_cast<int32_t>(literal->Number())));
    } else if (r.IsDouble()) {
      Abort("EmitLoadRegister: Unsupported double immediate.");
      if (literal->IsSmi()) {
        __ mov(scratch, Operand(literal));
        __ LoadHeapObject(scratch, Handle<HeapObject>::cast(literal));
  } else if (op->IsStackSlot() || op->IsArgument()) {
    __ ldr(scratch, ToMemOperand(op));


  ASSERT(op->IsDoubleRegister());
  return ToDoubleRegister(op->index());


                                                SwVfpRegister flt_scratch,
  if (op->IsDoubleRegister()) {
    return ToDoubleRegister(op->index());
  } else if (op->IsConstantOperand()) {
    HConstant* constant = chunk_->LookupConstant(const_op);
    Handle<Object> literal = constant->handle();
    Representation r = chunk_->LookupLiteralRepresentation(const_op);
    if (r.IsInteger32()) {
      ASSERT(literal->IsNumber());
      __ mov(ip, Operand(static_cast<int32_t>(literal->Number())));
      __ vmov(flt_scratch, ip);
      __ vcvt_f64_s32(dbl_scratch, flt_scratch);
    } else if (r.IsDouble()) {
      Abort("unsupported double immediate");
    } else if (r.IsTagged()) {
      Abort("unsupported tagged immediate");
  } else if (op->IsStackSlot() || op->IsArgument()) {
    __ vldr(dbl_scratch, mem_op.rn(), mem_op.offset());
Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  ASSERT(chunk_->LookupLiteralRepresentation(op).IsTagged());
  return constant->handle();


bool LCodeGen::IsInteger32(LConstantOperand* op) const {
  return chunk_->LookupLiteralRepresentation(op).IsInteger32();


int LCodeGen::ToInteger32(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  ASSERT(chunk_->LookupLiteralRepresentation(op).IsInteger32());
  ASSERT(constant->HasInteger32Value());
  return constant->Integer32Value();


double LCodeGen::ToDouble(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  ASSERT(constant->HasDoubleValue());
  return constant->DoubleValue();


Operand LCodeGen::ToOperand(LOperand* op) {
  if (op->IsConstantOperand()) {
    HConstant* constant = chunk()->LookupConstant(const_op);
    Representation r = chunk_->LookupLiteralRepresentation(const_op);
    if (r.IsInteger32()) {
      ASSERT(constant->HasInteger32Value());
      return Operand(constant->Integer32Value());
    } else if (r.IsDouble()) {
      Abort("ToOperand Unsupported double immediate.");
    return Operand(constant->handle());
  } else if (op->IsRegister()) {
  } else if (op->IsDoubleRegister()) {
    Abort("ToOperand IsDoubleRegister unimplemented");


MemOperand LCodeGen::ToMemOperand(LOperand* op) const {
  ASSERT(!op->IsRegister());
  ASSERT(!op->IsDoubleRegister());
  ASSERT(op->IsStackSlot() || op->IsDoubleStackSlot());
  int index = op->index();


MemOperand LCodeGen::ToHighMemOperand(LOperand* op) const {
  ASSERT(op->IsDoubleStackSlot());
  int index = op->index();
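
// A Translation records, frame by frame, where each value of the unoptimized
// frame lives in the optimized code (register, stack slot or literal) so the
// deoptimizer can rebuild the unoptimized frames on bailout.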
void LCodeGen::WriteTranslation(LEnvironment* environment,
                                Translation* translation,
                                int* arguments_index,
                                int* arguments_count) {
  if (environment == NULL) return;

  int translation_size = environment->values()->length();
  int height = translation_size - environment->parameter_count();

  *arguments_index = -environment->parameter_count();
  *arguments_count = environment->parameter_count();

  WriteTranslation(environment->outer(),
  int closure_id = *info()->closure() != *environment->closure()
      ? DefineDeoptimizationLiteral(environment->closure())
      : Translation::kSelfLiteralId;

  switch (environment->frame_type()) {
      translation->BeginJSFrame(environment->ast_id(), closure_id, height);
      translation->BeginConstructStubFrame(closure_id, translation_size);
      ASSERT(translation_size == 1);
      translation->BeginGetterStubFrame(closure_id);
      ASSERT(translation_size == 2);
      translation->BeginSetterStubFrame(closure_id);
      translation->BeginArgumentsAdaptorFrame(closure_id, translation_size);

  if (environment->entry() != NULL &&
      environment->entry()->arguments_pushed()) {
    *arguments_index = *arguments_index < 0
        ? GetStackSlotCount()
        : *arguments_index + *arguments_count;
    *arguments_count = environment->entry()->arguments_count() + 1;

  for (int i = 0; i < translation_size; ++i) {
    LOperand* value = environment->values()->at(i);
    if (environment->spilled_registers() != NULL && value != NULL) {
      if (value->IsRegister() &&
          environment->spilled_registers()[value->index()] != NULL) {
        translation->MarkDuplicate();
        AddToTranslation(translation,
                         environment->spilled_registers()[value->index()],
                         environment->HasTaggedValueAt(i),
                         environment->HasUint32ValueAt(i),
                 value->IsDoubleRegister() &&
                 environment->spilled_double_registers()[value->index()] != NULL) {
        translation->MarkDuplicate();
                         environment->spilled_double_registers()[value->index()],
    AddToTranslation(translation,
                     environment->HasTaggedValueAt(i),
                     environment->HasUint32ValueAt(i),
void LCodeGen::AddToTranslation(Translation* translation,
                                int arguments_count) {
    translation->StoreArgumentsObject(arguments_index, arguments_count);
  } else if (op->IsStackSlot()) {
      translation->StoreStackSlot(op->index());
    } else if (is_uint32) {
      translation->StoreUint32StackSlot(op->index());
      translation->StoreInt32StackSlot(op->index());
  } else if (op->IsDoubleStackSlot()) {
    translation->StoreDoubleStackSlot(op->index());
  } else if (op->IsArgument()) {
    int src_index = GetStackSlotCount() + op->index();
    translation->StoreStackSlot(src_index);
  } else if (op->IsRegister()) {
      translation->StoreRegister(reg);
    } else if (is_uint32) {
      translation->StoreUint32Register(reg);
      translation->StoreInt32Register(reg);
  } else if (op->IsDoubleRegister()) {
    translation->StoreDoubleRegister(reg);
  } else if (op->IsConstantOperand()) {
    int src_index = DefineDeoptimizationLiteral(constant->handle());
    translation->StoreLiteral(src_index);
void LCodeGen::CallCode(Handle<Code> code,
                        RelocInfo::Mode mode,
  CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT, storage_mode);


void LCodeGen::CallCodeGeneric(Handle<Code> code,
                               RelocInfo::Mode mode,
                               SafepointMode safepoint_mode,
  LPointerMap* pointers = instr->pointer_map();
  RecordPosition(pointers->position());
  RecordSafepointWithLazyDeopt(instr, safepoint_mode);

  if (code->kind() == Code::BINARY_OP_IC ||
      code->kind() == Code::COMPARE_IC) {


void LCodeGen::CallRuntime(const Runtime::Function* function,
                           LInstruction* instr) {
  LPointerMap* pointers = instr->pointer_map();
  RecordPosition(pointers->position());
  __ CallRuntime(function, num_arguments);
  RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);

                                       LInstruction* instr) {
  __ CallRuntimeSaveDoubles(id);
  RecordSafepointWithRegisters(
      instr->pointer_map(), argc, Safepoint::kNoLazyDeopt);
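
// Deopt environments are registered lazily: the first time an LEnvironment is
// used at a deopt point its translation is written out and it receives a
// deoptimization index and (for lazy deopts) the current pc offset.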
void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
                                                    Safepoint::DeoptMode mode) {
  if (!environment->HasBeenRegistered()) {
    int jsframe_count = 0;
    for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
    Translation translation(&translations_, frame_count, jsframe_count, zone());
    WriteTranslation(environment, &translation, &args_index, &args_count);
    int deoptimization_index = deoptimizations_.length();
    int pc_offset = masm()->pc_offset();
    environment->Register(deoptimization_index,
                          (mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
    deoptimizations_.Add(environment, zone());
void LCodeGen::DeoptimizeIf(Condition cc, LEnvironment* environment) {
  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
  ASSERT(environment->HasBeenRegistered());
  int id = environment->deoptimization_index();
    Abort("bailout was not prepared");

  ASSERT(FLAG_deopt_every_n_times < 2);
  if (FLAG_deopt_every_n_times == 1 &&
      info_->shared_info()->opt_count() == id) {

  if (FLAG_trap_on_deopt) __ stop("trap_on_deopt", cc);

    if (deopt_jump_table_.is_empty() ||
        (deopt_jump_table_.last().address != entry)) {
      deopt_jump_table_.Add(JumpTableEntry(entry), zone());
    __ b(cc, &deopt_jump_table_.last().label);
void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
  int length = deoptimizations_.length();
  if (length == 0) return;
  Handle<DeoptimizationInputData> data =
      factory()->NewDeoptimizationInputData(length, TENURED);

  Handle<ByteArray> translations = translations_.CreateByteArray();
  data->SetTranslationByteArray(*translations);
  data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));

  Handle<FixedArray> literals =
      factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
  for (int i = 0; i < deoptimization_literals_.length(); i++) {
    literals->set(i, *deoptimization_literals_[i]);
  data->SetLiteralArray(*literals);

  data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id().ToInt()));

  for (int i = 0; i < length; i++) {
    LEnvironment* env = deoptimizations_[i];
    data->SetAstId(i, env->ast_id());
    data->SetTranslationIndex(i, Smi::FromInt(env->translation_index()));
    data->SetArgumentsStackHeight(i,
  code->set_deoptimization_data(*data);


int LCodeGen::DefineDeoptimizationLiteral(Handle<Object> literal) {
  int result = deoptimization_literals_.length();
  for (int i = 0; i < deoptimization_literals_.length(); ++i) {
    if (deoptimization_literals_[i].is_identical_to(literal)) return i;
  deoptimization_literals_.Add(literal, zone());


void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
  ASSERT(deoptimization_literals_.length() == 0);

  const ZoneList<Handle<JSFunction> >* inlined_closures =
      chunk()->inlined_closures();

  for (int i = 0, length = inlined_closures->length();
    DefineDeoptimizationLiteral(inlined_closures->at(i));

  inlined_function_count_ = deoptimization_literals_.length();
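
// Safepoint recording: every call site notes which stack slots (and, for the
// kWithRegisters variants, which registers) hold tagged pointers so the GC
// can find and update them while the call is in progress.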
void LCodeGen::RecordSafepointWithLazyDeopt(
    LInstruction* instr, SafepointMode safepoint_mode) {
  if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
    RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt);
    ASSERT(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
    RecordSafepointWithRegisters(
        instr->pointer_map(), 0, Safepoint::kLazyDeopt);


void LCodeGen::RecordSafepoint(
    LPointerMap* pointers,
    Safepoint::Kind kind,
    Safepoint::DeoptMode deopt_mode) {
  ASSERT(expected_safepoint_kind_ == kind);

  const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();
  Safepoint safepoint = safepoints_.DefineSafepoint(masm(),
      kind, arguments, deopt_mode);
  for (int i = 0; i < operands->length(); i++) {
    LOperand* pointer = operands->at(i);
    if (pointer->IsStackSlot()) {
      safepoint.DefinePointerSlot(pointer->index(), zone());
    } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
      safepoint.DefinePointerRegister(ToRegister(pointer), zone());
  if (kind & Safepoint::kWithRegisters) {
    safepoint.DefinePointerRegister(cp, zone());


void LCodeGen::RecordSafepoint(LPointerMap* pointers,
                               Safepoint::DeoptMode deopt_mode) {
  RecordSafepoint(pointers, Safepoint::kSimple, 0, deopt_mode);


void LCodeGen::RecordSafepoint(Safepoint::DeoptMode deopt_mode) {
  LPointerMap empty_pointers(RelocInfo::kNoPosition, zone());
  RecordSafepoint(&empty_pointers, deopt_mode);


void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
                                            Safepoint::DeoptMode deopt_mode) {
      pointers, Safepoint::kWithRegisters, arguments, deopt_mode);


void LCodeGen::RecordSafepointWithRegistersAndDoubles(
    LPointerMap* pointers,
    Safepoint::DeoptMode deopt_mode) {
      pointers, Safepoint::kWithRegistersAndDoubles, arguments, deopt_mode);


void LCodeGen::RecordPosition(int position) {
  if (position == RelocInfo::kNoPosition) return;
  masm()->positions_recorder()->RecordPosition(position);
void LCodeGen::DoLabel(LLabel* label) {
  if (label->is_loop_header()) {
    Comment(";;; B%d - LOOP entry", label->block_id());
    Comment(";;; B%d", label->block_id());
  __ bind(label->label());
  current_block_ = label->block_id();


void LCodeGen::DoParallelMove(LParallelMove* move) {
  resolver_.Resolve(move);


void LCodeGen::DoGap(LGap* gap) {
    LParallelMove* move = gap->GetParallelMove(inner_pos);
    if (move != NULL) DoParallelMove(move);


void LCodeGen::DoInstructionGap(LInstructionGap* instr) {


void LCodeGen::DoParameter(LParameter* instr) {


void LCodeGen::DoCallStub(LCallStub* instr) {
  switch (instr->hydrogen()->major_key()) {
    case CodeStub::RegExpConstructResult: {
      RegExpConstructResultStub stub;
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
    case CodeStub::RegExpExec: {
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
    case CodeStub::NumberToString: {
      NumberToStringStub stub;
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
    case CodeStub::StringAdd: {
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
    case CodeStub::StringCompare: {
      StringCompareStub stub;
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
    case CodeStub::TranscendentalCache: {
      TranscendentalCacheStub stub(instr->transcendental_type(),
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);


void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
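
// DoModI picks one of three strategies: a bit mask for power-of-two divisors,
// the sdiv/mls instructions when SUDIV is available, and otherwise a VFP
// double-precision division whose result is converted back and corrected to
// produce the remainder.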
void LCodeGen::DoModI(LModI* instr) {
  if (instr->hydrogen()->HasPowerOf2Divisor()) {
    Register dividend = ToRegister(instr->left());
    Register result = ToRegister(instr->result());
    if (divisor < 0) divisor = -divisor;

    Label positive_dividend, done;
    __ cmp(dividend, Operand(0));
    __ b(pl, &positive_dividend);
    __ rsb(result, dividend, Operand(0));
    __ and_(result, result, Operand(divisor - 1), SetCC);
      DeoptimizeIf(eq, instr->environment());
    __ rsb(result, result, Operand(0));
    __ bind(&positive_dividend);
    __ and_(result, dividend, Operand(divisor - 1));

  Register result = ToRegister(instr->result());
    CpuFeatures::Scope scope(SUDIV);
      __ cmp(right, Operand(0));
      DeoptimizeIf(eq, instr->environment());
    __ sdiv(result, left, right);
    __ mls(result, result, right, left);
    __ cmp(result, Operand(0));
      __ cmp(left, Operand(0));
      DeoptimizeIf(lt, instr->environment());

    Register scratch = scratch0();
    Register scratch2 = ToRegister(instr->temp());
    DwVfpRegister dividend = ToDoubleRegister(instr->temp2());
    DwVfpRegister divisor = ToDoubleRegister(instr->temp3());
    DwVfpRegister quotient = double_scratch0();

    ASSERT(!dividend.is(divisor));
    ASSERT(!dividend.is(quotient));
    ASSERT(!divisor.is(quotient));
    ASSERT(!scratch.is(left));
    ASSERT(!scratch.is(right));
    ASSERT(!scratch.is(result));

    Label vfp_modulo, both_positive, right_negative;

      __ cmp(right, Operand(0));
      DeoptimizeIf(eq, instr->environment());

    __ Move(result, left);
    __ cmp(left, Operand(0));
    __ vmov(divisor.low(), right);
    __ b(lt, &vfp_modulo);

    __ cmp(left, Operand(right));
    __ JumpIfNotPowerOfTwoOrZeroAndNeg(right,
    __ and_(result, scratch, Operand(left));

    __ bind(&right_negative);
    __ rsb(right, right, Operand(0));

    __ bind(&both_positive);
    const int kUnfolds = 3;
    __ mov(scratch, left);
    for (int i = 0; i < kUnfolds; i++) {
      __ cmp(scratch, Operand(right));
      if (i < kUnfolds - 1) __ sub(scratch, scratch, right);

    __ bind(&vfp_modulo);
    __ vmov(dividend.low(), left);
    __ vcvt_f64_s32(dividend, dividend.low());
    __ vcvt_f64_s32(divisor, divisor.low());
    __ vabs(divisor, divisor);
    __ vdiv(quotient, dividend, divisor);
    __ vcvt_s32_f64(quotient.low(), quotient);
    __ vcvt_f64_s32(quotient, quotient.low());

    DwVfpRegister double_scratch = dividend;
    __ vmul(double_scratch, divisor, quotient);
    __ vcvt_s32_f64(double_scratch.low(), double_scratch);
    __ vmov(scratch, double_scratch.low());
    __ sub(result, left, scratch);
      __ sub(scratch2, left, scratch, SetCC);
      __ cmp(left, Operand(0));
      DeoptimizeIf(mi, instr->environment());
      __ mov(result, scratch2);
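
// Division by a constant is strength-reduced to a multiply by a precomputed
// magic number M plus shifts; roughly, quotient ~= (dividend * M) >> (32 + s)
// with sign corrections (see the DivMagicNumbers use below), and the
// remainder is recovered as dividend - quotient * divisor.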
void LCodeGen::EmitSignedIntegerDivisionByConstant(
    LEnvironment* environment) {
  ASSERT(LChunkBuilder::HasMagicNumberForDivisor(divisor));

  uint32_t divisor_abs = abs(divisor);
      CompilerIntrinsics::CountTrailingZeros(divisor_abs);

  switch (divisor_abs) {
      DeoptimizeIf(al, environment);
        __ Move(result, dividend);
        __ rsb(result, dividend, Operand(0), SetCC);
        DeoptimizeIf(vs, environment);
      __ mov(remainder, Operand(0));

        __ mov(scratch, Operand(dividend, ASR, power - 1));
        __ add(scratch, dividend, Operand(scratch, LSR, 32 - power));
        __ mov(result, Operand(scratch, ASR, power));
          __ rsb(result, result, Operand(0));
          __ sub(remainder, dividend, Operand(result, LSL, power));
          __ add(remainder, dividend, Operand(result, LSL, power));

      DivMagicNumbers magic_numbers =
      const int32_t M = magic_numbers.M;
      const int32_t s = magic_numbers.s + power_of_2_factor;

      __ mov(ip, Operand(M));
      __ smull(ip, scratch, dividend, ip);
        __ add(scratch, scratch, Operand(dividend));
      __ mov(scratch, Operand(scratch, ASR, s));
      __ add(result, scratch, Operand(dividend, LSR, 31));
      if (divisor < 0) __ rsb(result, result, Operand(0));
      __ mov(ip, Operand(divisor));
      __ mul(scratch, result, ip);
      __ sub(remainder, dividend, scratch);
void LCodeGen::DoDivI(LDivI* instr) {
  class DeferredDivI: public LDeferredCode {
    DeferredDivI(LCodeGen* codegen, LDivI* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() {
      codegen()->DoDeferredBinaryOpStub(instr_->pointer_map(),
    virtual LInstruction* instr() { return instr_; }

  const Register left = ToRegister(instr->left());
  const Register right = ToRegister(instr->right());
  const Register scratch = scratch0();
  const Register result = ToRegister(instr->result());

    __ cmp(right, Operand(0));
    DeoptimizeIf(eq, instr->environment());

    Label left_not_zero;
    __ cmp(left, Operand(0));
    __ b(ne, &left_not_zero);
    __ cmp(right, Operand(0));
    DeoptimizeIf(mi, instr->environment());
    __ bind(&left_not_zero);

    Label left_not_min_int;
    __ b(ne, &left_not_min_int);
    __ cmp(right, Operand(-1));
    DeoptimizeIf(eq, instr->environment());
    __ bind(&left_not_min_int);

  Label done, deoptimize;
  __ cmp(right, Operand(1));
  __ cmp(right, Operand(2));
  __ tst(left, Operand(1), eq);
  __ cmp(right, Operand(4));
  __ tst(left, Operand(3), eq);

  DeferredDivI* deferred = new(zone()) DeferredDivI(this, instr);
  __ TrySmiTag(left, &deoptimize, scratch);
  __ TrySmiTag(right, &deoptimize, scratch);
  __ b(al, deferred->entry());
  __ bind(deferred->exit());
  __ JumpIfNotSmi(result, &deoptimize);
  __ SmiUntag(result);

  __ bind(&deoptimize);
  DeoptimizeIf(al, instr->environment());
void LCodeGen::DoMathFloorOfDiv(LMathFloorOfDiv* instr) {
  const Register result = ToRegister(instr->result());
  const Register left = ToRegister(instr->left());
  const Register remainder = ToRegister(instr->temp());
  const Register scratch = scratch0();

  ASSERT(instr->right()->IsConstantOperand());
    __ cmp(left, Operand(0));
    DeoptimizeIf(eq, instr->environment());
  EmitSignedIntegerDivisionByConstant(result,
                                      instr->environment());
  __ cmp(remainder, Operand(0));
  __ teq(remainder, Operand(divisor), ne);


void LCodeGen::DoDeferredBinaryOpStub(LPointerMap* pointer_map,
                                      LOperand* left_argument,
                                      LOperand* right_argument,
  PushSafepointRegistersScope scope(this, Safepoint::kWithRegistersAndDoubles);
  } else if (left.is(r0) && right.is(r1)) {
  } else if (left.is(r0)) {
  RecordSafepointWithRegistersAndDoubles(pointer_map,
                                         Safepoint::kNoLazyDeopt);
  __ StoreToSafepointRegistersAndDoublesSlot(r0, r0);
void LCodeGen::DoMulI(LMulI* instr) {
  Register scratch = scratch0();
  Register result = ToRegister(instr->result());
  LOperand* right_op = instr->right();
  bool bailout_on_minus_zero =

  if (right_op->IsConstantOperand() && !can_overflow) {
    if (bailout_on_minus_zero && (constant < 0)) {
      __ cmp(left, Operand(0));
      DeoptimizeIf(eq, instr->environment());
        __ rsb(result, left, Operand(0));
        if (bailout_on_minus_zero) {
          __ cmp(left, Operand(0));
          DeoptimizeIf(mi, instr->environment());
        __ mov(result, Operand(0));
        __ Move(result, left);
        int32_t mask = constant >> 31;
        uint32_t constant_abs = (constant + mask) ^ mask;
            __ mov(result, Operand(left, LSL, shift));
            __ add(result, left, Operand(left, LSL, shift));
            __ rsb(result, left, Operand(left, LSL, shift));
          if (constant < 0) __ rsb(result, result, Operand(0));
          __ mov(ip, Operand(constant));
          __ mul(result, left, ip);

    Register right = EmitLoadRegister(right_op, scratch);
    if (bailout_on_minus_zero) {
      __ smull(result, scratch, left, right);
      __ cmp(scratch, Operand(result, ASR, 31));
      DeoptimizeIf(ne, instr->environment());
      __ mul(result, left, right);
    if (bailout_on_minus_zero) {
      __ cmp(result, Operand(0));
      DeoptimizeIf(mi, instr->environment());
void LCodeGen::DoBitI(LBitI* instr) {
  LOperand* left_op = instr->left();
  LOperand* right_op = instr->right();
  ASSERT(left_op->IsRegister());
  Register result = ToRegister(instr->result());

  if (right_op->IsStackSlot() || right_op->IsArgument()) {
    right = Operand(EmitLoadRegister(right_op, ip));
    ASSERT(right_op->IsRegister() || right_op->IsConstantOperand());
    right = ToOperand(right_op);

  switch (instr->op()) {
    case Token::BIT_AND:
      __ and_(result, left, right);
      __ orr(result, left, right);
    case Token::BIT_XOR:
      __ eor(result, left, right);


void LCodeGen::DoShiftI(LShiftI* instr) {
  LOperand* right_op = instr->right();
  Register result = ToRegister(instr->result());
  Register scratch = scratch0();
  if (right_op->IsRegister()) {
    switch (instr->op()) {
        __ mov(result, Operand(left, ASR, scratch));
        if (instr->can_deopt()) {
          __ mov(result, Operand(left, LSR, scratch), SetCC);
          DeoptimizeIf(mi, instr->environment());
          __ mov(result, Operand(left, LSR, scratch));
        __ mov(result, Operand(left, LSL, scratch));
    uint8_t shift_count = static_cast<uint8_t>(value & 0x1F);
    switch (instr->op()) {
        if (shift_count != 0) {
          __ mov(result, Operand(left, ASR, shift_count));
          __ Move(result, left);
        if (shift_count != 0) {
          __ mov(result, Operand(left, LSR, shift_count));
          if (instr->can_deopt()) {
            __ tst(left, Operand(0x80000000));
            DeoptimizeIf(ne, instr->environment());
          __ Move(result, left);
        if (shift_count != 0) {
          __ mov(result, Operand(left, LSL, shift_count));
          __ Move(result, left);
void LCodeGen::DoSubI(LSubI* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();
  LOperand* result = instr->result();

  if (right->IsStackSlot() || right->IsArgument()) {
    Register right_reg = EmitLoadRegister(right, ip);
    ASSERT(right->IsRegister() || right->IsConstantOperand());
    DeoptimizeIf(vs, instr->environment());


void LCodeGen::DoConstantI(LConstantI* instr) {
  ASSERT(instr->result()->IsRegister());
  __ mov(ToRegister(instr->result()), Operand(instr->value()));


void LCodeGen::DoConstantD(LConstantD* instr) {
  ASSERT(instr->result()->IsDoubleRegister());
  DwVfpRegister result = ToDoubleRegister(instr->result());
  double v = instr->value();
  __ Vmov(result, v, scratch0());


void LCodeGen::DoConstantT(LConstantT* instr) {
  Handle<Object> value = instr->value();
  if (value->IsSmi()) {
                      Handle<HeapObject>::cast(value));


void LCodeGen::DoJSArrayLength(LJSArrayLength* instr) {
  Register result = ToRegister(instr->result());


void LCodeGen::DoFixedArrayBaseLength(LFixedArrayBaseLength* instr) {
  Register result = ToRegister(instr->result());


void LCodeGen::DoMapEnumLength(LMapEnumLength* instr) {
  Register result = ToRegister(instr->result());
  __ EnumLength(result, map);


void LCodeGen::DoElementsKind(LElementsKind* instr) {
  Register result = ToRegister(instr->result());
void LCodeGen::DoValueOf(LValueOf* instr) {
  Register result = ToRegister(instr->result());
  __ Move(result, input, eq);
  __ Move(result, input, ne);


void LCodeGen::DoDateField(LDateField* instr) {
  Register result = ToRegister(instr->result());
  Register scratch = ToRegister(instr->temp());
  Smi* index = instr->index();
  Label runtime, done;
  ASSERT(object.is(result));
  ASSERT(!scratch.is(scratch0()));
  ASSERT(!scratch.is(object));

  DeoptimizeIf(eq, instr->environment());
  DeoptimizeIf(ne, instr->environment());

  if (index->value() == 0) {
    ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
    __ mov(scratch, Operand(stamp));
    __ cmp(scratch, scratch0());
    __ PrepareCallCFunction(2, scratch);
    __ mov(r1, Operand(index));
    __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);


void LCodeGen::DoBitNotI(LBitNotI* instr) {
  Register result = ToRegister(instr->result());
  __ mvn(result, Operand(input));


void LCodeGen::DoThrow(LThrow* instr) {
  Register input_reg = EmitLoadRegister(instr->value(), ip);
  CallRuntime(Runtime::kThrow, 1, instr);

  if (FLAG_debug_code) {
    __ stop("Unreachable code.");
void LCodeGen::DoAddI(LAddI* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();
  LOperand* result = instr->result();

  if (right->IsStackSlot() || right->IsArgument()) {
    Register right_reg = EmitLoadRegister(right, ip);
    ASSERT(right->IsRegister() || right->IsConstantOperand());
    DeoptimizeIf(vs, instr->environment());


void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();
  if (instr->hydrogen()->representation().IsInteger32()) {
    Operand right_op = (right->IsRegister() || right->IsConstantOperand())
        : Operand(EmitLoadRegister(right, ip));
    Register result_reg = ToRegister(instr->result());
    __ cmp(left_reg, right_op);
    if (!result_reg.is(left_reg)) {
      __ mov(result_reg, left_reg, LeaveCC, condition);
    ASSERT(instr->hydrogen()->representation().IsDouble());
    Label check_nan_left, check_zero, return_left, return_right, done;
    __ VFPCompareAndSetFlags(left_reg, right_reg);
    __ b(vs, &check_nan_left);
    __ b(eq, &check_zero);
    __ b(condition, &return_left);
    __ b(al, &return_right);

    __ bind(&check_zero);
    __ VFPCompareAndSetFlags(left_reg, 0.0);
    __ b(ne, &return_left);
      __ vneg(left_reg, left_reg);
      __ vsub(result_reg, left_reg, right_reg);
      __ vneg(result_reg, result_reg);
      __ vadd(result_reg, left_reg, right_reg);

    __ bind(&check_nan_left);
    __ VFPCompareAndSetFlags(left_reg, left_reg);
    __ b(vs, &return_left);
    __ bind(&return_right);
    if (!right_reg.is(result_reg)) {
      __ vmov(result_reg, right_reg);
    __ bind(&return_left);
    if (!left_reg.is(result_reg)) {
      __ vmov(result_reg, left_reg);


void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
  switch (instr->op()) {
      __ vadd(result, left, right);
      __ vsub(result, left, right);
      __ vmul(result, left, right);
      __ vdiv(result, left, right);
      __ PrepareCallCFunction(0, 2, scratch0());
      __ SetCallCDoubleArguments(left, right);
          ExternalReference::double_fp_operation(Token::MOD, isolate()),
      __ GetCFunctionDoubleResult(result);


void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
int LCodeGen::GetNextEmittedBlock(int block) {
  for (int i = block + 1; i < graph()->blocks()->length(); ++i) {
    LLabel* label = chunk_->GetLabel(i);
    if (!label->HasReplacement()) return i;


void LCodeGen::EmitBranch(int left_block, int right_block, Condition cc) {
  int next_block = GetNextEmittedBlock(current_block_);
  right_block = chunk_->LookupDestination(right_block);
  left_block = chunk_->LookupDestination(left_block);

  if (right_block == left_block) {
    EmitGoto(left_block);
  } else if (left_block == next_block) {
  } else if (right_block == next_block) {
    __ b(cc, chunk_->GetAssemblyLabel(left_block));
    __ b(cc, chunk_->GetAssemblyLabel(left_block));
    __ b(chunk_->GetAssemblyLabel(right_block));
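
// DoBranch emits a ToBoolean-style test: fast paths for integer, double,
// boolean and smi inputs, otherwise a chain of checks guided by the
// instruction's expected_input_types().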
void LCodeGen::DoBranch(LBranch* instr) {
  int true_block = chunk_->LookupDestination(instr->true_block_id());
  int false_block = chunk_->LookupDestination(instr->false_block_id());

  Representation r = instr->hydrogen()->value()->representation();
  if (r.IsInteger32()) {
    __ cmp(reg, Operand(0));
    EmitBranch(true_block, false_block, ne);
  } else if (r.IsDouble()) {
    Register scratch = scratch0();
    __ VFPCompareAndLoadFlags(reg, 0.0, scratch);
    EmitBranch(true_block, false_block, eq);
    HType type = instr->hydrogen()->value()->type();
    if (type.IsBoolean()) {
      __ CompareRoot(reg, Heap::kTrueValueRootIndex);
      EmitBranch(true_block, false_block, eq);
    } else if (type.IsSmi()) {
      __ cmp(reg, Operand(0));
      EmitBranch(true_block, false_block, ne);
      Label* true_label = chunk_->GetAssemblyLabel(true_block);
      Label* false_label = chunk_->GetAssemblyLabel(false_block);

      ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types();
        __ CompareRoot(reg, Heap::kUndefinedValueRootIndex);
        __ b(eq, false_label);
        __ CompareRoot(reg, Heap::kTrueValueRootIndex);
        __ b(eq, true_label);
        __ CompareRoot(reg, Heap::kFalseValueRootIndex);
        __ b(eq, false_label);
        __ CompareRoot(reg, Heap::kNullValueRootIndex);
        __ b(eq, false_label);
        __ cmp(reg, Operand(0));
        __ b(eq, false_label);
        __ JumpIfSmi(reg, true_label);
      } else if (expected.NeedsMap()) {
        DeoptimizeIf(eq, instr->environment());

      const Register map = scratch0();
      if (expected.NeedsMap()) {
        if (expected.CanBeUndetectable()) {
          __ b(ne, false_label);
        __ b(ge, true_label);
        __ b(ge, &not_string);
        __ cmp(ip, Operand(0));
        __ b(ne, true_label);
        __ bind(&not_string);

        Label not_heap_number;
        __ CompareRoot(map, Heap::kHeapNumberMapRootIndex);
        __ b(ne, &not_heap_number);
        __ VFPCompareAndSetFlags(dbl_scratch, 0.0);
        __ b(vs, false_label);
        __ b(eq, false_label);
        __ bind(&not_heap_number);

        DeoptimizeIf(al, instr->environment());


void LCodeGen::EmitGoto(int block) {
  block = chunk_->LookupDestination(block);
  int next_block = GetNextEmittedBlock(current_block_);
  if (block != next_block) {
    __ jmp(chunk_->GetAssemblyLabel(block));


void LCodeGen::DoGoto(LGoto* instr) {
  EmitGoto(instr->block_id());
    case Token::EQ_STRICT:
      cond = is_unsigned ? lo : lt;
      cond = is_unsigned ? hi : gt;
      cond = is_unsigned ? ls : le;
      cond = is_unsigned ? hs : ge;
    case Token::INSTANCEOF:


void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();
  int false_block = chunk_->LookupDestination(instr->false_block_id());
  int true_block = chunk_->LookupDestination(instr->true_block_id());
  Condition cond = TokenToCondition(instr->op(), false);

  if (left->IsConstantOperand() && right->IsConstantOperand()) {
    EmitGoto(next_block);
    if (instr->is_double()) {
      __ VFPCompareAndSetFlags(ToDoubleRegister(left), ToDoubleRegister(right));
      __ b(vs, chunk_->GetAssemblyLabel(false_block));
      if (right->IsConstantOperand()) {
      } else if (left->IsConstantOperand()) {
    EmitBranch(true_block, false_block, cond);


void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
  int false_block = chunk_->LookupDestination(instr->false_block_id());
  int true_block = chunk_->LookupDestination(instr->true_block_id());
  __ cmp(left, Operand(right));
  EmitBranch(true_block, false_block, eq);


void LCodeGen::DoCmpConstantEqAndBranch(LCmpConstantEqAndBranch* instr) {
  int true_block = chunk_->LookupDestination(instr->true_block_id());
  int false_block = chunk_->LookupDestination(instr->false_block_id());
  __ cmp(left, Operand(instr->hydrogen()->right()));
  EmitBranch(true_block, false_block, eq);
void LCodeGen::DoIsNilAndBranch(LIsNilAndBranch* instr) {
  Register scratch = scratch0();
  int false_block = chunk_->LookupDestination(instr->false_block_id());

  if (instr->hydrogen()->representation().IsSpecialization() ||
      instr->hydrogen()->type().IsSmi()) {
    EmitGoto(false_block);

  int true_block = chunk_->LookupDestination(instr->true_block_id());
      Heap::kNullValueRootIndex :
      Heap::kUndefinedValueRootIndex;
  __ LoadRoot(ip, nil_value);
    EmitBranch(true_block, false_block, eq);
        Heap::kUndefinedValueRootIndex :
        Heap::kNullValueRootIndex;
    Label* true_label = chunk_->GetAssemblyLabel(true_block);
    Label* false_label = chunk_->GetAssemblyLabel(false_block);
    __ b(eq, true_label);
    __ LoadRoot(ip, other_nil_value);
    __ b(eq, true_label);
    __ JumpIfSmi(reg, false_label);
    EmitBranch(true_block, false_block, ne);


Condition LCodeGen::EmitIsObject(Register input,
                                 Label* is_not_object,
  Register temp2 = scratch0();
  __ JumpIfSmi(input, is_not_object);

  __ LoadRoot(temp2, Heap::kNullValueRootIndex);
  __ cmp(input, temp2);
  __ b(eq, is_object);
  __ b(ne, is_not_object);
  __ b(lt, is_not_object);


void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
  int true_block = chunk_->LookupDestination(instr->true_block_id());
  int false_block = chunk_->LookupDestination(instr->false_block_id());
  Label* true_label = chunk_->GetAssemblyLabel(true_block);
  Label* false_label = chunk_->GetAssemblyLabel(false_block);
  EmitIsObject(reg, temp1, false_label, true_label);
  EmitBranch(true_block, false_block, true_cond);


Condition LCodeGen::EmitIsString(Register input,
                                 Label* is_not_string) {
  __ JumpIfSmi(input, is_not_string);


void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
  int true_block = chunk_->LookupDestination(instr->true_block_id());
  int false_block = chunk_->LookupDestination(instr->false_block_id());
  Label* false_label = chunk_->GetAssemblyLabel(false_block);
  EmitIsString(reg, temp1, false_label);
  EmitBranch(true_block, false_block, true_cond);


void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
  int true_block = chunk_->LookupDestination(instr->true_block_id());
  int false_block = chunk_->LookupDestination(instr->false_block_id());
  Register input_reg = EmitLoadRegister(instr->value(), ip);
  EmitBranch(true_block, false_block, eq);
void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
  int true_block = chunk_->LookupDestination(instr->true_block_id());
  int false_block = chunk_->LookupDestination(instr->false_block_id());

  __ JumpIfSmi(input, chunk_->GetAssemblyLabel(false_block));
  EmitBranch(true_block, false_block, ne);

    case Token::EQ_STRICT:


void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
  int true_block = chunk_->LookupDestination(instr->true_block_id());
  int false_block = chunk_->LookupDestination(instr->false_block_id());
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
  __ cmp(r0, Operand(0));
  Condition condition = ComputeCompareCondition(op);
  EmitBranch(true_block, false_block, condition);


static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {


static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
  if (from == to) return eq;


void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
  Register scratch = scratch0();
  int true_block = chunk_->LookupDestination(instr->true_block_id());
  int false_block = chunk_->LookupDestination(instr->false_block_id());
  Label* false_label = chunk_->GetAssemblyLabel(false_block);

  __ JumpIfSmi(input, false_label);
  __ CompareObjectType(input, scratch, scratch, TestType(instr->hydrogen()));
  EmitBranch(true_block, false_block, BranchCondition(instr->hydrogen()));


void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
  Register result = ToRegister(instr->result());
  __ AssertString(input);
  __ IndexFromHash(result, result);


void LCodeGen::DoHasCachedArrayIndexAndBranch(
    LHasCachedArrayIndexAndBranch* instr) {
  Register scratch = scratch0();
  int true_block = chunk_->LookupDestination(instr->true_block_id());
  int false_block = chunk_->LookupDestination(instr->false_block_id());
  EmitBranch(true_block, false_block, eq);
void LCodeGen::EmitClassOfTest(Label* is_true,
                               Handle<String> class_name,
  ASSERT(!input.is(temp2));
  __ JumpIfSmi(input, is_false);

  if (class_name->IsEqualTo(CStrVector("Function"))) {
  if (class_name->IsEqualTo(CStrVector("Object"))) {
  __ cmp(temp, Operand(class_name));


void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
  Register temp = scratch0();
  Handle<String> class_name = instr->hydrogen()->class_name();
  int true_block = chunk_->LookupDestination(instr->true_block_id());
  int false_block = chunk_->LookupDestination(instr->false_block_id());
  Label* true_label = chunk_->GetAssemblyLabel(true_block);
  Label* false_label = chunk_->GetAssemblyLabel(false_block);

  EmitClassOfTest(true_label, false_label, class_name, input, temp, temp2);
  EmitBranch(true_block, false_block, eq);


void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
  int true_block = instr->true_block_id();
  int false_block = instr->false_block_id();
  __ cmp(temp, Operand(instr->map()));
  EmitBranch(true_block, false_block, eq);


void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
  __ cmp(r0, Operand(0));
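
// DoInstanceOfKnownGlobal caches the instanceof result in the code itself: a
// patchable map check guards an inlined result (initially the hole), and the
// deferred code falls back to the InstanceofStub on a cache miss.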
void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
  class DeferredInstanceOfKnownGlobal: public LDeferredCode {
    DeferredInstanceOfKnownGlobal(LCodeGen* codegen,
                                  LInstanceOfKnownGlobal* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() {
      codegen()->DoDeferredInstanceOfKnownGlobal(instr_, &map_check_);
    virtual LInstruction* instr() { return instr_; }
    Label* map_check() { return &map_check_; }
    LInstanceOfKnownGlobal* instr_;

  DeferredInstanceOfKnownGlobal* deferred;
  deferred = new(zone()) DeferredInstanceOfKnownGlobal(this, instr);

  Label done, false_result;
  Register object = ToRegister(instr->value());
  Register result = ToRegister(instr->result());

  __ JumpIfSmi(object, &false_result);

  Register map = temp;
  __ bind(deferred->map_check());
    PredictableCodeSizeScope predictable(masm_);
    Handle<JSGlobalPropertyCell> cell =
        factory()->NewJSGlobalPropertyCell(factory()->the_hole_value());
    __ mov(ip, Operand(Handle<Object>(cell)));
    __ cmp(map, Operand(ip));
    __ b(ne, &cache_miss);
    __ mov(result, Operand(factory()->the_hole_value()));

  __ bind(&cache_miss);
  __ LoadRoot(ip, Heap::kNullValueRootIndex);
  __ cmp(object, Operand(ip));
  __ b(eq, &false_result);
  Condition is_string = masm_->IsObjectStringType(object, temp);
  __ b(is_string, &false_result);

  __ b(deferred->entry());

  __ bind(&false_result);
  __ LoadRoot(result, Heap::kFalseValueRootIndex);

  __ bind(deferred->exit());
void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
  Register result = ToRegister(instr->result());
  InstanceofStub stub(flags);

  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);

  static const int kAdditionalDelta = 5;
  PredictableCodeSizeScope predictable(masm_);
  int delta = masm_->InstructionsGeneratedSince(map_check) + kAdditionalDelta;
  Label before_push_delta;
  __ bind(&before_push_delta);
  __ BlockConstPoolFor(kAdditionalDelta);
  if (masm_->InstructionsGeneratedSince(&before_push_delta) != 2) {
    ASSERT_EQ(1, masm_->InstructionsGeneratedSince(&before_push_delta));
  __ StoreToSafepointRegisterSlot(temp, temp);
  CallCodeGeneric(stub.GetCode(),
                  RelocInfo::CODE_TARGET,
                  RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
  LEnvironment* env = instr->GetDeferredLazyDeoptimizationEnvironment();
  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
  __ StoreToSafepointRegisterSlot(result, result);


void LCodeGen::DoCmpT(LCmpT* instr) {
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
  __ cmp(r0, Operand(0));
  Condition condition = ComputeCompareCondition(op);
                Heap::kTrueValueRootIndex,
                Heap::kFalseValueRootIndex,


void LCodeGen::DoReturn(LReturn* instr) {
    __ CallRuntime(Runtime::kTraceExit, 1);
  __ add(sp, sp, Operand(sp_delta));
void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
  Register result = ToRegister(instr->result());
  __ mov(ip, Operand(Handle<Object>(instr->hydrogen()->cell())));
  if (instr->hydrogen()->RequiresHoleCheck()) {
    __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
    DeoptimizeIf(eq, instr->environment());


void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
  __ mov(r2, Operand(instr->name()));
  RelocInfo::Mode mode = instr->for_typeof() ? RelocInfo::CODE_TARGET
                                             : RelocInfo::CODE_TARGET_CONTEXT;
  Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
  CallCode(ic, mode, instr);


void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
  Register cell = scratch0();
  __ mov(cell, Operand(instr->hydrogen()->cell()));
  if (instr->hydrogen()->RequiresHoleCheck()) {
    Register payload = ToRegister(instr->temp());
    __ CompareRoot(payload, Heap::kTheHoleValueRootIndex);
    DeoptimizeIf(eq, instr->environment());


void LCodeGen::DoStoreGlobalGeneric(LStoreGlobalGeneric* instr) {
  __ mov(r2, Operand(instr->name()));
  Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
      ? isolate()->builtins()->StoreIC_Initialize_Strict()
      : isolate()->builtins()->StoreIC_Initialize();
  CallCode(ic, RelocInfo::CODE_TARGET_CONTEXT, instr);


void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
  Register context = ToRegister(instr->context());
  Register result = ToRegister(instr->result());
  if (instr->hydrogen()->RequiresHoleCheck()) {
    __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
    if (instr->hydrogen()->DeoptimizesOnHole()) {
      DeoptimizeIf(eq, instr->environment());
      __ mov(result, Operand(factory()->undefined_value()), LeaveCC, eq);


void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
  Register context = ToRegister(instr->context());
  Register scratch = scratch0();

  Label skip_assignment;
  if (instr->hydrogen()->RequiresHoleCheck()) {
    __ ldr(scratch, target);
    __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
    __ cmp(scratch, ip);
    if (instr->hydrogen()->DeoptimizesOnHole()) {
      DeoptimizeIf(eq, instr->environment());
      __ b(ne, &skip_assignment);

  __ str(value, target);
  if (instr->hydrogen()->NeedsWriteBarrier()) {
    HType type = instr->hydrogen()->value()->type();
    __ RecordWriteContextSlot(context,

  __ bind(&skip_assignment);
void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
  Register object = ToRegister(instr->object());
  Register result = ToRegister(instr->result());
  if (instr->hydrogen()->is_in_object()) {


void LCodeGen::EmitLoadFieldOrConstantFunction(Register result,
                                               Handle<String> name,
                                               LEnvironment* env) {
  LookupResult lookup(isolate());
  type->LookupDescriptor(NULL, *name, &lookup);
  ASSERT(lookup.IsFound() || lookup.IsCacheable());
  if (lookup.IsField()) {
    int index = lookup.GetLocalFieldIndexFromMap(*type);
  } else if (lookup.IsConstantFunction()) {
    Handle<JSFunction> function(lookup.GetConstantFunctionFromMap(*type));
    __ LoadHeapObject(result, function);
    Heap* heap = type->GetHeap();
    while (*current != heap->null_value()) {
      __ LoadHeapObject(result, current);
      __ cmp(result, Operand(Handle<Map>(current->map())));
      DeoptimizeIf(ne, env);
    __ LoadRoot(result, Heap::kUndefinedValueRootIndex);


void LCodeGen::DoLoadNamedFieldPolymorphic(LLoadNamedFieldPolymorphic* instr) {
  Register object = ToRegister(instr->object());
  Register result = ToRegister(instr->result());
  Register object_map = scratch0();

  int map_count = instr->hydrogen()->types()->length();
  bool need_generic = instr->hydrogen()->need_generic();
  if (map_count == 0 && !need_generic) {
    DeoptimizeIf(al, instr->environment());
  Handle<String> name = instr->hydrogen()->name();
  for (int i = 0; i < map_count; ++i) {
    bool last = (i == map_count - 1);
    Handle<Map> map = instr->hydrogen()->types()->at(i);
    if (last && !need_generic) {
      DeoptimizeIf(ne, instr->environment());
      __ bind(&check_passed);
      EmitLoadFieldOrConstantFunction(
          result, object, map, name, instr->environment());
      __ bind(&check_passed);
      EmitLoadFieldOrConstantFunction(
          result, object, map, name, instr->environment());
    __ mov(r2, Operand(name));
    Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();


void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
  __ mov(r2, Operand(instr->name()));
  Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
  Register scratch = scratch0();
  Register function = ToRegister(instr->function());
  Register result = ToRegister(instr->result());

  DeoptimizeIf(ne, instr->environment());
  __ b(ne, &non_instance);
  __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
  DeoptimizeIf(eq, instr->environment());
  __ CompareObjectType(result, scratch, scratch, MAP_TYPE);

  __ bind(&non_instance);


void LCodeGen::DoLoadElements(LLoadElements* instr) {
  Register result = ToRegister(instr->result());
  Register input = ToRegister(instr->object());
  Register scratch = scratch0();

  if (FLAG_debug_code) {
    __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
    __ cmp(scratch, ip);
    __ LoadRoot(ip, Heap::kFixedCOWArrayMapRootIndex);
    __ cmp(scratch, ip);
    __ Abort("Check for fast or external elements failed.");


void LCodeGen::DoLoadExternalArrayPointer(
    LLoadExternalArrayPointer* instr) {
  Register to_reg = ToRegister(instr->result());
  Register from_reg = ToRegister(instr->object());


void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
  Register arguments = ToRegister(instr->arguments());
  Register length = ToRegister(instr->length());
  Register result = ToRegister(instr->result());
  __ sub(length, length, index);
  __ add(length, length, Operand(1));
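
// Keyed loads compute the element address from a base register plus either a
// constant key folded into the offset or a scaled key register; tagged (smi)
// keys are scaled by element_size_shift - kSmiTagSize rather than the full
// element size shift.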
void LCodeGen::DoLoadKeyedFastElement(LLoadKeyedFastElement* instr) {
  Register elements = ToRegister(instr->elements());
  Register result = ToRegister(instr->result());
  Register scratch = scratch0();
  Register store_base = scratch;

  if (instr->key()->IsConstantOperand()) {
                                           instr->additional_index());
    store_base = elements;
    Register key = EmitLoadRegister(instr->key(), scratch0());
    if (instr->hydrogen()->key()->representation().IsTagged()) {
      __ add(scratch, elements,

  if (instr->hydrogen()->RequiresHoleCheck()) {
      DeoptimizeIf(ne, instr->environment());
      __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
      __ cmp(result, scratch);
      DeoptimizeIf(eq, instr->environment());


void LCodeGen::DoLoadKeyedFastDoubleElement(
    LLoadKeyedFastDoubleElement* instr) {
  Register elements = ToRegister(instr->elements());
  bool key_is_constant = instr->key()->IsConstantOperand();
  DwVfpRegister result = ToDoubleRegister(instr->result());
  Register scratch = scratch0();

  int shift_size = (instr->hydrogen()->key()->representation().IsTagged())
      ? (element_size_shift - kSmiTagSize) : element_size_shift;
  int constant_key = 0;
  if (key_is_constant) {
    if (constant_key & 0xF0000000) {
      Abort("array index constant value too big.");

  Operand operand = key_is_constant
      ? Operand(((constant_key + instr->additional_index()) <<
                 element_size_shift) +
      : Operand(key, LSL, shift_size);
  __ add(elements, elements, operand);
  if (!key_is_constant) {
    __ add(elements, elements,
           (instr->additional_index() << element_size_shift)));

  if (instr->hydrogen()->RequiresHoleCheck()) {
    DeoptimizeIf(eq, instr->environment());

  __ vldr(result, elements, 0);
MemOperand LCodeGen::PrepareKeyedOperand(Register key,
                                         bool key_is_constant,
                                         int additional_index,
                                         int additional_offset) {
  if (additional_index != 0 && !key_is_constant) {
    additional_index *= 1 << (element_size - shift_size);
    __ add(scratch0(), key, Operand(additional_index));

  if (key_is_constant) {
                      (constant_key << element_size) + additional_offset);

  if (additional_index == 0) {
    if (shift_size >= 0) {

  if (shift_size >= 0) {


void LCodeGen::DoLoadKeyedSpecializedArrayElement(
    LLoadKeyedSpecializedArrayElement* instr) {
  Register external_pointer = ToRegister(instr->external_pointer());
  bool key_is_constant = instr->key()->IsConstantOperand();
  int constant_key = 0;
  if (key_is_constant) {
    if (constant_key & 0xF0000000) {
      Abort("array index constant value too big.");
  int shift_size = (instr->hydrogen()->key()->representation().IsTagged())
      ? (element_size_shift - kSmiTagSize) : element_size_shift;
  int additional_offset = instr->additional_index() << element_size_shift;

    CpuFeatures::Scope scope(VFP3);
    DwVfpRegister result = ToDoubleRegister(instr->result());
    Operand operand = key_is_constant
        ? Operand(constant_key << element_size_shift)
        : Operand(key, LSL, shift_size);
    __ add(scratch0(), external_pointer, operand);
      __ vldr(result.low(), scratch0(), additional_offset);
      __ vcvt_f64_f32(result, result.low());
      __ vldr(result, scratch0(), additional_offset);
    Register result = ToRegister(instr->result());
    MemOperand mem_operand = PrepareKeyedOperand(
        key, external_pointer, key_is_constant, constant_key,
        element_size_shift, shift_size,
        instr->additional_index(), additional_offset);
    switch (elements_kind) {
        __ ldrsb(result, mem_operand);
        __ ldrb(result, mem_operand);
        __ ldrsh(result, mem_operand);
        __ ldrh(result, mem_operand);
        __ ldr(result, mem_operand);
        __ ldr(result, mem_operand);
          __ cmp(result, Operand(0x80000000));
          DeoptimizeIf(cs, instr->environment());
3123 void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
3127 Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
3132 void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
3133 Register scratch = scratch0();
3134 Register result =
ToRegister(instr->result());
3136 if (instr->hydrogen()->from_inlined()) {
3137 __ sub(result,
sp, Operand(2 * kPointerSize));
3140 Label done, adapted;
3153 void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
3154 Register elem = ToRegister(instr->elements());
3155 Register result = ToRegister(instr->result());
3161 __ mov(result, Operand(scope()->num_parameters()));
3168 __ SmiUntag(result);
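// Wraps the receiver for a non-strict, non-native callee: null or undefined
// receivers are replaced with the global receiver object, while smi or
// non-spec-object receivers trigger a deoptimization.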
3175 void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
3176 Register receiver = ToRegister(instr->receiver());
3177 Register function = ToRegister(instr->function());
3178 Register scratch = scratch0();
3183 Label global_object, receiver_ok;
3193 __ b(ne, &receiver_ok);
3197 __ b(ne, &receiver_ok);
3200 __ LoadRoot(scratch, Heap::kNullValueRootIndex);
3201 __ cmp(receiver, scratch);
3202 __ b(eq, &global_object);
3203 __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
3204 __ cmp(receiver, scratch);
3205 __ b(eq, &global_object);
3209 DeoptimizeIf(eq, instr->environment());
3211 DeoptimizeIf(lt, instr->environment());
3212 __ jmp(&receiver_ok);
3214 __ bind(&global_object);
3218 __ bind(&receiver_ok);
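// Implements Function.prototype.apply with an arguments object: it
// deoptimizes when more than kArgumentsLimit arguments are supplied, copies
// the arguments onto the stack in a loop (decrementing length with SetCC),
// and then invokes the function through a lazy-deopt safepoint generator.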
3222 void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
3223 Register receiver = ToRegister(instr->receiver());
3224 Register function = ToRegister(instr->function());
3225 Register length = ToRegister(instr->length());
3226 Register elements = ToRegister(instr->elements());
3227 Register scratch = scratch0();
3234 const uint32_t kArgumentsLimit = 1 * KB;
3235 __ cmp(length, Operand(kArgumentsLimit));
3236 DeoptimizeIf(hi, instr->environment());
3241 __ mov(receiver, length);
3243 __ add(elements, elements, Operand(1 * kPointerSize));
3249 __ cmp(length, Operand(0));
3254 __ sub(length, length, Operand(1),
SetCC);
3258 ASSERT(instr->HasPointerMap());
3259 LPointerMap* pointers = instr->pointer_map();
3260 RecordPosition(pointers->position());
3261 SafepointGenerator safepoint_generator(
3262 this, pointers, Safepoint::kLazyDeopt);
3265 ParameterCount actual(receiver);
3272 void LCodeGen::DoPushArgument(LPushArgument* instr) {
3273 LOperand* argument = instr->value();
3274 if (argument->IsDoubleRegister() || argument->IsDoubleStackSlot()) {
3275 Abort("DoPushArgument not implemented for double type.");
3277 Register argument_reg = EmitLoadRegister(argument, ip);
3278 __ push(argument_reg);
3283 void LCodeGen::DoDrop(LDrop* instr) {
3284 __ Drop(instr->count());
3288 void LCodeGen::DoThisFunction(LThisFunction* instr) {
3289 Register result =
ToRegister(instr->result());
3294 void LCodeGen::DoContext(LContext* instr) {
3295 Register result =
ToRegister(instr->result());
3300 void LCodeGen::DoOuterContext(LOuterContext* instr) {
3301 Register context = ToRegister(instr->context());
3302 Register result = ToRegister(instr->result());
3308 void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
3310 __ LoadHeapObject(scratch0(), instr->hydrogen()->pairs());
3311 __ push(scratch0());
3312 __ mov(scratch0(), Operand(
Smi::FromInt(instr->hydrogen()->flags())));
3313 __ push(scratch0());
3314 CallRuntime(Runtime::kDeclareGlobals, 3, instr);
3318 void LCodeGen::DoGlobalObject(LGlobalObject* instr) {
3319 Register result =
ToRegister(instr->result());
3324 void LCodeGen::DoGlobalReceiver(LGlobalReceiver* instr) {
3325 Register global = ToRegister(instr->global_object());
3326 Register result = ToRegister(instr->result());
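// Calls a function whose JSFunction target is known at compile time. When
// no arguments adaption is needed (the formal parameter count matches the
// arity) the call goes directly through the function's code entry with r1
// holding the function and r0 the argument count; otherwise it falls back
// to the generic invoke path.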
3331 void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
3333 LInstruction* instr,
3336 bool can_invoke_directly = !function->NeedsArgumentsAdaption() ||
3337 function->shared()->formal_parameter_count() == arity;
3339 LPointerMap* pointers = instr->pointer_map();
3340 RecordPosition(pointers->position());
3342 if (can_invoke_directly) {
3343 if (r1_state == R1_UNINITIALIZED) {
3344 __ LoadHeapObject(r1, function);
3352 if (!function->NeedsArgumentsAdaption()) {
3353 __ mov(r0, Operand(arity));
3357 __ SetCallKind(r5, call_kind);
3362 RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
3364 SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
3365 ParameterCount count(arity);
3374 void LCodeGen::DoCallConstantFunction(LCallConstantFunction* instr) {
3376 CallKnownFunction(instr->function(),
3384 void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr) {
3386 Register result = ToRegister(instr->result());
3387 Register scratch = scratch0();
3391 __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
3392 __ cmp(scratch, Operand(ip));
3393 DeoptimizeIf(ne, instr->environment());
3396 Register exponent = scratch0();
3403 __ Move(result, input);
3409 PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
3413 Register tmp1 = input.is(r1) ? r0 : r1;
3414 Register tmp2 = input.is(r2) ? r0 : r2;
3415 Register tmp3 = input.is(r3) ? r0 : r3;
3416 Register tmp4 = input.is(r4) ? r0 : r4;
3420 Label allocated, slow;
3421 __ LoadRoot(tmp4, Heap::kHeapNumberMapRootIndex);
3422 __ AllocateHeapNumber(tmp1, tmp2, tmp3, tmp4, &slow);
3428 CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
3430 if (!tmp1.is(r0)) __ mov(tmp1, Operand(r0));
3432 __ LoadFromSafepointRegisterSlot(input, input);
3435 __ bind(&allocated);
3443 __ StoreToSafepointRegisterSlot(tmp1, result);
3450 void LCodeGen::EmitIntegerMathAbs(LUnaryMathOperation* instr) {
3452 Register result = ToRegister(instr->result());
3453 __ cmp(input, Operand(0));
3454 __ Move(result, input, pl);
3458 __ rsb(result, input, Operand(0), SetCC, mi);
3460 DeoptimizeIf(vs, instr->environment());
3464 void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) {
3466 class DeferredMathAbsTaggedHeapNumber: public LDeferredCode {
3468 DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen,
3469 LUnaryMathOperation* instr)
3470 : LDeferredCode(codegen), instr_(instr) { }
3471 virtual void Generate() {
3472 codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
3474 virtual LInstruction* instr() { return instr_; }
3476 LUnaryMathOperation* instr_;
3479 Representation r = instr->hydrogen()->value()->representation();
3481 DwVfpRegister input = ToDoubleRegister(instr->value());
3482 DwVfpRegister result = ToDoubleRegister(instr->result());
3483 __ vabs(result, input);
3484 } else if (r.IsInteger32()) {
3485 EmitIntegerMathAbs(instr);
3488 DeferredMathAbsTaggedHeapNumber* deferred =
3489 new(zone()) DeferredMathAbsTaggedHeapNumber(this, instr);
3492 __ JumpIfNotSmi(input, deferred->entry());
3494 EmitIntegerMathAbs(instr);
3495 __ bind(deferred->exit());
3500 void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) {
3502 Register result =
ToRegister(instr->result());
3503 Register scratch = scratch0();
3510 DeoptimizeIf(ne, instr->environment());
3515 __ cmp(result, Operand(0));
3517 __ vmov(scratch, input.high());
3519 DeoptimizeIf(ne, instr->environment());
3525 void LCodeGen::DoMathRound(LUnaryMathOperation* instr) {
3527 Register result =
ToRegister(instr->result());
3528 DwVfpRegister double_scratch1 = ToDoubleRegister(instr->temp());
3529 Register scratch = scratch0();
3530 Label done, check_sign_on_zero;
3533 __ vmov(result, input.high());
3543 __ b(le, &check_sign_on_zero);
3551 DeoptimizeIf(ge, instr->environment());
3553 __ Vmov(double_scratch0(), 0.5, scratch);
3554 __ vadd(double_scratch0(), input, double_scratch0());
3561 __ vmov(result, double_scratch0().high());
3562 __ eor(result, result, Operand(scratch), SetCC);
3564 DeoptimizeIf(mi, instr->environment());
3575 DeoptimizeIf(ne, instr->environment());
3579 __ cmp(result, Operand(0));
3581 __ bind(&check_sign_on_zero);
3582 __ vmov(scratch, input.high());
3584 DeoptimizeIf(
ne, instr->environment());
3590 void LCodeGen::DoMathSqrt(LUnaryMathOperation* instr) {
3593 __ vsqrt(result, input);
3597 void LCodeGen::DoMathPowHalf(LUnaryMathOperation* instr) {
3607 __ VFPCompareAndSetFlags(input, temp);
3608 __ vneg(result, temp,
eq);
3613 __ vsqrt(result, result);
3618 void LCodeGen::DoPower(LPower* instr) {
3619 Representation exponent_type = instr->hydrogen()->right()->representation();
3622 ASSERT(!instr->right()->IsDoubleRegister() ||
3623 ToDoubleRegister(instr->right()).is(d2));
3624 ASSERT(!instr->right()->IsRegister() ||
3626 ASSERT(ToDoubleRegister(instr->left()).is(d1));
3627 ASSERT(ToDoubleRegister(instr->result()).is(d3));
3629 if (exponent_type.IsTagged()) {
3631 __ JumpIfSmi(r2, &no_deopt);
3633 __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
3635 DeoptimizeIf(ne, instr->environment());
3639 } else if (exponent_type.IsInteger32()) {
3643 ASSERT(exponent_type.IsDouble());
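// Generates a random double in d7. The seed words appear to be advanced
// with a multiply-with-carry scheme (multipliers 18273 and 36969); the low
// random bits are then combined with the constant upper word 0x41300000 to
// build an IEEE double from which the final value in [0, 1) is derived,
// with DoDeferredRandom as the runtime slow path when no seed is available.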
3650 void LCodeGen::DoRandom(LRandom* instr) {
3651 class DeferredDoRandom: public LDeferredCode {
3653 DeferredDoRandom(LCodeGen* codegen, LRandom* instr)
3654 : LDeferredCode(codegen), instr_(instr) { }
3655 virtual void Generate() { codegen()->DoDeferredRandom(instr_); }
3656 virtual LInstruction* instr() { return instr_; }
3661 DeferredDoRandom* deferred = new(zone()) DeferredDoRandom(this, instr);
3665 ASSERT(ToDoubleRegister(instr->result()).is(d7));
3668 static const int kSeedSize = sizeof(uint32_t);
3672 static const int kRandomSeedOffset =
3679 __ cmp(r1, Operand(0));
3680 __ b(eq, deferred->entry());
3687 __ and_(r3, r1, Operand(0xFFFF));
3688 __ mov(r4, Operand(18273));
3695 __ and_(r3, r0, Operand(0xFFFF));
3696 __ mov(r4, Operand(36969));
3703 __ and_(r0, r0, Operand(0x3FFFF));
3706 __ bind(deferred->exit());
3709 __ mov(r1, Operand(0x41000000));
3710 __ orr(r1, r1, Operand(0x300000));
3721 void LCodeGen::DoDeferredRandom(LRandom* instr) {
3722 __ PrepareCallCFunction(1, scratch0());
3723 __ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1);
3728 void LCodeGen::DoMathLog(LUnaryMathOperation* instr) {
3729 ASSERT(ToDoubleRegister(instr->result()).is(
d2));
3732 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
3736 void LCodeGen::DoMathTan(LUnaryMathOperation* instr) {
3737 ASSERT(ToDoubleRegister(instr->result()).is(
d2));
3740 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
3744 void LCodeGen::DoMathCos(LUnaryMathOperation* instr) {
3745 ASSERT(ToDoubleRegister(instr->result()).is(
d2));
3748 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
3752 void LCodeGen::DoMathSin(LUnaryMathOperation* instr) {
3753 ASSERT(ToDoubleRegister(instr->result()).is(
d2));
3756 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
3760 void LCodeGen::DoUnaryMathOperation(LUnaryMathOperation* instr) {
3761 switch (instr->op()) {
3775 DoMathPowHalf(instr);
3790 Abort("Unimplemented type of LUnaryMathOperation.");
3796 void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
3798 ASSERT(instr->HasPointerMap());
3800 if (instr->known_function().is_null()) {
3801 LPointerMap* pointers = instr->pointer_map();
3802 RecordPosition(pointers->position());
3803 SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
3804 ParameterCount count(instr->arity());
3808 CallKnownFunction(instr->known_function(),
3812 R1_CONTAINS_TARGET);
3817 void LCodeGen::DoCallKeyed(LCallKeyed* instr) {
3820 int arity = instr->arity();
3822 isolate()->stub_cache()->ComputeKeyedCallInitialize(arity);
3828 void LCodeGen::DoCallNamed(LCallNamed* instr) {
3831 int arity = instr->arity();
3832 RelocInfo::Mode mode = RelocInfo::CODE_TARGET;
3834 isolate()->stub_cache()->ComputeCallInitialize(arity, mode);
3835 __ mov(
r2, Operand(instr->name()));
3842 void LCodeGen::DoCallFunction(LCallFunction* instr) {
3846 int arity = instr->arity();
3848 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
3853 void LCodeGen::DoCallGlobal(LCallGlobal* instr) {
3856 int arity = instr->arity();
3857 RelocInfo::Mode mode = RelocInfo::CODE_TARGET_CONTEXT;
3859 isolate()->stub_cache()->ComputeCallInitialize(arity, mode);
3860 __ mov(
r2, Operand(instr->name()));
3866 void LCodeGen::DoCallKnownGlobal(LCallKnownGlobal* instr) {
3868 CallKnownFunction(instr->target(),
3876 void LCodeGen::DoCallNew(LCallNew* instr) {
3881 __ mov(
r0, Operand(instr->arity()));
3882 CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
3886 void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
3887 CallRuntime(instr->function(), instr->arity(), instr);
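// Stores to a named field: a new map is written first when the store
// carries a transition, then the value goes either into the object itself
// or into its properties backing store, with write barriers emitted where
// the hydrogen instruction requires them.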
3891 void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
3892 Register object = ToRegister(instr->object());
3894 Register scratch = scratch0();
3895 int offset = instr->offset();
3897 ASSERT(!
object.is(value));
3899 if (!instr->transition().is_null()) {
3900 __ mov(scratch, Operand(instr->transition()));
3902 if (instr->hydrogen()->NeedsWriteBarrierForMap()) {
3905 __ RecordWriteField(
object,
3917 HType type = instr->hydrogen()->value()->type();
3920 if (instr->is_in_object()) {
3922 if (instr->hydrogen()->NeedsWriteBarrier()) {
3924 __ RecordWriteField(
object,
3936 if (instr->hydrogen()->NeedsWriteBarrier()) {
3939 __ RecordWriteField(scratch,
3952 void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
3957 __ mov(r2, Operand(instr->name()));
3958 Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
3959 ? isolate()->builtins()->StoreIC_Initialize_Strict()
3960 : isolate()->builtins()->StoreIC_Initialize();
3965 void LCodeGen::DeoptIfTaggedButNotSmi(LEnvironment* environment,
3967 LOperand* operand) {
3968 if (value->representation().IsTagged() && !value->type().IsSmi()) {
3969 if (operand->IsRegister()) {
3972 __ mov(ip, ToOperand(operand));
3975 DeoptimizeIf(ne, environment);
3980 void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
3981 DeoptIfTaggedButNotSmi(instr->environment(),
3982 instr->hydrogen()->length(),
3984 DeoptIfTaggedButNotSmi(instr->environment(),
3985 instr->hydrogen()->index(),
3987 if (instr->index()->IsConstantOperand()) {
3988 int constant_index =
3990 if (instr->hydrogen()->length()->representation().IsTagged()) {
3993 __ mov(ip, Operand(constant_index));
3999 DeoptimizeIf(hs, instr->environment());
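// Stores into a fast-elements backing store. Constant keys become a fixed
// offset from the elements pointer; otherwise the key (possibly still
// tagged) is scaled and added into a scratch base register, and a write
// barrier is emitted when the stored value may be a heap object.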
4003 void LCodeGen::DoStoreKeyedFastElement(LStoreKeyedFastElement* instr) {
4005 Register elements = ToRegister(instr->object());
4006 Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) : no_reg;
4007 Register scratch = scratch0();
4008 Register store_base = scratch;
4012 if (instr->key()->IsConstantOperand()) {
4013 ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
4016 instr->additional_index());
4017 store_base = elements;
4023 if (instr->hydrogen()->key()->representation().IsTagged()) {
4024 __ add(scratch, elements,
4033 if (instr->hydrogen()->NeedsWriteBarrier()) {
4034 HType type = instr->hydrogen()->value()->type();
4039 __ RecordWrite(elements,
4050 void LCodeGen::DoStoreKeyedFastDoubleElement(
4051 LStoreKeyedFastDoubleElement* instr) {
4052 DwVfpRegister value = ToDoubleRegister(instr->value());
4053 Register elements =
ToRegister(instr->elements());
4055 Register scratch = scratch0();
4056 bool key_is_constant = instr->key()->IsConstantOperand();
4057 int constant_key = 0;
4061 if (key_is_constant) {
4063 if (constant_key & 0xF0000000) {
4064 Abort("array index constant value too big.");
4070 int shift_size = (instr->hydrogen()->key()->representation().IsTagged())
4071 ? (element_size_shift - kSmiTagSize) : element_size_shift;
4072 Operand operand = key_is_constant
4073 ? Operand((constant_key << element_size_shift) +
4075 : Operand(key, LSL, shift_size);
4076 __ add(scratch, elements, operand);
4077 if (!key_is_constant) {
4078 __ add(scratch, scratch,
4082 if (instr->NeedsCanonicalization()) {
4084 __ VFPCompareAndSetFlags(value, value);
4091 __ vstr(value, scratch, instr->additional_index() << element_size_shift);
4095 void LCodeGen::DoStoreKeyedSpecializedArrayElement(
4096 LStoreKeyedSpecializedArrayElement* instr) {
4098 Register external_pointer = ToRegister(instr->external_pointer());
4101 bool key_is_constant = instr->key()->IsConstantOperand();
4102 int constant_key = 0;
4103 if (key_is_constant) {
4105 if (constant_key & 0xF0000000) {
4106 Abort(
"array index constant value too big.");
4112 int shift_size = (instr->hydrogen()->key()->representation().IsTagged())
4113 ? (element_size_shift -
kSmiTagSize) : element_size_shift;
4114 int additional_offset = instr->additional_index() << element_size_shift;
4118 CpuFeatures::Scope scope(
VFP3);
4119 DwVfpRegister value(ToDoubleRegister(instr->value()));
4120 Operand operand(key_is_constant
4121 ? Operand(constant_key << element_size_shift)
4122 : Operand(key, LSL, shift_size));
4123 __ add(scratch0(), external_pointer, operand);
4125 __ vcvt_f32_f64(double_scratch0().low(), value);
4126 __ vstr(double_scratch0().low(), scratch0(), additional_offset);
4128 __ vstr(value, scratch0(), additional_offset);
4132 MemOperand mem_operand = PrepareKeyedOperand(
4133 key, external_pointer, key_is_constant, constant_key,
4134 element_size_shift, shift_size,
4135 instr->additional_index(), additional_offset);
4136 switch (elements_kind) {
4140 __ strb(value, mem_operand);
4144 __ strh(value, mem_operand);
4148 __ str(value, mem_operand);
4167 void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
4172 Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
4173 ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
4174 : isolate()->builtins()->KeyedStoreIC_Initialize();
4179 void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
4180 Register object_reg = ToRegister(instr->object());
4181 Register new_map_reg = ToRegister(instr->new_map_temp());
4182 Register scratch = scratch0();
4184 Handle<Map> from_map = instr->original_map();
4185 Handle<Map> to_map = instr->transitioned_map();
4189 Label not_applicable;
4191 __ cmp(scratch, Operand(from_map));
4192 __ b(ne, &not_applicable);
4193 __ mov(new_map_reg, Operand(to_map));
4202 Register fixed_object_reg =
ToRegister(instr->temp());
4205 __ mov(fixed_object_reg, object_reg);
4206 CallCode(isolate()->builtins()->TransitionElementsSmiToDouble(),
4207 RelocInfo::CODE_TARGET, instr);
4210 Register fixed_object_reg =
ToRegister(instr->temp());
4213 __ mov(fixed_object_reg, object_reg);
4214 CallCode(isolate()->builtins()->TransitionElementsDoubleToObject(),
4215 RelocInfo::CODE_TARGET, instr);
4219 __ bind(&not_applicable);
4223 void LCodeGen::DoStringAdd(LStringAdd* instr) {
4227 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
4231 void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
4232 class DeferredStringCharCodeAt: public LDeferredCode {
4234 DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
4235 : LDeferredCode(codegen), instr_(instr) { }
4236 virtual void Generate() { codegen()->DoDeferredStringCharCodeAt(instr_); }
4237 virtual LInstruction* instr() { return instr_; }
4239 LStringCharCodeAt* instr_;
4242 DeferredStringCharCodeAt* deferred =
4243 new(zone()) DeferredStringCharCodeAt(this, instr);
4250 __ bind(deferred->exit());
4254 void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
4255 Register string = ToRegister(instr->string());
4256 Register result = ToRegister(instr->result());
4257 Register scratch = scratch0();
4262 __ mov(result, Operand(0));
4264 PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
4268 if (instr->index()->IsConstantOperand()) {
4277 CallRuntimeFromDeferred(Runtime::kStringCharCodeAt, 2, instr);
4280 __ StoreToSafepointRegisterSlot(
r0, result);
4284 void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
4285 class DeferredStringCharFromCode: public LDeferredCode {
4287 DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
4288 : LDeferredCode(codegen), instr_(instr) { }
4289 virtual void Generate() { codegen()->DoDeferredStringCharFromCode(instr_); }
4290 virtual LInstruction* instr() {
return instr_; }
4292 LStringCharFromCode* instr_;
4295 DeferredStringCharFromCode* deferred =
4296 new(zone()) DeferredStringCharFromCode(this, instr);
4298 ASSERT(instr->hydrogen()->value()->representation().IsInteger32());
4299 Register char_code = ToRegister(instr->char_code());
4300 Register result = ToRegister(instr->result());
4301 ASSERT(!char_code.is(result));
4304 __ b(hi, deferred->entry());
4305 __ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex);
4308 __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
4310 __ b(eq, deferred->entry());
4311 __ bind(deferred->exit());
4315 void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
4316 Register char_code =
ToRegister(instr->char_code());
4317 Register result =
ToRegister(instr->result());
4322 __ mov(result, Operand(0));
4324 PushSafepointRegistersScope scope(
this, Safepoint::kWithRegisters);
4325 __ SmiTag(char_code);
4327 CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr);
4328 __ StoreToSafepointRegisterSlot(
r0, result);
4332 void LCodeGen::DoStringLength(LStringLength* instr) {
4333 Register string = ToRegister(instr->string());
4334 Register result = ToRegister(instr->result());
4339 void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
4340 LOperand* input = instr->value();
4341 ASSERT(input->IsRegister() || input->IsStackSlot());
4342 LOperand* output = instr->result();
4343 ASSERT(output->IsDoubleRegister());
4344 SwVfpRegister single_scratch = double_scratch0().low();
4345 if (input->IsStackSlot()) {
4346 Register scratch = scratch0();
4347 __ ldr(scratch, ToMemOperand(input));
4348 __ vmov(single_scratch, scratch);
4352 __ vcvt_f64_s32(ToDoubleRegister(output), single_scratch);
4356 void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
4357 LOperand* input = instr->value();
4358 LOperand* output = instr->result();
4360 SwVfpRegister flt_scratch = double_scratch0().low();
4362 __ vcvt_f64_u32(ToDoubleRegister(output), flt_scratch);
4366 void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
4367 class DeferredNumberTagI: public LDeferredCode {
4369 DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr)
4370 : LDeferredCode(codegen), instr_(instr) { }
4371 virtual void Generate() {
4372 codegen()->DoDeferredNumberTagI(instr_,
4376 virtual LInstruction* instr() {
return instr_; }
4378 LNumberTagI* instr_;
4384 DeferredNumberTagI* deferred = new(zone()) DeferredNumberTagI(this, instr);
4386 __ b(vs, deferred->entry());
4387 __ bind(deferred->exit());
4391 void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
4392 class DeferredNumberTagU: public LDeferredCode {
4394 DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr)
4395 : LDeferredCode(codegen), instr_(instr) { }
4396 virtual void Generate() {
4397 codegen()->DoDeferredNumberTagI(instr_,
4401 virtual LInstruction* instr() {
return instr_; }
4403 LNumberTagU* instr_;
4406 LOperand* input = instr->value();
4407 ASSERT(input->IsRegister() && input->Equals(instr->result()));
4410 DeferredNumberTagU* deferred = new(zone()) DeferredNumberTagU(this, instr);
4412 __ b(hi, deferred->entry());
4413 __ SmiTag(reg, reg);
4414 __ bind(deferred->exit());
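// Slow path for tagging an int32/uint32 that does not fit in a smi: the
// value is untagged (flipping the sign bit to undo the overflowed SmiTag in
// the signed case), converted to a double via VFP, and stored into a heap
// number allocated either inline (FLAG_inline_new) or through
// Runtime::kAllocateHeapNumber.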
4418 void LCodeGen::DoDeferredNumberTagI(LInstruction* instr,
4420 IntegerSignedness signedness) {
4425 SwVfpRegister flt_scratch = dbl_scratch.low();
4428 PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
4431 if (signedness == SIGNED_INT32) {
4436 __ SmiUntag(src, dst);
4437 __ eor(src, src, Operand(0x80000000));
4439 __ vmov(flt_scratch, src);
4440 __ vcvt_f64_s32(dbl_scratch, flt_scratch);
4442 __ vmov(flt_scratch, src);
4443 __ vcvt_f64_u32(dbl_scratch, flt_scratch);
4446 if (FLAG_inline_new) {
4447 __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
4459 __ mov(ip, Operand(0));
4460 __ StoreToSafepointRegisterSlot(ip, dst);
4461 CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
4470 __ StoreToSafepointRegisterSlot(dst, dst);
4474 void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
4475 class DeferredNumberTagD: public LDeferredCode {
4477 DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
4478 : LDeferredCode(codegen), instr_(instr) { }
4479 virtual void Generate() { codegen()->DoDeferredNumberTagD(instr_); }
4480 virtual LInstruction* instr() {
return instr_; }
4482 LNumberTagD* instr_;
4486 Register scratch = scratch0();
4491 DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr);
4492 if (FLAG_inline_new) {
4493 __ LoadRoot(scratch, Heap::kHeapNumberMapRootIndex);
4495 __ AllocateHeapNumber(reg, temp1, temp2, scratch, deferred->entry(),
4498 __ jmp(deferred->entry());
4500 __ bind(deferred->exit());
4507 void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
4512 __ mov(reg, Operand(0));
4514 PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
4515 CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
4517 __ StoreToSafepointRegisterSlot(r0, reg);
4521 void LCodeGen::DoSmiTag(LSmiTag* instr) {
4527 void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
4529 Register result = ToRegister(instr->result());
4530 if (instr->needs_check()) {
4533 __ SmiUntag(result, input, SetCC);
4534 DeoptimizeIf(cs, instr->environment());
4536 __ SmiUntag(result, input);
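// Converts a tagged value to a double in result_reg: smis are untagged and
// converted via VFP, heap numbers are loaded directly, and undefined is
// mapped to NaN unless deoptimize_on_undefined is set. With
// deoptimize_on_minus_zero the code also deoptimizes on a -0.0 input.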
4541 void LCodeGen::EmitNumberUntagD(Register input_reg,
4543 bool deoptimize_on_undefined,
4544 bool deoptimize_on_minus_zero,
4545 LEnvironment* env) {
4546 Register scratch = scratch0();
4547 SwVfpRegister flt_scratch = double_scratch0().low();
4548 ASSERT(!result_reg.is(double_scratch0()));
4550 Label load_smi, heap_number, done;
4553 __ UntagAndJumpIfSmi(scratch, input_reg, &load_smi);
4557 __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
4558 __ cmp(scratch, Operand(ip));
4559 if (deoptimize_on_undefined) {
4560 DeoptimizeIf(ne, env);
4563 __ b(eq, &heap_number);
4565 __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
4566 __ cmp(input_reg, Operand(ip));
4567 DeoptimizeIf(ne, env);
4570 __ LoadRoot(ip, Heap::kNanValueRootIndex);
4575 __ bind(&heap_number);
4580 if (deoptimize_on_minus_zero) {
4581 __ vmov(ip, result_reg.low());
4582 __ cmp(ip, Operand(0));
4584 __ vmov(ip, result_reg.high());
4586 DeoptimizeIf(eq, env);
4593 __ vmov(flt_scratch, scratch);
4594 __ vcvt_f64_s32(result_reg, flt_scratch);
4599 void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
4600 Register input_reg = ToRegister(instr->value());
4601 Register scratch1 = scratch0();
4602 Register scratch2 = ToRegister(instr->temp());
4603 DwVfpRegister double_scratch = double_scratch0();
4604 DwVfpRegister double_scratch2 = ToDoubleRegister(instr->temp3());
4606 ASSERT(!scratch1.is(input_reg) && !scratch1.is(scratch2));
4607 ASSERT(!scratch2.is(input_reg) && !scratch2.is(scratch1));
4615 __ adc(input_reg, input_reg, Operand(input_reg));
4619 __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
4620 __ cmp(scratch1, Operand(ip));
4622 if (instr->truncating()) {
4623 Register scratch3 = ToRegister(instr->temp2());
4624 SwVfpRegister single_scratch = double_scratch.low();
4625 ASSERT(!scratch3.is(input_reg) &&
4626 !scratch3.is(scratch1) &&
4627 !scratch3.is(scratch2));
4631 __ b(eq, &heap_number);
4634 __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
4635 __ cmp(input_reg, Operand(ip));
4636 DeoptimizeIf(ne, instr->environment());
4637 __ mov(input_reg, Operand(0));
4640 __ bind(&heap_number);
4644 __ EmitECMATruncate(input_reg,
4652 CpuFeatures::Scope scope(VFP3);
4654 DeoptimizeIf(ne, instr->environment());
4664 DeoptimizeIf(ne, instr->environment());
4667 __ cmp(input_reg, Operand(0));
4669 __ vmov(scratch1, double_scratch.high());
4671 DeoptimizeIf(ne, instr->environment());
4678 void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
4679 class DeferredTaggedToI: public LDeferredCode {
4681 DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
4682 : LDeferredCode(codegen), instr_(instr) { }
4683 virtual void Generate() { codegen()->DoDeferredTaggedToI(instr_); }
4684 virtual LInstruction* instr() { return instr_; }
4689 LOperand* input = instr->value();
4690 ASSERT(input->IsRegister());
4691 ASSERT(input->Equals(instr->result()));
4695 DeferredTaggedToI* deferred = new(zone()) DeferredTaggedToI(this, instr);
4699 __ SmiUntag(input_reg, SetCC);
4702 __ b(cs, deferred->entry());
4703 __ bind(deferred->exit());
4707 void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
4708 LOperand* input = instr->value();
4709 ASSERT(input->IsRegister());
4710 LOperand* result = instr->result();
4711 ASSERT(result->IsDoubleRegister());
4716 EmitNumberUntagD(input_reg, result_reg,
4717 instr->hydrogen()->deoptimize_on_undefined(),
4718 instr->hydrogen()->deoptimize_on_minus_zero(),
4719 instr->environment());
4723 void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
4724 Register result_reg = ToRegister(instr->result());
4725 Register scratch1 = scratch0();
4726 Register scratch2 = ToRegister(instr->temp());
4727 DwVfpRegister double_input = ToDoubleRegister(instr->value());
4731 if (instr->truncating()) {
4732 Register scratch3 =
ToRegister(instr->temp2());
4733 SwVfpRegister single_scratch = double_scratch0().low();
4734 __ EmitECMATruncate(result_reg,
4741 DwVfpRegister double_scratch = double_scratch0();
4751 DeoptimizeIf(
ne, instr->environment());
4757 void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
4758 LOperand* input = instr->value();
4760 DeoptimizeIf(ne, instr->environment());
4764 void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
4765 LOperand* input = instr->value();
4767 DeoptimizeIf(eq, instr->environment());
4771 void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
4773 Register scratch = scratch0();
4778 if (instr->hydrogen()->is_interval_check()) {
4781 instr->hydrogen()->GetCheckInterval(&first, &last);
4783 __ cmp(scratch, Operand(first));
4786 if (first == last) {
4787 DeoptimizeIf(ne, instr->environment());
4789 DeoptimizeIf(lo, instr->environment());
4792 __ cmp(scratch, Operand(last));
4793 DeoptimizeIf(hi, instr->environment());
4799 instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);
4803 __ tst(scratch, Operand(mask));
4804 DeoptimizeIf(tag == 0 ? ne : eq, instr->environment());
4806 __ and_(scratch, scratch, Operand(mask));
4807 __ cmp(scratch, Operand(tag));
4808 DeoptimizeIf(ne, instr->environment());
4814 void LCodeGen::DoCheckFunction(LCheckFunction* instr) {
4816 Handle<JSFunction> target = instr->hydrogen()->target();
4817 if (isolate()->heap()->InNewSpace(*target)) {
4819 Handle<JSGlobalPropertyCell> cell =
4820 isolate()->factory()->NewJSGlobalPropertyCell(target);
4821 __ mov(ip, Operand(Handle<Object>(cell)));
4825 __ cmp(reg, Operand(target));
4827 DeoptimizeIf(ne, instr->environment());
4831 void LCodeGen::DoCheckMapCommon(Register reg,
4835 LEnvironment* env) {
4837 __ CompareMap(reg, scratch, map, &success, mode);
4838 DeoptimizeIf(
ne, env);
4843 void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
4844 Register scratch = scratch0();
4845 LOperand* input = instr->value();
4846 ASSERT(input->IsRegister());
4850 SmallMapList* map_set = instr->hydrogen()->map_set();
4851 for (int i = 0; i < map_set->length() - 1; i++) {
4852 Handle<Map> map = map_set->at(i);
4856 Handle<Map> map = map_set->last();
4862 void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
4864 Register result_reg =
ToRegister(instr->result());
4866 __ ClampDoubleToUint8(result_reg, value_reg, temp_reg);
4870 void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
4871 Register unclamped_reg =
ToRegister(instr->unclamped());
4872 Register result_reg =
ToRegister(instr->result());
4873 __ ClampUint8(result_reg, unclamped_reg);
4877 void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
4878 Register scratch = scratch0();
4879 Register input_reg = ToRegister(instr->unclamped());
4880 Register result_reg = ToRegister(instr->result());
4882 Label is_smi, done, heap_number;
4885 __ UntagAndJumpIfSmi(result_reg, input_reg, &is_smi);
4889 __ cmp(scratch, Operand(factory()->heap_number_map()));
4890 __ b(eq, &heap_number);
4894 __ cmp(input_reg, Operand(factory()->undefined_value()));
4895 DeoptimizeIf(ne, instr->environment());
4896 __ mov(result_reg, Operand(0));
4900 __ bind(&heap_number);
4903 __ ClampDoubleToUint8(result_reg, double_scratch0(), temp_reg);
4908 __ ClampUint8(result_reg, result_reg);
4914 void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
4918 Handle<JSObject> holder = instr->holder();
4919 Handle<JSObject> current_prototype = instr->prototype();
4922 __ LoadHeapObject(temp1, current_prototype);
4925 while (!current_prototype.is_identical_to(holder)) {
4926 DoCheckMapCommon(temp1, temp2,
4927 Handle<Map>(current_prototype->map()),
4930 Handle<JSObject>(
JSObject::cast(current_prototype->GetPrototype()));
4932 __ LoadHeapObject(temp1, current_prototype);
4936 DoCheckMapCommon(temp1, temp2,
4937 Handle<Map>(current_prototype->map()),
4939 DeoptimizeIf(ne, instr->environment());
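// Allocates a plain JSObject for a known constructor: the initial map's
// instance size is allocated in new space (falling back to the deferred
// runtime call), the map and empty properties/elements arrays appear to be
// installed next, and in-object fields are pre-filled with undefined.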
4943 void LCodeGen::DoAllocateObject(LAllocateObject* instr) {
4944 class DeferredAllocateObject: public LDeferredCode {
4946 DeferredAllocateObject(LCodeGen* codegen, LAllocateObject* instr)
4947 : LDeferredCode(codegen), instr_(instr) { }
4948 virtual void Generate() { codegen()->DoDeferredAllocateObject(instr_); }
4949 virtual LInstruction* instr() {
return instr_; }
4951 LAllocateObject* instr_;
4954 DeferredAllocateObject* deferred =
4955 new(zone()) DeferredAllocateObject(this, instr);
4957 Register result = ToRegister(instr->result());
4958 Register scratch = ToRegister(instr->temp());
4959 Register scratch2 = ToRegister(instr->temp2());
4960 Handle<JSFunction> constructor = instr->hydrogen()->constructor();
4961 Handle<Map> initial_map(constructor->initial_map());
4962 int instance_size = initial_map->instance_size();
4963 ASSERT(initial_map->pre_allocated_property_fields() +
4964 initial_map->unused_property_fields() -
4965 initial_map->inobject_properties() == 0);
4970 ASSERT(!constructor->shared()->IsInobjectSlackTrackingInProgress());
4971 __ AllocateInNewSpace(instance_size,
4978 __ bind(deferred->exit());
4979 if (FLAG_debug_code) {
4980 Label is_in_new_space;
4981 __ JumpIfInNewSpace(result, scratch, &is_in_new_space);
4982 __ Abort(
"Allocated object is not in new-space");
4983 __ bind(&is_in_new_space);
4987 Register map = scratch;
4988 __ LoadHeapObject(map, constructor);
4994 __ LoadRoot(scratch, Heap::kEmptyFixedArrayRootIndex);
4997 if (initial_map->inobject_properties() != 0) {
4998 __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
4999 for (
int i = 0; i < initial_map->inobject_properties(); i++) {
5007 void LCodeGen::DoDeferredAllocateObject(LAllocateObject* instr) {
5008 Register result =
ToRegister(instr->result());
5009 Handle<JSFunction> constructor = instr->hydrogen()->constructor();
5010 Handle<Map> initial_map(constructor->initial_map());
5011 int instance_size = initial_map->instance_size();
5016 __ mov(result, Operand(0));
5018 PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
5021 CallRuntimeFromDeferred(Runtime::kAllocateInNewSpace, 1, instr);
5022 __ StoreToSafepointRegisterSlot(r0, result);
5026 void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
5027 Handle<FixedArray> literals(instr->environment()->closure()->literals());
5029 instr->hydrogen()->boilerplate_elements_kind();
5035 boilerplate_elements_kind, true)) {
5036 __ LoadHeapObject(r1, instr->hydrogen()->boilerplate_object());
5043 __ cmp(r2, Operand(boilerplate_elements_kind));
5044 DeoptimizeIf(ne, instr->environment());
5048 __ LoadHeapObject(r3, literals);
5052 __ mov(r1, Operand(isolate()->factory()->empty_fixed_array()));
5056 int length = instr->hydrogen()->length();
5057 if (instr->hydrogen()->IsCopyOnWrite()) {
5058 ASSERT(instr->hydrogen()->depth() == 1);
5061 FastCloneShallowArrayStub stub(mode, length);
5062 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
5063 } else if (instr->hydrogen()->depth() > 1) {
5064 CallRuntime(Runtime::kCreateArrayLiteral, 3, instr);
5066 CallRuntime(Runtime::kCreateArrayLiteralShallow, 3, instr);
5072 FastCloneShallowArrayStub stub(mode, length);
5073 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
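// EmitDeepCopy flattens a boilerplate object graph into a single
// pre-allocated block: the header and in-object fields are copied field by
// field, nested JSObjects are recursively copied at *offset, and fixed or
// fixed-double element arrays are copied element by element.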
5078 void LCodeGen::EmitDeepCopy(Handle<JSObject> object,
5086 Handle<FixedArrayBase> elements(object->elements());
5087 bool has_elements = elements->length() > 0 &&
5088 elements->map() != isolate()->heap()->fixed_cow_array_map();
5092 int object_offset = *offset;
5093 int object_size =
object->map()->instance_size();
5094 int elements_offset = *offset + object_size;
5095 int elements_size = has_elements ? elements->Size() : 0;
5096 *offset += object_size + elements_size;
5099 ASSERT(object->properties()->length() == 0);
5100 int inobject_properties = object->map()->inobject_properties();
5101 int header_size = object_size - inobject_properties * kPointerSize;
5104 __ add(r2, result, Operand(elements_offset));
5112 for (int i = 0; i < inobject_properties; i++) {
5113 int total_offset = object_offset + object->GetInObjectPropertyOffset(i);
5114 Handle<Object> value = Handle<Object>(object->InObjectPropertyAt(i));
5115 if (value->IsJSObject()) {
5117 __ add(r2, result, Operand(*offset));
5119 __ LoadHeapObject(source, value_object);
5120 EmitDeepCopy(value_object, result, source, offset);
5121 } else if (value->IsHeapObject()) {
5122 __ LoadHeapObject(r2, Handle<HeapObject>::cast(value));
5125 __ mov(r2, Operand(value));
5132 __ LoadHeapObject(source, elements);
5139 int elements_length = has_elements ? elements->length() : 0;
5140 if (elements->IsFixedDoubleArray()) {
5141 Handle<FixedDoubleArray> double_array =
5143 for (
int i = 0; i < elements_length; i++) {
5144 int64_t value = double_array->get_representation(i);
5150 __ mov(r2, Operand(value_low));
5152 __ mov(r2, Operand(value_high));
5155 } else if (elements->IsFixedArray()) {
5157 for (int i = 0; i < elements_length; i++) {
5159 Handle<Object> value(fast_elements->get(i));
5160 if (value->IsJSObject()) {
5162 __ add(r2, result, Operand(*offset));
5164 __ LoadHeapObject(source, value_object);
5165 EmitDeepCopy(value_object, result, source, offset);
5166 } else if (value->IsHeapObject()) {
5167 __ LoadHeapObject(r2, Handle<HeapObject>::cast(value));
5170 __ mov(r2, Operand(value));
5181 void LCodeGen::DoFastLiteral(LFastLiteral* instr) {
5182 int size = instr->hydrogen()->total_size();
5184 instr->hydrogen()->boilerplate()->GetElementsKind();
5190 boilerplate_elements_kind, true)) {
5191 __ LoadHeapObject(r1, instr->hydrogen()->boilerplate());
5198 __ cmp(r2, Operand(boilerplate_elements_kind));
5199 DeoptimizeIf(ne, instr->environment());
5204 Label allocated, runtime_allocate;
5208 __ bind(&runtime_allocate);
5211 CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
5213 __ bind(&allocated);
5215 __ LoadHeapObject(r1, instr->hydrogen()->boilerplate());
5216 EmitDeepCopy(instr->hydrogen()->boilerplate(), r0, r1, &offset);
5221 void LCodeGen::DoObjectLiteral(LObjectLiteral* instr) {
5222 Handle<FixedArray> literals(instr->environment()->closure()->literals());
5223 Handle<FixedArray> constant_properties =
5224 instr->hydrogen()->constant_properties();
5227 __ LoadHeapObject(r4, literals);
5229 __ mov(r2, Operand(constant_properties));
5230 int flags = instr->hydrogen()->fast_elements()
5237 int properties_count = constant_properties->length() / 2;
5238 if (instr->hydrogen()->depth() > 1) {
5239 CallRuntime(Runtime::kCreateObjectLiteral, 4, instr);
5242 CallRuntime(Runtime::kCreateObjectLiteralShallow, 4, instr);
5244 FastCloneShallowObjectStub stub(properties_count);
5245 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
5250 void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
5253 CallRuntime(Runtime::kToFastProperties, 1, instr);
5257 void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
5264 int literal_offset =
5266 __ LoadHeapObject(r7, instr->hydrogen()->literals());
5268 __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
5270 __ b(ne, &materialized);
5275 __ mov(r5, Operand(instr->hydrogen()->pattern()));
5276 __ mov(r4, Operand(instr->hydrogen()->flags()));
5278 CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr);
5281 __ bind(&materialized);
5283 Label allocated, runtime_allocate;
5288 __ bind(&runtime_allocate);
5291 CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
5294 __ bind(&allocated);
5303 if ((size % (2 * kPointerSize)) != 0) {
5310 void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
5313 Handle<SharedFunctionInfo> shared_info = instr->shared_info();
5314 bool pretenure = instr->hydrogen()->pretenure();
5315 if (!pretenure && shared_info->num_literals() == 0) {
5316 FastNewClosureStub stub(shared_info->language_mode());
5317 __ mov(r1, Operand(shared_info));
5319 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
5321 __ mov(r2, Operand(shared_info));
5322 __ mov(r1, Operand(pretenure
5323 ? factory()->true_value()
5324 : factory()->false_value()));
5326 CallRuntime(Runtime::kNewClosure, 3, instr);
5331 void LCodeGen::DoTypeof(LTypeof* instr) {
5334 CallRuntime(Runtime::kTypeof, 1, instr);
5338 void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
5340 int true_block = chunk_->LookupDestination(instr->true_block_id());
5341 int false_block = chunk_->LookupDestination(instr->false_block_id());
5342 Label* true_label = chunk_->GetAssemblyLabel(true_block);
5343 Label* false_label = chunk_->GetAssemblyLabel(false_block);
5345 Condition final_branch_condition = EmitTypeofIs(true_label,
5348 instr->type_literal());
5350 EmitBranch(true_block, false_block, final_branch_condition);
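// Emits the comparison for a "typeof x == 'literal'" branch and returns the
// condition on which the true branch should be taken; unrecognised literals
// fall through to the false label.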
5355 Condition LCodeGen::EmitTypeofIs(Label* true_label,
5358 Handle<String> type_name) {
5360 Register scratch = scratch0();
5361 if (type_name->Equals(heap()->number_symbol())) {
5362 __ JumpIfSmi(input, true_label);
5364 __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
5365 __ cmp(input, Operand(ip));
5366 final_branch_condition = eq;
5368 } else if (type_name->Equals(heap()->string_symbol())) {
5369 __ JumpIfSmi(input, false_label);
5371 __ b(ge, false_label);
5374 final_branch_condition = eq;
5376 } else if (type_name->Equals(heap()->boolean_symbol())) {
5377 __ CompareRoot(input, Heap::kTrueValueRootIndex);
5378 __ b(eq, true_label);
5379 __ CompareRoot(input, Heap::kFalseValueRootIndex);
5380 final_branch_condition = eq;
5382 } else if (FLAG_harmony_typeof && type_name->Equals(heap()->null_symbol())) {
5383 __ CompareRoot(input, Heap::kNullValueRootIndex);
5384 final_branch_condition = eq;
5386 } else if (type_name->Equals(heap()->undefined_symbol())) {
5387 __ CompareRoot(input, Heap::kUndefinedValueRootIndex);
5388 __ b(eq, true_label);
5389 __ JumpIfSmi(input, false_label);
5394 final_branch_condition = ne;
5396 } else if (type_name->Equals(heap()->function_symbol())) {
5398 __ JumpIfSmi(input, false_label);
5400 __ b(eq, true_label);
5402 final_branch_condition = eq;
5404 } else if (type_name->Equals(heap()->object_symbol())) {
5405 __ JumpIfSmi(input, false_label);
5406 if (!FLAG_harmony_typeof) {
5407 __ CompareRoot(input, Heap::kNullValueRootIndex);
5408 __ b(eq, true_label);
5410 __ CompareObjectType(input, input, scratch,
5412 __ b(lt, false_label);
5414 __ b(gt, false_label);
5418 final_branch_condition = eq;
5424 return final_branch_condition;
5428 void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
5430 int true_block = chunk_->LookupDestination(instr->true_block_id());
5431 int false_block = chunk_->LookupDestination(instr->false_block_id());
5433 EmitIsConstructCall(temp1, scratch0());
5434 EmitBranch(true_block, false_block,
eq);
5438 void LCodeGen::EmitIsConstructCall(Register temp1, Register temp2) {
5439 ASSERT(!temp1.is(temp2));
5444 Label check_frame_marker;
5447 __ b(ne, &check_frame_marker);
5451 __ bind(&check_frame_marker);
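// Pads the instruction stream so that the call emitted for a lazy bailout
// can later be patched by the deoptimizer without clobbering the preceding
// code; padding is inserted until the distance from the last recorded
// lazy-deopt position reaches the patch size.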
5457 void LCodeGen::EnsureSpaceForLazyDeopt() {
5460 int current_pc = masm()->pc_offset();
5462 if (current_pc < last_lazy_deopt_pc_ + patch_size) {
5465 int padding_size = last_lazy_deopt_pc_ + patch_size - current_pc;
5467 while (padding_size > 0) {
5472 last_lazy_deopt_pc_ = masm()->pc_offset();
5476 void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
5477 EnsureSpaceForLazyDeopt();
5478 ASSERT(instr->HasEnvironment());
5479 LEnvironment* env = instr->environment();
5480 RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
5481 safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
5485 void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
5486 DeoptimizeIf(
al, instr->environment());
5490 void LCodeGen::DoDeleteProperty(LDeleteProperty* instr) {
5491 Register object = ToRegister(instr->object());
5493 Register strict = scratch0();
5495 __ Push(object, key, strict);
5496 ASSERT(instr->HasPointerMap());
5497 LPointerMap* pointers = instr->pointer_map();
5498 RecordPosition(pointers->position());
5499 SafepointGenerator safepoint_generator(
5500 this, pointers, Safepoint::kLazyDeopt);
5501 __ InvokeBuiltin(Builtins::DELETE,
CALL_FUNCTION, safepoint_generator);
5505 void LCodeGen::DoIn(LIn* instr) {
5509 ASSERT(instr->HasPointerMap());
5510 LPointerMap* pointers = instr->pointer_map();
5511 RecordPosition(pointers->position());
5512 SafepointGenerator safepoint_generator(
this, pointers, Safepoint::kLazyDeopt);
5517 void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
5518 PushSafepointRegistersScope scope(
this, Safepoint::kWithRegisters);
5519 __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
5520 RecordSafepointWithLazyDeopt(
5521 instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
5522 ASSERT(instr->HasEnvironment());
5523 LEnvironment* env = instr->environment();
5524 safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
5528 void LCodeGen::DoStackCheck(LStackCheck* instr) {
5529 class DeferredStackCheck: public LDeferredCode {
5531 DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
5532 : LDeferredCode(codegen), instr_(instr) { }
5533 virtual void Generate() { codegen()->DoDeferredStackCheck(instr_); }
5534 virtual LInstruction* instr() { return instr_; }
5536 LStackCheck* instr_;
5539 ASSERT(instr->HasEnvironment());
5540 LEnvironment* env = instr->environment();
5543 if (instr->hydrogen()->is_function_entry()) {
5546 __ LoadRoot(ip, Heap::kStackLimitRootIndex);
5549 StackCheckStub stub;
5550 PredictableCodeSizeScope predictable(masm_);
5551 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
5552 EnsureSpaceForLazyDeopt();
5554 RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
5555 safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
5557 ASSERT(instr->hydrogen()->is_backwards_branch());
5559 DeferredStackCheck* deferred_stack_check =
5560 new(zone()) DeferredStackCheck(this, instr);
5561 __ LoadRoot(ip, Heap::kStackLimitRootIndex);
5563 __ b(lo, deferred_stack_check->entry());
5564 EnsureSpaceForLazyDeopt();
5565 __ bind(instr->done_label());
5566 deferred_stack_check->SetExit(instr->done_label());
5567 RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
5575 void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
5579 LEnvironment* environment = instr->environment();
5580 environment->SetSpilledRegisters(instr->SpilledRegisterArray(),
5581 instr->SpilledDoubleRegisterArray());
5585 ASSERT(!environment->HasBeenRegistered());
5586 RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
5587 ASSERT(osr_pc_offset_ == -1);
5588 osr_pc_offset_ = masm()->pc_offset();
5592 void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
5593 __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
5595 DeoptimizeIf(eq, instr->environment());
5597 Register null_value = r5;
5598 __ LoadRoot(null_value, Heap::kNullValueRootIndex);
5599 __ cmp(r0, null_value);
5600 DeoptimizeIf(eq, instr->environment());
5603 DeoptimizeIf(eq, instr->environment());
5607 DeoptimizeIf(le, instr->environment());
5609 Label use_cache, call_runtime;
5610 __ CheckEnumCache(null_value, &call_runtime);
5616 __ bind(&call_runtime);
5618 CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr);
5621 __ LoadRoot(ip, Heap::kMetaMapRootIndex);
5623 DeoptimizeIf(ne, instr->environment());
5624 __ bind(&use_cache);
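// Loads the enum cache for a for-in loop: when the map's EnumLength is zero
// the empty fixed array is used, otherwise the instance descriptors'
// enumeration cache is loaded and an empty cache deoptimizes.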
5628 void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
5630 Register result = ToRegister(instr->result());
5631 Label load_cache, done;
5632 __ EnumLength(result, map);
5634 __ b(
ne, &load_cache);
5635 __ mov(result, Operand(isolate()->factory()->empty_fixed_array()));
5638 __ bind(&load_cache);
5639 __ LoadInstanceDescriptors(map, result);
5644 __ cmp(result, Operand(0));
5645 DeoptimizeIf(
eq, instr->environment());
5651 void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
5652 Register object = ToRegister(instr->value());
5655 __ cmp(map, scratch0());
5656 DeoptimizeIf(
ne, instr->environment());
5660 void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
5661 Register object = ToRegister(instr->object());
5663 Register result = ToRegister(instr->result());
5664 Register scratch = scratch0();
5666 Label out_of_object, done;
5667 __ cmp(index, Operand(0));
5668 __ b(
lt, &out_of_object);
5676 __ bind(&out_of_object);
5681 FixedArray::kHeaderSize - kPointerSize));
static const int kCallerFPOffset
static const int kLengthOffset
static const int kBitFieldOffset
static LGap * cast(LInstruction *instr)
const uint32_t kVFPZConditionFlagBit
const intptr_t kSmiTagMask
static const int kCodeEntryOffset
static const int kMaxAsciiCharCode
static const int kPrototypeOrInitialMapOffset
static int SlotOffset(int index)
virtual void AfterCall() const
const DivMagicNumbers DivMagicNumberFor(int32_t divisor)
static const int kEnumCacheOffset
static Smi * FromInt(int value)
bool IsFastObjectElementsKind(ElementsKind kind)
static TypeFeedbackId None()
static const int kElementsKindBitCount
static HeapObject * cast(Object *obj)
static Handle< T > cast(Handle< S > that)
static const int kGlobalReceiverOffset
static const int kExponentBias
static bool IsSupported(CpuFeature f)
static const int kExternalPointerOffset
virtual ~SafepointGenerator()
bool AreAliased(Register r1, Register r2, Register r3, Register r4)
static const int kCallerSPOffset
#define ASSERT(condition)
bool CanTransitionToMoreGeneralFastElementsKind(ElementsKind elements_kind, bool allow_only_packed)
friend class BlockConstPoolScope
const int kPointerSizeLog2
static const int kInObjectFieldCount
static const int kMaximumSlots
MemOperand GlobalObjectOperand()
static const int kInstanceClassNameOffset
int WhichPowerOf2(uint32_t x)
bool is_uint32(int64_t x)
bool IsSimpleMapChangeTransition(ElementsKind from_kind, ElementsKind to_kind)
MemOperand ContextOperand(Register context, int index)
static const int kContextOffset
Handle< String > SubString(Handle< String > str, int start, int end, PretenureFlag pretenure)
static const int kHashFieldOffset
static DwVfpRegister FromAllocationIndex(int index)
Condition ReverseCondition(Condition cond)
const uint32_t kSlotsZapValue
DwVfpRegister DoubleRegister
STATIC_ASSERT((FixedDoubleArray::kHeaderSize &kDoubleAlignmentMask)==0)
static const int kLengthOffset
static const int kExponentShift
static const int kValueOffset
const uint32_t kHoleNanUpper32
static const int kPcLoadDelta
static void MaybeCallEntryHook(MacroAssembler *masm)
static LConstantOperand * cast(LOperand *op)
const uint32_t kHoleNanLower32
static Register FromAllocationIndex(int index)
static const int kCacheStampOffset
static const int kPropertiesOffset
activate correct semantics for inheriting readonliness enable harmony semantics for typeof enable harmony enable harmony proxies enable all harmony harmony_scoping harmony_proxies harmony_scoping tracks arrays with only smi values automatically unbox arrays of doubles use crankshaft use hydrogen range analysis use hydrogen global value numbering use function inlining maximum number of AST nodes considered for a single inlining loop invariant code motion print statistics for hydrogen trace generated IR for specified phases trace register allocator trace range analysis trace representation types environment for every instruction put a break point before deoptimizing polymorphic inlining perform array bounds checks elimination use dead code elimination trace on stack replacement optimize closures cache optimized code for closures functions with arguments object loop weight for representation inference allow uint32 values on optimize frames if they are used only in safe operations track parallel recompilation enable all profiler experiments number of stack frames inspected by the profiler call recompile stub directly when self optimizing trigger profiler ticks based on counting instead of timing weight back edges by jump distance for interrupt triggering percentage of ICs that must have type info to allow optimization watch_ic_patching retry_self_opt interrupt_at_exit extra verbose compilation tracing generate extra emit comments in code disassembly enable use of SSE3 instructions if available enable use of CMOV instruction if available enable use of SAHF instruction if enable use of VFP3 instructions if available this implies enabling ARMv7 and VFP2 enable use of VFP2 instructions if available enable use of SDIV and UDIV instructions if enable loading bit constant by means of movw movt instruction enable unaligned accesses for enable use of MIPS FPU instructions if expose natives in global object expose gc extension number of stack frames to capture disable builtin natives files print a stack trace if an assertion failure occurs use random jit cookie to mask large constants trace lazy optimization use adaptive optimizations prepare for turning on always opt minimum length for automatic enable preparsing maximum number of optimization attempts before giving up cache prototype transitions automatically set the debug break flag when debugger commands are in the queue always cause a debug break before aborting maximum length of function source code printed in a stack trace max size of the new max size of the old max size of executable always perform global GCs print one trace line following each garbage collection do not print trace line after scavenger collection print more details following each garbage collection print amount of external allocated memory after each time it is adjusted flush code that we expect not to use again before full gc do incremental marking steps track object counts and memory usage use caching Perform compaction on every full GC Never perform compaction on full GC testing only Compact code space on full incremental collections Default seed for initializing random generator(0, the default, means to use system random).") DEFINE_bool(use_verbose_printer
bool IsFastSmiElementsKind(ElementsKind kind)
static int OffsetOfElementAt(int index)
static void Generate(MacroAssembler *masm, Register string, Register index, Register result, Label *call_runtime)
static const int kElementsOffset
static const int kContainsCachedArrayIndexMask
static Vector< T > New(int length)
int ElementsKindToShiftSize(ElementsKind elements_kind)
Vector< const char > CStrVector(const char *data)
static int OffsetOfElementAt(int index)
static const int kLengthOffset
static int SizeFor(int length)
static const int kHeaderSize
static const int kMapOffset
static const int kValueOffset
bool is(Register reg) const
static const int kLengthOffset
static Address GetDeoptimizationEntry(int id, BailoutType type)
MemOperand FieldMemOperand(Register object, int offset)
static const int kHasNonInstancePrototype
static const int kContextOffset
static const int kFunctionOffset
ElementsKind GetInitialFastElementsKind()
const uint32_t kVFPVConditionFlagBit
static const uint32_t kSignMask
Condition NegateCondition(Condition cond)
#define ASSERT_EQ(v1, v2)
static const int kElementsKindShift
static const int kConstructorOffset
static double canonical_not_the_hole_nan_as_double()
static const int kIsUndetectable
static const int kHeaderSize
static const int kMaximumClonedProperties
static const int kInstrSize
activate correct semantics for inheriting readonliness enable harmony semantics for typeof enable harmony enable harmony proxies enable all harmony harmony_scoping harmony_proxies harmony_scoping tracks arrays with only smi values automatically unbox arrays of doubles use crankshaft use hydrogen range analysis use hydrogen global value numbering use function inlining maximum number of AST nodes considered for a single inlining loop invariant code motion print statistics for hydrogen trace generated IR for specified phases trace register allocator trace range analysis trace representation types environment for every instruction put a break point before deoptimizing polymorphic inlining perform array bounds checks elimination use dead code elimination trace on stack replacement optimize closures cache optimized code for closures functions with arguments object loop weight for representation inference allow uint32 values on optimize frames if they are used only in safe operations track parallel recompilation enable all profiler experiments number of stack frames inspected by the profiler call recompile stub directly when self optimizing trigger profiler ticks based on counting instead of timing weight back edges by jump distance for interrupt triggering percentage of ICs that must have type info to allow optimization watch_ic_patching retry_self_opt interrupt_at_exit extra verbose compilation tracing generate extra emit comments in code disassembly enable use of SSE3 instructions if available enable use of CMOV instruction if available enable use of SAHF instruction if enable use of VFP3 instructions if available this implies enabling ARMv7 and VFP2 enable use of VFP2 instructions if available enable use of SDIV and UDIV instructions if enable loading bit constant by means of movw movt instruction enable unaligned accesses for enable use of MIPS FPU instructions if NULL
static const int kPrototypeOffset
#define RUNTIME_ENTRY(name, nargs, ressize)
static const int kValueOffset
static const int kNativeContextOffset
static const int kMarkerOffset
static const int kExponentBits
static const int kCompilerHintsOffset
static const int kSharedFunctionInfoOffset
Register ToRegister(int num)
static const int kMaxValue
static const int kBitField2Offset
static HValue* cast(HValue* value)
static Handle<Code> GetUninitialized(Token::Value op)
static const int kMaximumClonedLength
static const int kExponentOffset
static const int kValueOffset
bool EvalComparison(Token::Value op, double op1, double op2)
static JSObject* cast(Object* obj)
bool IsFastDoubleElementsKind(ElementsKind kind)
SafepointGenerator(LCodeGen* codegen, LPointerMap* pointers, Safepoint::DeoptMode mode)
static const int kInstanceTypeOffset
virtual void BeforeCall(int call_size) const
static const int kMantissaOffset
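
The declarations listed above are the cross-references for this file. As a hedged illustration of how a few of them fit together, the sketch below combines FieldMemOperand, the listed field offsets (JSObject::kElementsOffset, HeapObject::kMapOffset), and NegateCondition in LCodeGen-style ARM code. The helper name EmitElementsMapCheck, the register choices, and the omitted includes and namespace wrappers are assumptions made for the example, not taken from this file.

#define __ masm->

// Hypothetical sketch: load the elements backing store of |object|, load its
// map, and branch to |not_fixed_array| when the map is not the fixed-array
// map. FieldMemOperand folds the heap-object tag into the offset.
static void EmitElementsMapCheck(MacroAssembler* masm,
                                 Register object,
                                 Register scratch,
                                 Label* not_fixed_array) {
  // scratch = object->elements.
  __ ldr(scratch, FieldMemOperand(object, JSObject::kElementsOffset));
  // scratch = elements->map.
  __ ldr(scratch, FieldMemOperand(scratch, HeapObject::kMapOffset));
  // Compare against the fixed-array map root.
  __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
  __ cmp(scratch, ip);
  // NegateCondition(eq) yields ne: branch when the maps differ.
  __ b(NegateCondition(eq), not_fixed_array);
}

#undef __

A caller inside the code generator would typically pass masm() together with two allocated scratch registers and bind the failure label to a deferred or runtime-call path; the exact registers depend on the surrounding instruction's register constraints.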