class SafepointGenerator V8_FINAL : public CallWrapper {
                     LPointerMap* pointers,
                     Safepoint::DeoptMode mode)
    codegen_->RecordSafepoint(pointers_, deopt_mode_);
  LPointerMap* pointers_;
  Safepoint::DeoptMode deopt_mode_;

  LPhase phase("Z_Code generation", chunk());
  return GeneratePrologue() &&
         GenerateDeferredCode() &&
         GenerateDeoptJumpTable() &&
         GenerateSafepointTable();
  code->set_stack_slots(GetStackSlotCount());
  code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
  if (code->is_optimized_code()) RegisterWeakObjectsInOptimizedCode(code);
  PopulateDeoptimizationData(code);
  info()->CommitDependencies(code);

  info()->set_bailout_reason(reason);
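// Helpers that spill and reload the allocated double registers around code
// that may clobber them; the register set comes from
// chunk()->allocated_double_registers().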
void LCodeGen::SaveCallerDoubles() {
  Comment(";;; Save clobbered callee double registers");
  BitVector* doubles = chunk()->allocated_double_registers();
  BitVector::Iterator save_iterator(doubles);
  while (!save_iterator.Done()) {
    save_iterator.Advance();


void LCodeGen::RestoreCallerDoubles() {
  Comment(";;; Restore clobbered callee double registers");
  BitVector* doubles = chunk()->allocated_double_registers();
  BitVector::Iterator save_iterator(doubles);
  while (!save_iterator.Done()) {
    save_iterator.Advance();
bool LCodeGen::GeneratePrologue() {
  if (info()->IsOptimizing()) {
    if (strlen(FLAG_stop_at) > 0 &&
        info_->function()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {

    if (info_->this_has_uses() &&
        info_->strict_mode() == SLOPPY &&
        !info_->is_native()) {
      int receiver_offset = info_->scope()->num_parameters() * kPointerSize;
      __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
      __ Branch(&ok, ne, a2, Operand(at));

  info()->set_prologue_offset(masm_->pc_offset());
    frame_is_built_ = true;
    info_->AddNoFrameRange(0, masm_->pc_offset());

  int slots = GetStackSlotCount();
    if (FLAG_debug_code) {
      __ Branch(&loop, ne, a0, Operand(sp));

  if (info()->saves_caller_doubles()) {

  if (heap_slots > 0) {
    Comment(";;; Allocate local context");
    if (heap_slots <= FastNewContextStub::kMaximumSlots) {
      FastNewContextStub stub(heap_slots);
      __ CallRuntime(Runtime::kHiddenNewFunctionContext, 1);
    RecordSafepoint(Safepoint::kNoLazyDeopt);
    for (int i = 0; i < num_parameters; i++) {
      if (var->IsContextSlot()) {
        __ RecordWriteContextSlot(
    Comment(";;; End allocate local context");

  if (FLAG_trace && info()->IsOptimizing()) {
    __ CallRuntime(Runtime::kTraceEnter, 0);
  return !is_aborted();
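// OSR entry: the prologue is emitted only once (guarded by osr_pc_offset_),
// and only the slots not already reserved by the unoptimized frame are added.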
void LCodeGen::GenerateOsrPrologue() {
  if (osr_pc_offset_ >= 0) return;
  osr_pc_offset_ = masm()->pc_offset();
  int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots();


void LCodeGen::GenerateBodyInstructionPre(LInstruction* instr) {
  if (instr->IsCall()) {
  if (!instr->IsLazyBailout() && !instr->IsGap()) {
    safepoints_.BumpLastLazySafepointIndex();
bool LCodeGen::GenerateDeferredCode() {
  if (deferred_.length() > 0) {
    for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
          instructions_->at(code->instruction_index())->hydrogen_value();
      RecordAndWritePosition(
          chunk()->graph()->SourcePositionToScriptPosition(value->position()));
      Comment(";;; <@%d,#%d> "
              "-------------------- Deferred %s --------------------",
              code->instruction_index(),
              code->instr()->hydrogen_value()->id(),
              code->instr()->Mnemonic());
      __ bind(code->entry());
        Comment(";;; Build frame");
        frame_is_built_ = true;
      Comment(";;; Deferred code");
        Comment(";;; Destroy frame");
        frame_is_built_ = false;
      __ jmp(code->exit());

  if (!is_aborted()) status_ = DONE;
  return !is_aborted();
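// Emits one out-of-line jump table entry per recorded deoptimization target;
// each entry loads the deopt entry address into t9 and rebuilds a frame first
// when the deopt point has none.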
bool LCodeGen::GenerateDeoptJumpTable() {
  if (deopt_jump_table_.length() > 0) {
    Comment(";;; -------------------- Jump table --------------------");
    __ bind(&table_start);
    for (int i = 0; i < deopt_jump_table_.length(); i++) {
      __ bind(&deopt_jump_table_[i].label);
      Address entry = deopt_jump_table_[i].address;
        Comment(";;; jump table entry %d.", i);
        Comment(";;; jump table entry %d: deoptimization bailout %d.", i, id);
      __ li(t9, Operand(ExternalReference::ForDeoptEntry(entry)));
      if (deopt_jump_table_[i].needs_frame) {
        if (needs_frame.is_bound()) {
          __ Branch(&needs_frame);
          __ bind(&needs_frame);
        if (info()->saves_caller_doubles()) {
          RestoreCallerDoubles();
  __ RecordComment("]");

  if (!is_aborted()) status_ = DONE;
  return !is_aborted();


bool LCodeGen::GenerateSafepointTable() {
  safepoints_.Emit(masm(), GetStackSlotCount());
  return !is_aborted();
  if (op->IsRegister()) {
  } else if (op->IsConstantOperand()) {
    LConstantOperand* const_op = LConstantOperand::cast(op);
    HConstant* constant = chunk_->LookupConstant(const_op);
    Handle<Object> literal = constant->handle(isolate());
    Representation r = chunk_->LookupLiteralRepresentation(const_op);
    if (r.IsInteger32()) {
      ASSERT(literal->IsNumber());
      __ li(scratch, Operand(static_cast<int32_t>(literal->Number())));
    } else if (r.IsSmi()) {
      ASSERT(constant->HasSmiValue());
      __ li(scratch, Operand(Smi::FromInt(constant->Integer32Value())));
    } else if (r.IsDouble()) {
      Abort(kEmitLoadRegisterUnsupportedDoubleImmediate);
      ASSERT(r.IsSmiOrTagged());
      __ li(scratch, literal);
  } else if (op->IsStackSlot()) {
  ASSERT(op->IsDoubleRegister());

  if (op->IsDoubleRegister()) {
  } else if (op->IsConstantOperand()) {
    LConstantOperand* const_op = LConstantOperand::cast(op);
    HConstant* constant = chunk_->LookupConstant(const_op);
      __ li(at, Operand(static_cast<int32_t>(literal->Number())));
      __ mtc1(at, flt_scratch);
      __ cvt_d_w(dbl_scratch, flt_scratch);
      Abort(kUnsupportedDoubleImmediate);
      Abort(kUnsupportedTaggedImmediate);
  } else if (op->IsStackSlot()) {
    __ ldc1(dbl_scratch, mem_op);
  HConstant* constant = chunk_->LookupConstant(op);
  ASSERT(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged());
  return constant->handle(isolate());

  return chunk_->LookupLiteralRepresentation(op).IsSmiOrInteger32();

  return chunk_->LookupLiteralRepresentation(op).IsSmi();

                                   const Representation& r) const {
  HConstant* constant = chunk_->LookupConstant(op);
  int32_t value = constant->Integer32Value();
  if (r.IsInteger32()) return value;
  ASSERT(r.IsSmiOrTagged());

  HConstant* constant = chunk_->LookupConstant(op);

  HConstant* constant = chunk_->LookupConstant(op);
  ASSERT(constant->HasDoubleValue());
  return constant->DoubleValue();

  if (op->IsConstantOperand()) {
    LConstantOperand* const_op = LConstantOperand::cast(op);
    HConstant* constant = chunk()->LookupConstant(const_op);
    Representation r = chunk_->LookupLiteralRepresentation(const_op);
      ASSERT(constant->HasSmiValue());
      return Operand(Smi::FromInt(constant->Integer32Value()));
    } else if (r.IsInteger32()) {
      ASSERT(constant->HasInteger32Value());
      return Operand(constant->Integer32Value());
    } else if (r.IsDouble()) {
      Abort(kToOperandUnsupportedDoubleImmediate);
    return Operand(constant->handle(isolate()));
  } else if (op->IsRegister()) {
  } else if (op->IsDoubleRegister()) {
    Abort(kToOperandIsDoubleRegisterUnimplemented);


static int ArgumentsOffsetWithoutFrame(int index) {

  ASSERT(!op->IsRegister());
  ASSERT(!op->IsDoubleRegister());
  ASSERT(op->IsStackSlot() || op->IsDoubleStackSlot());
    return MemOperand(sp, ArgumentsOffsetWithoutFrame(op->index()));

  ASSERT(op->IsDoubleStackSlot());
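// Translation records describe how to rebuild the unoptimized frame(s) at a
// deopt point: one Begin*Frame entry per frame type, followed by one entry per
// environment value, added via AddToTranslation below.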
                                Translation* translation) {
  if (environment == NULL) return;

  int translation_size = environment->translation_size();
  int height = translation_size - environment->parameter_count();

  bool has_closure_id = !info()->closure().is_null() &&
      !info()->closure().is_identical_to(environment->closure());
  int closure_id = has_closure_id
      ? DefineDeoptimizationLiteral(environment->closure())
      : Translation::kSelfLiteralId;

  switch (environment->frame_type()) {
      translation->BeginJSFrame(environment->ast_id(), closure_id, height);
      translation->BeginConstructStubFrame(closure_id, translation_size);
      ASSERT(translation_size == 1);
      translation->BeginGetterStubFrame(closure_id);
      ASSERT(translation_size == 2);
      translation->BeginSetterStubFrame(closure_id);
      translation->BeginCompiledStubFrame();
      translation->BeginArgumentsAdaptorFrame(closure_id, translation_size);

  int object_index = 0;
  int dematerialized_index = 0;
  for (int i = 0; i < translation_size; ++i) {
    LOperand* value = environment->values()->at(i);
    AddToTranslation(environment,
                     environment->HasTaggedValueAt(i),
                     environment->HasUint32ValueAt(i),
                     &dematerialized_index);
void LCodeGen::AddToTranslation(LEnvironment* environment,
                                Translation* translation,
                                int* object_index_pointer,
                                int* dematerialized_index_pointer) {
  if (op == LEnvironment::materialization_marker()) {
    int object_index = (*object_index_pointer)++;
    if (environment->ObjectIsDuplicateAt(object_index)) {
      int dupe_of = environment->ObjectDuplicateOfAt(object_index);
      translation->DuplicateObject(dupe_of);
    int object_length = environment->ObjectLengthAt(object_index);
    if (environment->ObjectIsArgumentsAt(object_index)) {
      translation->BeginArgumentsObject(object_length);
      translation->BeginCapturedObject(object_length);
    int dematerialized_index = *dematerialized_index_pointer;
    int env_offset = environment->translation_size() + dematerialized_index;
    *dematerialized_index_pointer += object_length;
    for (int i = 0; i < object_length; ++i) {
      LOperand* value = environment->values()->at(env_offset + i);
      AddToTranslation(environment,
                       environment->HasTaggedValueAt(env_offset + i),
                       environment->HasUint32ValueAt(env_offset + i),
                       object_index_pointer,
                       dematerialized_index_pointer);

  if (op->IsStackSlot()) {
      translation->StoreStackSlot(op->index());
    } else if (is_uint32) {
      translation->StoreUint32StackSlot(op->index());
      translation->StoreInt32StackSlot(op->index());
  } else if (op->IsDoubleStackSlot()) {
    translation->StoreDoubleStackSlot(op->index());
  } else if (op->IsRegister()) {
      translation->StoreRegister(reg);
    } else if (is_uint32) {
      translation->StoreUint32Register(reg);
      translation->StoreInt32Register(reg);
  } else if (op->IsDoubleRegister()) {
    translation->StoreDoubleRegister(reg);
  } else if (op->IsConstantOperand()) {
    HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op));
    int src_index = DefineDeoptimizationLiteral(constant->handle(isolate()));
    translation->StoreLiteral(src_index);
void LCodeGen::CallCode(Handle<Code> code,
                        RelocInfo::Mode mode,
                        LInstruction* instr) {
  CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT);


void LCodeGen::CallCodeGeneric(Handle<Code> code,
                               RelocInfo::Mode mode,
                               SafepointMode safepoint_mode) {
  RecordSafepointWithLazyDeopt(instr, safepoint_mode);


void LCodeGen::CallRuntime(const Runtime::Function* function,
  __ CallRuntime(function, num_arguments, save_doubles);
  RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);


void LCodeGen::LoadContextFromDeferred(LOperand* context) {
  if (context->IsRegister()) {
  } else if (context->IsStackSlot()) {
  } else if (context->IsConstantOperand()) {
    HConstant* constant =
        chunk_->LookupConstant(LConstantOperand::cast(context));
    __ li(cp, Handle<Object>::cast(constant->handle(isolate())));

  LoadContextFromDeferred(context);
  __ CallRuntimeSaveDoubles(id);
  RecordSafepointWithRegisters(
      instr->pointer_map(), argc, Safepoint::kNoLazyDeopt);
void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
                                                    Safepoint::DeoptMode mode) {
  if (!environment->HasBeenRegistered()) {
    int jsframe_count = 0;
    Translation translation(&translations_, frame_count, jsframe_count, zone());
    int deoptimization_index = deoptimizations_.length();
    int pc_offset = masm()->pc_offset();
    environment->Register(deoptimization_index,
                          (mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
    deoptimizations_.Add(environment, zone());
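// Conditional deoptimization. A direct jump to the deopt entry is emitted only
// in the unconditional, frame-already-built case; everything else goes through
// the shared deopt_jump_table_, reusing the previous entry when address,
// bailout type and frame requirement all match.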
void LCodeGen::DeoptimizeIf(Condition condition,
                            LEnvironment* environment,
                            const Operand& src2) {
  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
  ASSERT(environment->HasBeenRegistered());
  int id = environment->deoptimization_index();
    Abort(kBailoutWasNotPrepared);

  if (FLAG_deopt_every_n_times != 0 && !info()->IsStub()) {
    Register scratch = scratch0();
    ExternalReference count = ExternalReference::stress_deopt_count(isolate());
    __ Push(a1, scratch);
    __ li(scratch, Operand(count));
    __ Subu(a1, a1, Operand(1));
    __ Branch(&no_deopt, ne, a1, Operand(zero_reg));
    __ li(a1, Operand(FLAG_deopt_every_n_times));

  if (info()->ShouldTrapOnDeopt()) {
    if (condition != al) {
    __ stop("trap_on_deopt");

  if (condition == al && frame_is_built_ &&
      !info()->saves_caller_doubles()) {
    if (deopt_jump_table_.is_empty() ||
        (deopt_jump_table_.last().address != entry) ||
        (deopt_jump_table_.last().bailout_type != bailout_type) ||
        (deopt_jump_table_.last().needs_frame != !frame_is_built_)) {
      Deoptimizer::JumpTableEntry table_entry(entry,
      deopt_jump_table_.Add(table_entry, zone());
    __ Branch(&deopt_jump_table_.last().label, condition, src1, src2);


void LCodeGen::DeoptimizeIf(Condition condition,
                            LEnvironment* environment,
                            const Operand& src2) {
  DeoptimizeIf(condition, environment, bailout_type, src1, src2);
void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
  int length = deoptimizations_.length();
  if (length == 0) return;
  Handle<DeoptimizationInputData> data =
      factory()->NewDeoptimizationInputData(length, TENURED);

  Handle<ByteArray> translations =
      translations_.CreateByteArray(isolate()->factory());
  data->SetTranslationByteArray(*translations);
  data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));
  data->SetOptimizationId(Smi::FromInt(info_->optimization_id()));
  if (info_->IsOptimizing()) {
    data->SetSharedFunctionInfo(*info_->shared_info());

      factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
  for (int i = 0; i < deoptimization_literals_.length(); i++) {
    literals->set(i, *deoptimization_literals_[i]);
  data->SetLiteralArray(*literals);

  data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id().ToInt()));

  for (int i = 0; i < length; i++) {
    data->SetAstId(i, env->ast_id());
    data->SetTranslationIndex(i, Smi::FromInt(env->translation_index()));
    data->SetArgumentsStackHeight(i,
  code->set_deoptimization_data(*data);


int LCodeGen::DefineDeoptimizationLiteral(Handle<Object> literal) {
  int result = deoptimization_literals_.length();
  for (int i = 0; i < deoptimization_literals_.length(); ++i) {
    if (deoptimization_literals_[i].is_identical_to(literal)) return i;
  deoptimization_literals_.Add(literal, zone());


void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
  ASSERT(deoptimization_literals_.length() == 0);

  const ZoneList<Handle<JSFunction> >* inlined_closures =
      chunk()->inlined_closures();

  for (int i = 0, length = inlined_closures->length();
    DefineDeoptimizationLiteral(inlined_closures->at(i));

  inlined_function_count_ = deoptimization_literals_.length();
void LCodeGen::RecordSafepointWithLazyDeopt(
    LInstruction* instr, SafepointMode safepoint_mode) {
  if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
    RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt);
    ASSERT(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
    RecordSafepointWithRegisters(
        instr->pointer_map(), 0, Safepoint::kLazyDeopt);


void LCodeGen::RecordSafepoint(
    LPointerMap* pointers,
    Safepoint::Kind kind,
    Safepoint::DeoptMode deopt_mode) {
  ASSERT(expected_safepoint_kind_ == kind);

  const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();
  Safepoint safepoint = safepoints_.DefineSafepoint(masm(),
      kind, arguments, deopt_mode);
  for (int i = 0; i < operands->length(); i++) {
    LOperand* pointer = operands->at(i);
    if (pointer->IsStackSlot()) {
      safepoint.DefinePointerSlot(pointer->index(), zone());
    } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
      safepoint.DefinePointerRegister(ToRegister(pointer), zone());


void LCodeGen::RecordSafepoint(LPointerMap* pointers,
                               Safepoint::DeoptMode deopt_mode) {
  RecordSafepoint(pointers, Safepoint::kSimple, 0, deopt_mode);


void LCodeGen::RecordSafepoint(Safepoint::DeoptMode deopt_mode) {
  LPointerMap empty_pointers(zone());
  RecordSafepoint(&empty_pointers, deopt_mode);


void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
                                            Safepoint::DeoptMode deopt_mode) {
      pointers, Safepoint::kWithRegisters, arguments, deopt_mode);


void LCodeGen::RecordSafepointWithRegistersAndDoubles(
    LPointerMap* pointers,
    Safepoint::DeoptMode deopt_mode) {
      pointers, Safepoint::kWithRegistersAndDoubles, arguments, deopt_mode);


void LCodeGen::RecordAndWritePosition(int position) {
  if (position == RelocInfo::kNoPosition) return;
  masm()->positions_recorder()->RecordPosition(position);
  masm()->positions_recorder()->WriteRecordedPositions();
static const char* LabelType(LLabel* label) {
  if (label->is_loop_header()) return " (loop header)";
  if (label->is_osr_entry()) return " (OSR entry)";


void LCodeGen::DoLabel(LLabel* label) {
  Comment(";;; <@%d,#%d> -------------------- B%d%s --------------------",
          current_instruction_,
          label->hydrogen_value()->id(),
  __ bind(label->label());
  current_block_ = label->block_id();

  resolver_.Resolve(move);

      LParallelMove* move = gap->GetParallelMove(inner_pos);


void LCodeGen::DoInstructionGap(LInstructionGap* instr) {


void LCodeGen::DoParameter(LParameter* instr) {


void LCodeGen::DoCallStub(LCallStub* instr) {
  switch (instr->hydrogen()->major_key()) {
    case CodeStub::RegExpExec: {
      RegExpExecStub stub;
      CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
    case CodeStub::SubString: {
      CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
    case CodeStub::StringCompare: {
      StringCompareStub stub;
      CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);


void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
  GenerateOsrPrologue();
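// Modulus by a power of two: reduces to a bitwise AND with |divisor| - 1.
// Negative dividends are negated before masking (and negated back after) so
// the result keeps the sign of the dividend, with an optional -0 deopt check.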
void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();

  HMod* hmod = instr->hydrogen();
  int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
  Label dividend_is_not_negative, done;
  __ Branch(&dividend_is_not_negative, ge, dividend, Operand(zero_reg));
    __ subu(dividend, zero_reg, dividend);
    __ And(dividend, dividend, Operand(mask));
      DeoptimizeIf(eq, instr->environment(), dividend, Operand(zero_reg));
    __ subu(dividend, zero_reg, dividend);

  __ bind(&dividend_is_not_negative);
  __ And(dividend, dividend, Operand(mask));
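// Modulus by an arbitrary constant: computes dividend - (dividend / divisor) *
// divisor using TruncatingDiv, then deopts when the result is zero and the
// dividend is negative in case -0 must be caught.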
void LCodeGen::DoModByConstI(LModByConstI* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  Register result = ToRegister(instr->result());
  ASSERT(!dividend.is(result));

    DeoptimizeIf(al, instr->environment());

  __ TruncatingDiv(result, dividend, Abs(divisor));
  __ Mul(result, result, Operand(Abs(divisor)));
  __ Subu(result, dividend, Operand(result));

  HMod* hmod = instr->hydrogen();
    Label remainder_not_zero;
    __ Branch(&remainder_not_zero, ne, result, Operand(zero_reg));
    DeoptimizeIf(lt, instr->environment(), dividend, Operand(zero_reg));
    __ bind(&remainder_not_zero);
void LCodeGen::DoModI(LModI* instr) {
  HMod* hmod = instr->hydrogen();
  const Register left_reg = ToRegister(instr->left());
  const Register right_reg = ToRegister(instr->right());
  const Register result_reg = ToRegister(instr->result());

  __ div(left_reg, right_reg);

    DeoptimizeIf(eq, instr->environment(), right_reg, Operand(zero_reg));

    Label no_overflow_possible;
    __ Branch(&no_overflow_possible, ne, left_reg, Operand(kMinInt));
      DeoptimizeIf(eq, instr->environment(), right_reg, Operand(-1));
      __ Branch(&no_overflow_possible, ne, right_reg, Operand(-1));
      __ mov(result_reg, zero_reg);
    __ bind(&no_overflow_possible);

  __ mfhi(result_reg);
    DeoptimizeIf(eq, instr->environment(), result_reg, Operand(zero_reg));
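// Division by a power of two via arithmetic shift: the sign bits of the
// dividend are added back first so truncation rounds toward zero, with deopt
// checks for zero dividends, kMinInt, and non-zero remainders where required.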
void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  Register result = ToRegister(instr->result());
  ASSERT(!result.is(dividend));

  HDiv* hdiv = instr->hydrogen();
    DeoptimizeIf(eq, instr->environment(), dividend, Operand(zero_reg));
    DeoptimizeIf(eq, instr->environment(), dividend, Operand(kMinInt));
      divisor != 1 && divisor != -1) {
    int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
    __ And(at, dividend, Operand(mask));
    DeoptimizeIf(ne, instr->environment(), at, Operand(zero_reg));

  if (divisor == -1) {
    __ Subu(result, zero_reg, dividend);
    __ Move(result, dividend);
  } else if (shift == 1) {
    __ srl(result, dividend, 31);
    __ Addu(result, dividend, Operand(result));
    __ sra(result, dividend, 31);
    __ srl(result, result, 32 - shift);
    __ Addu(result, dividend, Operand(result));
  if (shift > 0) __ sra(result, result, shift);
  if (divisor < 0) __ Subu(result, zero_reg, result);
void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  Register result = ToRegister(instr->result());
  ASSERT(!dividend.is(result));

    DeoptimizeIf(al, instr->environment());

  HDiv* hdiv = instr->hydrogen();
    DeoptimizeIf(eq, instr->environment(), dividend, Operand(zero_reg));

  __ TruncatingDiv(result, dividend, Abs(divisor));
  if (divisor < 0) __ Subu(result, zero_reg, result);

    __ Mul(scratch0(), result, Operand(divisor));
    __ Subu(scratch0(), scratch0(), dividend);
    DeoptimizeIf(ne, instr->environment(), scratch0(), Operand(zero_reg));
void LCodeGen::DoDivI(LDivI* instr) {
  HBinaryOperation* hdiv = instr->hydrogen();
  const Register left = ToRegister(instr->left());
  const Register right = ToRegister(instr->right());
  const Register result = ToRegister(instr->result());

  __ div(left, right);

    DeoptimizeIf(eq, instr->environment(), right, Operand(zero_reg));

    Label left_not_zero;
    __ Branch(&left_not_zero, ne, left, Operand(zero_reg));
    DeoptimizeIf(lt, instr->environment(), right, Operand(zero_reg));
    __ bind(&left_not_zero);

    Label left_not_min_int;
    __ Branch(&left_not_min_int, ne, left, Operand(kMinInt));
    DeoptimizeIf(eq, instr->environment(), right, Operand(-1));
    __ bind(&left_not_min_int);

  if (hdiv->IsMathFloorOfDiv()) {
    Register remainder = scratch0();
    __ Xor(remainder, remainder, Operand(right));
    __ Branch(&done, ge, remainder, Operand(zero_reg));
    __ Subu(result, result, Operand(1));
    DeoptimizeIf(ne, instr->environment(), result, Operand(zero_reg));


void LCodeGen::DoMultiplyAddD(LMultiplyAddD* instr) {
  __ madd_d(addend, addend, multiplier, multiplicand);
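// Flooring (Math.floor-style) division helpers: unlike the truncating variants
// above, the quotient is decremented by one whenever the remainder is non-zero
// and dividend and divisor have opposite signs.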
void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
  Register dividend = ToRegister(instr->dividend());
  Register result = ToRegister(instr->result());
  int32_t divisor = instr->divisor();
  Register scratch = scratch0();
  ASSERT(!scratch.is(dividend));

  if (divisor == 1) return;
    __ sra(result, dividend, shift);

    __ Move(scratch, dividend);
    __ Subu(result, zero_reg, dividend);
      DeoptimizeIf(eq, instr->environment(), result, Operand(zero_reg));

    __ Xor(at, scratch, result);
    if (divisor == -1) {
      DeoptimizeIf(ge, instr->environment(), at, Operand(zero_reg));
      __ sra(result, dividend, shift);
      __ Branch(&no_overflow, lt, at, Operand(zero_reg));
      __ li(result, Operand(kMinInt / divisor));
      __ bind(&no_overflow);
      __ sra(result, dividend, shift);
    __ sra(result, dividend, shift);


void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  Register result = ToRegister(instr->result());
  ASSERT(!dividend.is(result));

    DeoptimizeIf(al, instr->environment());

  HMathFloorOfDiv* hdiv = instr->hydrogen();
    DeoptimizeIf(eq, instr->environment(), dividend, Operand(zero_reg));

    __ TruncatingDiv(result, dividend, Abs(divisor));
    if (divisor < 0) __ Subu(result, zero_reg, result);

  ASSERT(!temp.is(dividend) && !temp.is(result));
  Label needs_adjustment, done;
  __ Branch(&needs_adjustment, divisor > 0 ? lt : gt,
            dividend, Operand(zero_reg));
  __ TruncatingDiv(result, dividend, Abs(divisor));
  if (divisor < 0) __ Subu(result, zero_reg, result);
  __ bind(&needs_adjustment);
  __ Addu(temp, dividend, Operand(divisor > 0 ? 1 : -1));
  __ TruncatingDiv(result, temp, Abs(divisor));
  if (divisor < 0) __ Subu(result, zero_reg, result);
  __ Subu(result, result, Operand(1));
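// Multiplication strength reduction: constant right operands of the form 2^n,
// 2^n + 1, or 2^n - 1 are lowered to shift plus add/sub combinations before
// falling back to a real Mul, with overflow and -0 deopt checks as needed.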
void LCodeGen::DoMulI(LMulI* instr) {
  Register scratch = scratch0();
  Register result = ToRegister(instr->result());
  LOperand* right_op = instr->right();
  bool bailout_on_minus_zero =

  if (right_op->IsConstantOperand()) {
    if (bailout_on_minus_zero && (constant < 0)) {
      DeoptimizeIf(eq, instr->environment(), left, Operand(zero_reg));
        __ SubuAndCheckForOverflow(result, zero_reg, left, scratch);
        DeoptimizeIf(lt, instr->environment(), scratch, Operand(zero_reg));
        __ Subu(result, zero_reg, left);
        if (bailout_on_minus_zero) {
          DeoptimizeIf(lt, instr->environment(), left, Operand(zero_reg));
        __ mov(result, zero_reg);
        __ Move(result, left);
        int32_t mask = constant >> 31;
        uint32_t constant_abs = (constant + mask) ^ mask;
          __ sll(result, left, shift);
          if (constant < 0) __ Subu(result, zero_reg, result);
          __ sll(scratch, left, shift);
          __ Addu(result, scratch, left);
          if (constant < 0) __ Subu(result, zero_reg, result);
          __ sll(scratch, left, shift);
          __ Subu(result, scratch, left);
          if (constant < 0) __ Subu(result, zero_reg, result);
          __ li(at, constant);
          __ Mul(result, left, at);

    ASSERT(right_op->IsRegister());
      if (instr->hydrogen()->representation().IsSmi()) {
        __ SmiUntag(result, left);
        __ mult(result, right);
        __ mult(left, right);
      __ sra(at, result, 31);
      DeoptimizeIf(ne, instr->environment(), scratch, Operand(at));
      if (instr->hydrogen()->representation().IsSmi()) {
        __ SmiUntag(result, left);
        __ Mul(result, result, right);
        __ Mul(result, left, right);

    if (bailout_on_minus_zero) {
      __ Xor(at, left, right);
      __ Branch(&done, ge, at, Operand(zero_reg));
                   instr->environment(),
void LCodeGen::DoBitI(LBitI* instr) {
  LOperand* left_op = instr->left();
  LOperand* right_op = instr->right();
  ASSERT(left_op->IsRegister());
  Register result = ToRegister(instr->result());

  if (right_op->IsStackSlot()) {
    ASSERT(right_op->IsRegister() || right_op->IsConstantOperand());

  switch (instr->op()) {
    case Token::BIT_AND:
      __ And(result, left, right);
      __ Or(result, left, right);
    case Token::BIT_XOR:
      if (right_op->IsConstantOperand() && right.immediate() == int32_t(~0)) {
        __ Nor(result, zero_reg, left);
        __ Xor(result, left, right);
void LCodeGen::DoShiftI(LShiftI* instr) {
  LOperand* right_op = instr->right();
  Register result = ToRegister(instr->result());
  Register scratch = scratch0();

  if (right_op->IsRegister()) {
    switch (instr->op()) {
        if (instr->can_deopt()) {
          DeoptimizeIf(lt, instr->environment(), result, Operand(zero_reg));

    int value = ToInteger32(LConstantOperand::cast(right_op));
    uint8_t shift_count = static_cast<uint8_t>(value & 0x1F);
    switch (instr->op()) {
        if (shift_count != 0) {
          __ Ror(result, left, Operand(shift_count));
          __ Move(result, left);
        if (shift_count != 0) {
          __ sra(result, left, shift_count);
          __ Move(result, left);
        if (shift_count != 0) {
          __ srl(result, left, shift_count);
          if (instr->can_deopt()) {
            __ And(at, left, Operand(0x80000000));
            DeoptimizeIf(ne, instr->environment(), at, Operand(zero_reg));
          __ Move(result, left);
        if (shift_count != 0) {
          if (instr->hydrogen_value()->representation().IsSmi() &&
              instr->can_deopt()) {
            if (shift_count != 1) {
              __ sll(result, left, shift_count - 1);
              __ SmiTagCheckOverflow(result, result, scratch);
              __ SmiTagCheckOverflow(result, left, scratch);
            DeoptimizeIf(lt, instr->environment(), scratch, Operand(zero_reg));
            __ sll(result, left, shift_count);
          __ Move(result, left);
void LCodeGen::DoSubI(LSubI* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();
  LOperand* result = instr->result();

  if (!can_overflow) {
    if (right->IsStackSlot()) {
      ASSERT(right->IsRegister() || right->IsConstantOperand());
    Register overflow = scratch0();
    Register scratch = scratch1();
    if (right->IsStackSlot() || right->IsConstantOperand()) {
      ASSERT(right->IsRegister());
      DeoptimizeIf(lt, instr->environment(), overflow, Operand(zero_reg));


void LCodeGen::DoConstantI(LConstantI* instr) {
  __ li(ToRegister(instr->result()), Operand(instr->value()));


void LCodeGen::DoConstantS(LConstantS* instr) {
  __ li(ToRegister(instr->result()), Operand(instr->value()));


void LCodeGen::DoConstantD(LConstantD* instr) {
  ASSERT(instr->result()->IsDoubleRegister());
  double v = instr->value();


void LCodeGen::DoConstantE(LConstantE* instr) {
  __ li(ToRegister(instr->result()), Operand(instr->value()));


void LCodeGen::DoConstantT(LConstantT* instr) {
  Handle<Object> value = instr->value(isolate());
void LCodeGen::DoMapEnumLength(LMapEnumLength* instr) {
  Register result = ToRegister(instr->result());
  __ EnumLength(result, map);


void LCodeGen::DoDateField(LDateField* instr) {
  Register result = ToRegister(instr->result());
  Register scratch = ToRegister(instr->temp());
  Smi* index = instr->index();
  Label runtime, done;
  ASSERT(!scratch.is(scratch0()));
  ASSERT(!scratch.is(object));

  __ SmiTst(object, at);
  DeoptimizeIf(eq, instr->environment(), at, Operand(zero_reg));
  __ GetObjectType(object, scratch, scratch);
  DeoptimizeIf(ne, instr->environment(), scratch, Operand(JS_DATE_TYPE));

  if (index->value() == 0) {
    ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
    __ li(scratch, Operand(stamp));
    __ Branch(&runtime, ne, scratch, Operand(scratch0()));
    __ PrepareCallCFunction(2, scratch);
    __ li(a1, Operand(index));
    __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
MemOperand LCodeGen::BuildSeqStringOperand(Register string,
  if (index->IsConstantOperand()) {
    int offset = ToInteger32(LConstantOperand::cast(index));
  Register scratch = scratch0();
  ASSERT(!scratch.is(string));
  __ Addu(scratch, string, scratch);


void LCodeGen::DoSeqStringGetChar(LSeqStringGetChar* instr) {
  Register string = ToRegister(instr->string());
  Register result = ToRegister(instr->result());

  if (FLAG_debug_code) {
    Register scratch = scratch0();
    __ And(scratch, scratch,
                  ? one_byte_seq_type : two_byte_seq_type));
    __ Check(eq, kUnexpectedStringType, at, Operand(zero_reg));

  MemOperand operand = BuildSeqStringOperand(string, instr->index(), encoding);
    __ lbu(result, operand);
    __ lhu(result, operand);


void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
  Register string = ToRegister(instr->string());

  if (FLAG_debug_code) {
    Register scratch = scratch0();
        ? one_byte_seq_type : two_byte_seq_type;
    __ EmitSeqStringSetCharCheck(string, index, value, scratch, encoding_mask);

  MemOperand operand = BuildSeqStringOperand(string, instr->index(), encoding);
    __ sb(value, operand);
    __ sh(value, operand);
void LCodeGen::DoAddI(LAddI* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();
  LOperand* result = instr->result();

  if (!can_overflow) {
    if (right->IsStackSlot()) {
      ASSERT(right->IsRegister() || right->IsConstantOperand());
    Register overflow = scratch0();
    Register scratch = scratch1();
    if (right->IsStackSlot() ||
        right->IsConstantOperand()) {
      ASSERT(right->IsRegister());
      DeoptimizeIf(lt, instr->environment(), overflow, Operand(zero_reg));
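// Min/max: integer operands use a conditional branch on le/ge; double operands
// need extra paths so that -0 vs +0 (resolved via neg_d/sub_d/add_d of the
// operands) and NaN inputs produce the right result.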
void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();
  HMathMinMax::Operation operation = instr->hydrogen()->operation();
  Condition condition = (operation == HMathMinMax::kMathMin) ? le : ge;
  if (instr->hydrogen()->representation().IsSmiOrInteger32()) {
    Operand right_op = (right->IsRegister() || right->IsConstantOperand())
    Register result_reg = ToRegister(instr->result());
    Label return_right, done;
    if (!result_reg.is(left_reg)) {
      __ mov(result_reg, left_reg);
    __ Branch(&done, condition, left_reg, right_op);
    __ bind(&return_right);
    __ Addu(result_reg, zero_reg, right_op);
    ASSERT(instr->hydrogen()->representation().IsDouble());
    Label check_nan_left, check_zero, return_left, return_right, done;
    __ BranchF(&check_zero, &check_nan_left, eq, left_reg, right_reg);
    __ BranchF(&return_left, NULL, condition, left_reg, right_reg);
    __ Branch(&return_right);

    __ bind(&check_zero);
    if (operation == HMathMinMax::kMathMin) {
      __ neg_d(left_reg, left_reg);
      __ sub_d(result_reg, left_reg, right_reg);
      __ neg_d(result_reg, result_reg);
      __ add_d(result_reg, left_reg, right_reg);

    __ bind(&check_nan_left);
    __ BranchF(NULL, &return_left, eq, left_reg, left_reg);
    __ bind(&return_right);
    if (!right_reg.is(result_reg)) {
      __ mov_d(result_reg, right_reg);

    __ bind(&return_left);
    if (!left_reg.is(result_reg)) {
      __ mov_d(result_reg, left_reg);
void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
  switch (instr->op()) {
      __ add_d(result, left, right);
      __ sub_d(result, left, right);
      __ mul_d(result, left, right);
      __ div_d(result, left, right);
      RegList saved_regs = a0.bit() | a1.bit() | a2.bit() | a3.bit();
      __ MultiPush(saved_regs);
      __ PrepareCallCFunction(0, 2, scratch0());
      __ MovToFloatParameters(left, right);
          ExternalReference::mod_two_doubles_operation(isolate()),
      __ MovFromFloatResult(result);
      __ MultiPop(saved_regs);


void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
  CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
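// Branch emission helpers: when one successor is the next emitted block the
// branch to it is omitted (fall-through), and an unconditional or same-target
// branch degenerates into a plain goto.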
template<class InstrType>
void LCodeGen::EmitBranch(InstrType instr,
                          const Operand& src2) {
  int left_block = instr->TrueDestination(chunk_);
  int right_block = instr->FalseDestination(chunk_);

  int next_block = GetNextEmittedBlock();
  if (right_block == left_block || condition == al) {
    EmitGoto(left_block);
  } else if (left_block == next_block) {
    __ Branch(chunk_->GetAssemblyLabel(right_block),
  } else if (right_block == next_block) {
    __ Branch(chunk_->GetAssemblyLabel(left_block), condition, src1, src2);
    __ Branch(chunk_->GetAssemblyLabel(left_block), condition, src1, src2);
    __ Branch(chunk_->GetAssemblyLabel(right_block));


template<class InstrType>
void LCodeGen::EmitBranchF(InstrType instr,
  int right_block = instr->FalseDestination(chunk_);
  int left_block = instr->TrueDestination(chunk_);

  int next_block = GetNextEmittedBlock();
  if (right_block == left_block) {
    EmitGoto(left_block);
  } else if (left_block == next_block) {
    __ BranchF(chunk_->GetAssemblyLabel(right_block), NULL,
  } else if (right_block == next_block) {
    __ BranchF(chunk_->GetAssemblyLabel(left_block), NULL,
               condition, src1, src2);
    __ BranchF(chunk_->GetAssemblyLabel(left_block), NULL,
               condition, src1, src2);
    __ Branch(chunk_->GetAssemblyLabel(right_block));


template<class InstrType>
void LCodeGen::EmitFalseBranch(InstrType instr,
                               const Operand& src2) {
  int false_block = instr->FalseDestination(chunk_);
  __ Branch(chunk_->GetAssemblyLabel(false_block), condition, src1, src2);


template<class InstrType>
void LCodeGen::EmitFalseBranchF(InstrType instr,
  int false_block = instr->FalseDestination(chunk_);
  __ BranchF(chunk_->GetAssemblyLabel(false_block), NULL,
             condition, src1, src2);


void LCodeGen::DoDebugBreak(LDebugBreak* instr) {
  __ stop("LDebugBreak");
void LCodeGen::DoBranch(LBranch* instr) {
  Representation r = instr->hydrogen()->value()->representation();
  if (r.IsInteger32() || r.IsSmi()) {
    EmitBranch(instr, ne, reg, Operand(zero_reg));
  } else if (r.IsDouble()) {
    HType type = instr->hydrogen()->value()->type();
    if (type.IsBoolean()) {
      __ LoadRoot(at, Heap::kTrueValueRootIndex);
      EmitBranch(instr, eq, reg, Operand(at));
    } else if (type.IsSmi()) {
      EmitBranch(instr, ne, reg, Operand(zero_reg));
    } else if (type.IsJSArray()) {
      EmitBranch(instr, al, zero_reg, Operand(zero_reg));
    } else if (type.IsHeapNumber()) {
    } else if (type.IsString()) {
      EmitBranch(instr, ne, at, Operand(zero_reg));
      ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types();
        __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
        __ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(at));
        __ LoadRoot(at, Heap::kTrueValueRootIndex);
        __ Branch(instr->TrueLabel(chunk_), eq, reg, Operand(at));
        __ LoadRoot(at, Heap::kFalseValueRootIndex);
        __ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(at));
        __ LoadRoot(at, Heap::kNullValueRootIndex);
        __ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(at));
        __ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(zero_reg));
        __ JumpIfSmi(reg, instr->TrueLabel(chunk_));
      } else if (expected.NeedsMap()) {
        DeoptimizeIf(eq, instr->environment(), at, Operand(zero_reg));

      const Register map = scratch0();
      if (expected.NeedsMap()) {
        if (expected.CanBeUndetectable()) {
          __ Branch(instr->FalseLabel(chunk_), ne, at, Operand(zero_reg));

        __ Branch(instr->TrueLabel(chunk_),
        __ Branch(instr->TrueLabel(chunk_), ne, at, Operand(zero_reg));
        __ Branch(instr->FalseLabel(chunk_));
        __ bind(&not_string);

        const Register scratch = scratch1();
        __ Branch(instr->TrueLabel(chunk_), eq, scratch, Operand(SYMBOL_TYPE));

        Label not_heap_number;
        __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
        __ Branch(&not_heap_number, ne, map, Operand(at));
        __ BranchF(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_),
        __ Branch(instr->FalseLabel(chunk_));
        __ bind(&not_heap_number);

      if (!expected.IsGeneric()) {
        DeoptimizeIf(al, instr->environment(), zero_reg, Operand(zero_reg));
void LCodeGen::EmitGoto(int block) {


void LCodeGen::DoGoto(LGoto* instr) {
  EmitGoto(instr->block_id());

    case Token::EQ_STRICT:
    case Token::NE_STRICT:
      cond = is_unsigned ? lo : lt;
      cond = is_unsigned ? hi : gt;
      cond = is_unsigned ? ls : le;
      cond = is_unsigned ? hs : ge;
    case Token::INSTANCEOF:
void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();
  Condition cond = TokenToCondition(instr->op(), false);

  if (left->IsConstantOperand() && right->IsConstantOperand()) {
    double left_val = ToDouble(LConstantOperand::cast(left));
    double right_val = ToDouble(LConstantOperand::cast(right));
    int next_block = EvalComparison(instr->op(), left_val, right_val) ?
        instr->TrueDestination(chunk_) : instr->FalseDestination(chunk_);
    EmitGoto(next_block);
    if (instr->is_double()) {
      __ BranchF(NULL, instr->FalseLabel(chunk_), eq,
                 left_reg, right_reg);
      EmitBranchF(instr, cond, left_reg, right_reg);
      Operand cmp_right = Operand(0);
      if (right->IsConstantOperand()) {
        if (instr->hydrogen_value()->representation().IsSmi()) {
          cmp_right = Operand(value);
      } else if (left->IsConstantOperand()) {
        if (instr->hydrogen_value()->representation().IsSmi()) {
          cmp_right = Operand(value);
      EmitBranch(instr, cond, cmp_left, cmp_right);
void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
  EmitBranch(instr, eq, left, Operand(right));


void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) {
  if (instr->hydrogen()->representation().IsTagged()) {
    Register input_reg = ToRegister(instr->object());
    __ li(at, Operand(factory()->the_hole_value()));
    EmitBranch(instr, eq, input_reg, Operand(at));

  EmitFalseBranchF(instr, eq, input_reg, input_reg);
  Register scratch = scratch0();
  __ FmoveHigh(scratch, input_reg);
void LCodeGen::DoCompareMinusZeroAndBranch(LCompareMinusZeroAndBranch* instr) {
  Representation rep = instr->hydrogen()->value()->representation();
  ASSERT(!rep.IsInteger32());
  Register scratch = ToRegister(instr->temp());

  if (rep.IsDouble()) {
    __ FmoveHigh(scratch, value);
    __ li(at, 0x80000000);
                Heap::kHeapNumberMapRootIndex,
                instr->FalseLabel(chunk()),
    EmitFalseBranch(instr, ne, scratch, Operand(0x80000000));
    __ mov(at, zero_reg);
    EmitBranch(instr, eq, scratch, Operand(at));
Condition LCodeGen::EmitIsObject(Register input,
                                 Label* is_not_object,
  __ JumpIfSmi(input, is_not_object);

  __ LoadRoot(temp2, Heap::kNullValueRootIndex);
  __ Branch(is_object, eq, input, Operand(temp2));
  __ Branch(is_not_object, ne, temp2, Operand(zero_reg));
  __ Branch(is_not_object,


void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
  Register temp2 = scratch0();
      EmitIsObject(reg, temp1, temp2,
                   instr->FalseLabel(chunk_), instr->TrueLabel(chunk_));
  EmitBranch(instr, true_cond, temp2,


Condition LCodeGen::EmitIsString(Register input,
                                 Label* is_not_string,
  __ JumpIfSmi(input, is_not_string);
  __ GetObjectType(input, temp1, temp1);


void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
      instr->hydrogen()->value()->IsHeapObject()
      EmitIsString(reg, temp1, instr->FalseLabel(chunk_), check_needed);
  EmitBranch(instr, true_cond, temp1,


void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
  EmitBranch(instr, eq, at, Operand(zero_reg));


void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
  if (!instr->hydrogen()->value()->IsHeapObject()) {
    __ JumpIfSmi(input, instr->FalseLabel(chunk_));
  EmitBranch(instr, ne, at, Operand(zero_reg));

    case Token::EQ_STRICT:
void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
  CallCode(ic, RelocInfo::CODE_TARGET, instr);

  Condition condition = ComputeCompareCondition(op);

  EmitBranch(instr, condition, v0, Operand(zero_reg));


static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {


static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
  if (from == to) return eq;


void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
  Register scratch = scratch0();
  if (!instr->hydrogen()->value()->IsHeapObject()) {
    __ JumpIfSmi(input, instr->FalseLabel(chunk_));

  __ GetObjectType(input, scratch, scratch);
             BranchCondition(instr->hydrogen()),
             Operand(TestType(instr->hydrogen())));


void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
  Register result = ToRegister(instr->result());
  __ AssertString(input);
  __ IndexFromHash(result, result);


void LCodeGen::DoHasCachedArrayIndexAndBranch(
    LHasCachedArrayIndexAndBranch* instr) {
  Register scratch = scratch0();
  EmitBranch(instr, eq, at, Operand(zero_reg));
void LCodeGen::EmitClassOfTest(Label* is_true,
                               Handle<String> class_name,
  ASSERT(!input.is(temp2));
  __ JumpIfSmi(input, is_false);
  __ GetObjectType(input, temp, temp2);
  __ GetObjectType(input, temp, temp2);
  __ GetObjectType(temp, temp2, temp2);


void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
  Register temp = scratch0();
  Handle<String> class_name = instr->hydrogen()->class_name();

  EmitClassOfTest(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_),
                  class_name, input, temp, temp2);

  EmitBranch(instr, eq, temp, Operand(class_name));


void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
  EmitBranch(instr, eq, temp, Operand(instr->map()));


void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
  Label true_label, done;
  Register result = ToRegister(instr->result());
  CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);

  __ Branch(&true_label, eq, result, Operand(zero_reg));
  __ li(result, Operand(factory()->false_value()));
  __ bind(&true_label);
  __ li(result, Operand(factory()->true_value()));
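// instanceof against a known global function: the receiver's map is compared
// against a patchable Cell (initially the hole) acting as an inline cache; on
// a miss, the deferred code calls the InstanceofStub and records the
// instruction delta back to the map-check site so the cell can be patched.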
void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
    DeferredInstanceOfKnownGlobal(LCodeGen* codegen,
                                  LInstanceOfKnownGlobal* instr)
      codegen()->DoDeferredInstanceOfKnownGlobal(instr_, &map_check_);
    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
    Label* map_check() { return &map_check_; }
    LInstanceOfKnownGlobal* instr_;

  DeferredInstanceOfKnownGlobal* deferred;
  deferred = new(zone()) DeferredInstanceOfKnownGlobal(this, instr);

  Label done, false_result;
  Register object = ToRegister(instr->value());
  Register result = ToRegister(instr->result());

  __ JumpIfSmi(object, &false_result);

  Register map = temp;
  __ bind(deferred->map_check());
  Handle<Cell> cell = factory()->NewCell(factory()->the_hole_value());
  __ li(at, Operand(Handle<Object>(cell)));
  __ BranchShort(&cache_miss, ne, map, Operand(at));

  __ bind(&cache_miss);
  __ LoadRoot(temp, Heap::kNullValueRootIndex);
  __ Branch(&false_result, eq, object, Operand(temp));
  __ Branch(&false_result, cc, temp, Operand(zero_reg));
  __ Branch(deferred->entry());

  __ bind(&false_result);
  __ LoadRoot(result, Heap::kFalseValueRootIndex);

  __ bind(deferred->exit());

  Register result = ToRegister(instr->result());
  InstanceofStub stub(flags);
  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
  LoadContextFromDeferred(instr->context());

  static const int kAdditionalDelta = 7;
  int delta = masm_->InstructionsGeneratedSince(map_check) + kAdditionalDelta;
  Label before_push_delta;
  __ bind(&before_push_delta);
  __ StoreToSafepointRegisterSlot(temp, temp);
  CallCodeGeneric(stub.GetCode(isolate()),
                  RelocInfo::CODE_TARGET,
                  RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
  LEnvironment* env = instr->GetDeferredLazyDeoptimizationEnvironment();
  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
  __ StoreToSafepointRegisterSlot(result, result);
void LCodeGen::DoCmpT(LCmpT* instr) {
  CallCode(ic, RelocInfo::CODE_TARGET, instr);

  Condition condition = ComputeCompareCondition(op);
  __ LoadRoot(ToRegister(instr->result()), Heap::kTrueValueRootIndex);
  ASSERT_EQ(1, masm()->InstructionsGeneratedSince(&check));
  __ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex);


void LCodeGen::DoReturn(LReturn* instr) {
  if (FLAG_trace && info()->IsOptimizing()) {
    __ CallRuntime(Runtime::kTraceExit, 1);
  if (info()->saves_caller_doubles()) {
    RestoreCallerDoubles();
  int no_frame_start = -1;
    no_frame_start = masm_->pc_offset();
  if (instr->has_constant_parameter_count()) {
    int parameter_count = ToInteger32(instr->constant_parameter_count());
    if (sp_delta != 0) {
      __ Addu(sp, sp, Operand(sp_delta));
    Register reg = ToRegister(instr->parameter_count());

  if (no_frame_start != -1) {
    info_->AddNoFrameRange(no_frame_start, masm_->pc_offset());
void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
  Register result = ToRegister(instr->result());
  __ li(at, Operand(Handle<Object>(instr->hydrogen()->cell().handle())));
  if (instr->hydrogen()->RequiresHoleCheck()) {
    __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
    DeoptimizeIf(eq, instr->environment(), result, Operand(at));


void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
  __ li(a2, Operand(instr->name()));
  CallCode(ic, RelocInfo::CODE_TARGET, instr);


void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
  Register cell = scratch0();
  __ li(cell, Operand(instr->hydrogen()->cell().handle()));
  if (instr->hydrogen()->RequiresHoleCheck()) {
    Register payload = ToRegister(instr->temp());
    __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
    DeoptimizeIf(eq, instr->environment(), payload, Operand(at));


void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
  Register context = ToRegister(instr->context());
  Register result = ToRegister(instr->result());
  if (instr->hydrogen()->RequiresHoleCheck()) {
    __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
    if (instr->hydrogen()->DeoptimizesOnHole()) {
      DeoptimizeIf(eq, instr->environment(), result, Operand(at));
      __ Branch(&is_not_hole, ne, result, Operand(at));
      __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
      __ bind(&is_not_hole);
void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
  Register context = ToRegister(instr->context());
  Register scratch = scratch0();

  Label skip_assignment;

  if (instr->hydrogen()->RequiresHoleCheck()) {
    __ lw(scratch, target);
    __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
    if (instr->hydrogen()->DeoptimizesOnHole()) {
      DeoptimizeIf(eq, instr->environment(), scratch, Operand(at));
      __ Branch(&skip_assignment, ne, scratch, Operand(at));

  __ sw(value, target);
  if (instr->hydrogen()->NeedsWriteBarrier()) {
        instr->hydrogen()->value()->IsHeapObject()
    __ RecordWriteContextSlot(context,

  __ bind(&skip_assignment);


void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
  HObjectAccess access = instr->hydrogen()->access();
  int offset = access.offset();
  Register object = ToRegister(instr->object());

  if (access.IsExternalMemory()) {
    Register result = ToRegister(instr->result());
    __ Load(result, operand, access.representation());

  if (instr->hydrogen()->representation().IsDouble()) {

  Register result = ToRegister(instr->result());
  if (!access.IsInobject()) {
  __ Load(result, operand, access.representation());


void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
  __ li(a2, Operand(instr->name()));
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
  Register scratch = scratch0();
  Register function = ToRegister(instr->function());
  Register result = ToRegister(instr->result());

  __ GetObjectType(function, result, scratch);
  __ Branch(&non_instance, ne, scratch, Operand(zero_reg));

  __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
  DeoptimizeIf(eq, instr->environment(), result, Operand(at));
  __ GetObjectType(result, scratch, scratch);

  __ bind(&non_instance);


void LCodeGen::DoLoadRoot(LLoadRoot* instr) {
  Register result = ToRegister(instr->result());
  __ LoadRoot(result, instr->index());
void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
  Register arguments = ToRegister(instr->arguments());
  Register result = ToRegister(instr->result());

  if (instr->length()->IsConstantOperand()) {
    int const_length = ToInteger32(LConstantOperand::cast(instr->length()));
    if (instr->index()->IsConstantOperand()) {
      int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
      int index = (const_length - const_index) + 1;
      __ li(at, Operand(const_length + 1));
      __ Subu(result, at, index);
      __ Addu(at, arguments, at);
  } else if (instr->index()->IsConstantOperand()) {
    Register length = ToRegister(instr->length());
    int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
    int loc = const_index - 1;
      __ Subu(result, length, Operand(loc));
      __ Addu(at, arguments, at);
      __ Addu(at, arguments, at);
    Register length = ToRegister(instr->length());
    __ Subu(result, length, index);
    __ Addu(result, result, 1);
    __ Addu(at, arguments, at);
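// Keyed loads are dispatched by DoLoadKeyed (further below) to one of three
// paths: typed/external arrays, fixed double arrays, and ordinary fixed
// arrays with an optional hole check.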
void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
  Register external_pointer = ToRegister(instr->elements());
  bool key_is_constant = instr->key()->IsConstantOperand();
  int constant_key = 0;
  if (key_is_constant) {
    constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
    if (constant_key & 0xF0000000) {
      Abort(kArrayIndexConstantValueTooBig);
  int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
      ? (element_size_shift - kSmiTagSize) : element_size_shift;
      (instr->additional_index() << element_size_shift) + additional_offset;

    if (key_is_constant) {
      __ Addu(scratch0(), external_pointer, constant_key << element_size_shift);
      __ sll(scratch0(), key, shift_size);
      __ Addu(scratch0(), scratch0(), external_pointer);
      __ cvt_d_s(result, result);
    Register result = ToRegister(instr->result());
        key, external_pointer, key_is_constant, constant_key,
        element_size_shift, shift_size,
        instr->additional_index(), additional_offset);
    switch (elements_kind) {
        __ lb(result, mem_operand);
        __ lbu(result, mem_operand);
        __ lh(result, mem_operand);
        __ lhu(result, mem_operand);
        __ lw(result, mem_operand);
        __ lw(result, mem_operand);
                     result, Operand(0x80000000));
3151 void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
3152 Register elements =
ToRegister(instr->elements());
3153 bool key_is_constant = instr->key()->IsConstantOperand();
3156 Register scratch = scratch0();
3162 (instr->additional_index() << element_size_shift);
3163 if (key_is_constant) {
3164 int constant_key =
ToInteger32(LConstantOperand::cast(instr->key()));
3165 if (constant_key & 0xF0000000) {
3166 Abort(kArrayIndexConstantValueTooBig);
3168 base_offset += constant_key << element_size_shift;
3170 __ Addu(scratch, elements, Operand(base_offset));
3172 if (!key_is_constant) {
3174 int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
3175 ? (element_size_shift -
kSmiTagSize) : element_size_shift;
3176 __ sll(at, key, shift_size);
3177 __ Addu(scratch, scratch, at);
3182 if (instr->hydrogen()->RequiresHoleCheck()) {
3189 void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
3190 Register elements = ToRegister(instr->elements());
3191 Register result = ToRegister(instr->result());
3192 Register scratch = scratch0();
3193 Register store_base = scratch;
3196 if (instr->key()->IsConstantOperand()) {
3197 LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
3199 instr->additional_index());
3200 store_base = elements;
3207 if (instr->hydrogen()->key()->representation().IsSmi()) {
3209 __ addu(scratch, elements, scratch);
3212 __ addu(scratch, elements, scratch);
3219 if (instr->hydrogen()->RequiresHoleCheck()) {
3221 __ SmiTst(result, scratch);
3222 DeoptimizeIf(ne, instr->environment(), scratch, Operand(zero_reg));
3224 __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
3225 DeoptimizeIf(eq, instr->environment(), result, Operand(scratch));
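// --- Illustrative sketch (not part of the original file) ---------------------
// The hole check above has two shapes: for Smi-only element kinds the loaded
// value must itself be a Smi (low tag bit clear) or the code deoptimizes; for
// other fast kinds the value merely must not be the-hole sentinel.  A
// standalone model of the decision, with kTheHoleSentinel standing in for the
// real the-hole object pointer:
static bool KeyedLoadNeedsDeopt(bool elements_are_smi_only,
                                uintptr_t loaded_value,
                                uintptr_t kTheHoleSentinel) {
  if (elements_are_smi_only) {
    return (loaded_value & 1) != 0;        // not a Smi -> deoptimize
  }
  return loaded_value == kTheHoleSentinel; // loaded the hole -> deoptimize
}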
3231 void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) {
3232 if (instr->is_typed_elements()) {
3233 DoLoadKeyedExternalArray(instr);
3234 } else if (instr->hydrogen()->representation().IsDouble()) {
3235 DoLoadKeyedFixedDoubleArray(instr);
3237 DoLoadKeyedFixedArray(instr);
3244 bool key_is_constant,
3248 int additional_index,
3249 int additional_offset) {
3250 int base_offset = (additional_index << element_size) + additional_offset;
3251 if (key_is_constant) {
3253 base_offset + (constant_key << element_size));
3256 if (additional_offset != 0) {
3257 if (shift_size >= 0) {
3258 __ sll(scratch0(), key, shift_size);
3259 __ Addu(scratch0(), scratch0(), Operand(base_offset));
3262 __ srl(scratch0(), key, 1);
3263 __ Addu(scratch0(), scratch0(), Operand(base_offset));
3265 __ Addu(scratch0(), base, scratch0());
3269 if (additional_index != 0) {
3270 additional_index *= 1 << (element_size - shift_size);
3271 __ Addu(scratch0(), key, Operand(additional_index));
3274 if (additional_index == 0) {
3275 if (shift_size >= 0) {
3276 __ sll(scratch0(), key, shift_size);
3277 __ Addu(scratch0(), base, scratch0());
3281 __ srl(scratch0(), key, 1);
3282 __ Addu(scratch0(), base, scratch0());
3287 if (shift_size >= 0) {
3288 __ sll(scratch0(), scratch0(), shift_size);
3289 __ Addu(scratch0(), base, scratch0());
3293 __ srl(scratch0(), scratch0(), 1);
3294 __ Addu(scratch0(), base, scratch0());
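// --- Illustrative sketch (not part of the original file) ---------------------
// PrepareKeyedOperand above folds the per-instruction constants into one byte
// offset: base_offset = (additional_index << element_size) + additional_offset,
// plus constant_key << element_size when the key is a compile-time constant.
// When the key is in a register it is shifted left by shift_size instead
// (element_size minus one for Smi keys, since a Smi already carries a factor
// of two in its tag).  The constant-key path, modeled standalone:
static int KeyedElementByteOffset(int constant_key,
                                  int element_size_shift,
                                  int additional_index,
                                  int additional_offset) {
  int base_offset = (additional_index << element_size_shift) + additional_offset;
  return base_offset + (constant_key << element_size_shift);
}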
3300 void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
3305 Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
3306 CallCode(ic, RelocInfo::CODE_TARGET, instr);
3310 void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
3311 Register scratch = scratch0();
3312 Register temp = scratch1();
3313 Register result = ToRegister(instr->result());
3315 if (instr->hydrogen()->from_inlined()) {
3319 Label done, adapted;
3326 __ Movn(result, fp, temp);
3327 __ Movz(result, scratch, temp);
3332 void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
3333 Register elem = ToRegister(instr->elements());
3334 Register result = ToRegister(instr->result());
3339 __ Addu(result, zero_reg, Operand(scope()->num_parameters()));
3340 __ Branch(&done, eq, fp, Operand(elem));
3346 __ SmiUntag(result);
3353 void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
3354 Register receiver = ToRegister(instr->receiver());
3355 Register function = ToRegister(instr->function());
3356 Register result = ToRegister(instr->result());
3357 Register scratch = scratch0();
3362 Label global_object, result_in_receiver;
3364 if (!instr->hydrogen()->known_function()) {
3373 int32_t strict_mode_function_mask =
3376 __ And(scratch, scratch, Operand(strict_mode_function_mask | native_mask));
3377 __ Branch(&result_in_receiver, ne, scratch, Operand(zero_reg));
3381 __ LoadRoot(scratch, Heap::kNullValueRootIndex);
3382 __ Branch(&global_object, eq, receiver, Operand(scratch));
3383 __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
3384 __ Branch(&global_object, eq, receiver, Operand(scratch));
3387 __ SmiTst(receiver, scratch);
3388 DeoptimizeIf(eq, instr->environment(), scratch, Operand(zero_reg));
3390 __ GetObjectType(receiver, scratch, scratch);
3391 DeoptimizeIf(lt, instr->environment(),
3394 __ Branch(&result_in_receiver);
3395 __ bind(&global_object);
3402 if (result.is(receiver)) {
3403 __ bind(&result_in_receiver);
3406 __ Branch(&result_ok);
3407 __ bind(&result_in_receiver);
3408 __ mov(result, receiver);
3409 __ bind(&result_ok);
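// --- Illustrative sketch (not part of the original file) ---------------------
// DoWrapReceiver above implements the sloppy-mode calling convention: strict
// and native functions keep the receiver untouched, null/undefined receivers
// are replaced with the global receiver, and Smis or non-spec-objects cause a
// deopt so the generic path can run ToObject.  A standalone decision model
// (the enum names are invented for this sketch):
enum ReceiverAction { kKeepReceiver, kUseGlobalReceiver, kDeoptimize };
static ReceiverAction ClassifyReceiver(bool strict_or_native,
                                       bool is_null_or_undefined,
                                       bool is_spec_object) {
  if (strict_or_native) return kKeepReceiver;
  if (is_null_or_undefined) return kUseGlobalReceiver;
  return is_spec_object ? kKeepReceiver : kDeoptimize;
}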
3414 void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
3415 Register receiver = ToRegister(instr->receiver());
3416 Register function = ToRegister(instr->function());
3417 Register length = ToRegister(instr->length());
3418 Register elements = ToRegister(instr->elements());
3419 Register scratch = scratch0();
3426 const uint32_t kArgumentsLimit = 1 * KB;
3427 DeoptimizeIf(hi, instr->environment(), length, Operand(kArgumentsLimit));
3432 __ Move(receiver, length);
3441 __ sll(scratch, length, 2);
3443 __ Addu(scratch, elements, scratch);
3446 __ Subu(length, length, Operand(1));
3448 __ sll(scratch, length, 2);
3451 ASSERT(instr->HasPointerMap());
3452 LPointerMap* pointers = instr->pointer_map();
3454 this, pointers, Safepoint::kLazyDeopt);
3457 ParameterCount actual(receiver);
3458 __ InvokeFunction(function, actual, CALL_FUNCTION, safepoint_generator);
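// --- Illustrative sketch (not part of the original file) ---------------------
// DoApplyArguments above first deoptimizes when length exceeds kArgumentsLimit
// (1 KB worth of argument slots), then walks the elements from the highest
// index down to 0, pushing each one so the arguments end up on the stack in
// call order.  The copy loop in plain C++, with push_back standing in for the
// emitted push:
#include <vector>
static void CopyApplyArguments(const std::vector<intptr_t>& elements,
                               std::vector<intptr_t>* stack) {
  for (int length = static_cast<int>(elements.size()); length > 0; --length) {
    stack->push_back(elements[length - 1]);  // highest index is pushed first
  }
}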
3462 void LCodeGen::DoPushArgument(LPushArgument* instr) {
3463 LOperand* argument = instr->value();
3464 if (argument->IsDoubleRegister() || argument->IsDoubleStackSlot()) {
3465 Abort(kDoPushArgumentNotImplementedForDoubleType);
3468 __ push(argument_reg);
3473 void LCodeGen::DoDrop(LDrop* instr) {
3474 __ Drop(instr->count());
3478 void LCodeGen::DoThisFunction(LThisFunction* instr) {
3479 Register result = ToRegister(instr->result());
3484 void LCodeGen::DoContext(LContext* instr) {
3486 Register result = ToRegister(instr->result());
3487 if (info()->IsOptimizing()) {
3496 void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
3498 __ li(scratch0(), instr->hydrogen()->pairs());
3499 __ li(scratch1(), Operand(Smi::FromInt(instr->hydrogen()->flags())));
3501 __ Push(cp, scratch0(), scratch1());
3502 CallRuntime(Runtime::kHiddenDeclareGlobals, 3, instr);
3506 void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
3509 LInstruction* instr,
3511 bool dont_adapt_arguments =
3513 bool can_invoke_directly =
3514 dont_adapt_arguments || formal_parameter_count == arity;
3516 LPointerMap* pointers = instr->pointer_map();
3518 if (can_invoke_directly) {
3519 if (a1_state == A1_UNINITIALIZED) {
3520 __ li(a1, function);
3528 if (dont_adapt_arguments) {
3529 __ li(a0, Operand(arity));
3537 RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
3540 ParameterCount count(arity);
3541 ParameterCount expected(formal_parameter_count);
3542 __ InvokeFunction(function, expected, count, CALL_FUNCTION, generator);
3551 Register result = ToRegister(instr->result());
3552 Register scratch = scratch0();
3556 __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
3557 DeoptimizeIf(ne, instr->environment(), scratch, Operand(at));
3560 Register exponent = scratch0();
3565 __ Move(result, input);
3567 __ Branch(&done, eq, at, Operand(zero_reg));
3572 PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
3576 Register tmp1 = input.is(a1) ? a0 : a1;
3577 Register tmp2 = input.is(a2) ? a0 : a2;
3578 Register tmp3 = input.is(a3) ? a0 : a3;
3579 Register tmp4 = input.is(t0) ? a0 : t0;
3583 Label allocated, slow;
3584 __ LoadRoot(tmp4, Heap::kHeapNumberMapRootIndex);
3585 __ AllocateHeapNumber(tmp1, tmp2, tmp3, tmp4, &slow);
3586 __ Branch(&allocated);
3591 CallRuntimeFromDeferred(Runtime::kHiddenAllocateHeapNumber, 0, instr,
3597 __ LoadFromSafepointRegisterSlot(input, input);
3600 __ bind(&allocated);
3608 __ StoreToSafepointRegisterSlot(tmp1, result);
3615 void LCodeGen::EmitIntegerMathAbs(LMathAbs* instr) {
3617 Register result = ToRegister(instr->result());
3621 __ mov(result, input);
3622 __ subu(result, zero_reg, input);
3624 DeoptimizeIf(lt, instr->environment(), result, Operand(zero_reg));
3629 void LCodeGen::DoMathAbs(LMathAbs* instr) {
3633 DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen, LMathAbs* instr)
3636 codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
3638 virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
3643 Representation r = instr->hydrogen()->value()->representation();
3647 __ abs_d(result, input);
3648 } else if (r.IsSmiOrInteger32()) {
3649 EmitIntegerMathAbs(instr);
3652 DeferredMathAbsTaggedHeapNumber* deferred =
3653 new(zone()) DeferredMathAbsTaggedHeapNumber(this, instr);
3656 __ JumpIfNotSmi(input, deferred->entry());
3658 EmitIntegerMathAbs(instr);
3659 __ bind(deferred->exit());
3664 void LCodeGen::DoMathFloor(LMathFloor* instr) {
3666 Register result = ToRegister(instr->result());
3667 Register scratch1 = scratch0();
3668 Register except_flag = ToRegister(instr->temp());
3678 DeoptimizeIf(ne, instr->environment(), except_flag, Operand(zero_reg));
3683 __ Branch(&done, ne, result, Operand(zero_reg));
3684 __ mfc1(scratch1, input.high());
3686 DeoptimizeIf(ne, instr->environment(), scratch1, Operand(zero_reg));
3692 void LCodeGen::DoMathRound(LMathRound* instr) {
3694 Register result = ToRegister(instr->result());
3696 Register scratch = scratch0();
3697 Label done, check_sign_on_zero;
3700 __ mfc1(result, input.high());
3709 __ mov(result, zero_reg);
3711 __ Branch(&check_sign_on_zero);
3719 DeoptimizeIf(ge, instr->environment(), scratch,
3725 __ Move(double_scratch0(), 0.5);
3726 __ add_d(double_scratch0(), input, double_scratch0());
3730 __ mfc1(result, double_scratch0().high());
3731 __ Xor(result, result, Operand(scratch));
3734 DeoptimizeIf(lt, instr->environment(), result,
3740 __ Branch(&skip2, ge, result, Operand(zero_reg));
3741 __ mov(result, zero_reg);
3746 Register except_flag = scratch;
3754 DeoptimizeIf(ne, instr->environment(), except_flag, Operand(zero_reg));
3758 __ Branch(&done, ne, result, Operand(zero_reg));
3759 __ bind(&check_sign_on_zero);
3760 __ mfc1(scratch, input.high());
3762 DeoptimizeIf(ne, instr->environment(), scratch, Operand(zero_reg));
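// --- Illustrative sketch (not part of the original file) ---------------------
// DoMathRound above implements JavaScript Math.round by adding 0.5 and taking
// the floor, with extra checks so that inputs in (-0.5, 0) keep the sign of
// zero (the -0 case deoptimizes when the sign is observable).  The numeric
// idea in plain C++ (ignoring the floating-point edge cases the real code
// handles via the FPU flags):
#include <cmath>
static double JsMathRoundSketch(double x) {
  if (x >= -0.5 && x < 0.0) return -0.0;  // small negatives round to -0
  return std::floor(x + 0.5);             // halves round toward +infinity
}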
3768 void LCodeGen::DoMathSqrt(LMathSqrt* instr) {
3771 __ sqrt_d(result, input);
3775 void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
3780 ASSERT(!input.is(result));
3790 __ neg_d(result, temp);
3794 __ sqrt_d(result, result);
3799 void LCodeGen::DoPower(LPower* instr) {
3800 Representation exponent_type = instr->hydrogen()->right()->representation();
3803 ASSERT(!instr->right()->IsDoubleRegister() ||
3805 ASSERT(!instr->right()->IsRegister() ||
3810 if (exponent_type.IsSmi()) {
3813 } else if (exponent_type.IsTagged()) {
3815 __ JumpIfSmi(a2, &no_deopt);
3817 __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
3818 DeoptimizeIf(ne, instr->environment(), t3, Operand(at));
3822 } else if (exponent_type.IsInteger32()) {
3826 ASSERT(exponent_type.IsDouble());
3833 void LCodeGen::DoMathExp(LMathExp* instr) {
3842 masm(), input, result, double_scratch1, double_scratch2,
3843 temp1, temp2, scratch0());
3847 void LCodeGen::DoMathLog(LMathLog* instr) {
3848 __ PrepareCallCFunction(0, 1, scratch0());
3850 __ CallCFunction(ExternalReference::math_log_double_function(isolate()),
3856 void LCodeGen::DoMathClz32(LMathClz32* instr) {
3858 Register result = ToRegister(instr->result());
3859 __ Clz(result, input);
3863 void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
3866 ASSERT(instr->HasPointerMap());
3868 Handle<JSFunction> known_function = instr->hydrogen()->known_function();
3869 if (known_function.is_null()) {
3870 LPointerMap* pointers = instr->pointer_map();
3872 ParameterCount count(instr->arity());
3875 CallKnownFunction(known_function,
3876 instr->hydrogen()->formal_parameter_count(),
3879 A1_CONTAINS_TARGET);
3884 void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
3887 LPointerMap* pointers = instr->pointer_map();
3890 if (instr->target()->IsConstantOperand()) {
3891 LConstantOperand* target = LConstantOperand::cast(instr->target());
3893 generator.BeforeCall(__ CallSize(code, RelocInfo::CODE_TARGET));
3894 __ Call(code, RelocInfo::CODE_TARGET);
3896 ASSERT(instr->target()->IsRegister());
3897 Register target = ToRegister(instr->target());
3898 generator.BeforeCall(__ CallSize(target));
3902 generator.AfterCall();
3906 void LCodeGen::DoCallJSFunction(LCallJSFunction* instr) {
3910 if (instr->hydrogen()->pass_argument_count()) {
3911 __ li(a0, Operand(instr->arity()));
3921 RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
3925 void LCodeGen::DoCallFunction(LCallFunction* instr) {
3930 int arity = instr->arity();
3931 CallFunctionStub stub(arity, instr->hydrogen()->function_flags());
3932 CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
3936 void LCodeGen::DoCallNew(LCallNew* instr) {
3941 __ li(a0, Operand(instr->arity()));
3943 __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
3945 CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
3949 void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
3954 __ li(a0, Operand(instr->arity()));
3955 __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
3956 ElementsKind kind = instr->hydrogen()->elements_kind();
3962 if (instr->arity() == 0) {
3963 ArrayNoArgumentConstructorStub stub(kind, override_mode);
3964 CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
3965 } else if (instr->arity() == 1) {
3972 __ Branch(&packed_case, eq, t1, Operand(zero_reg));
3975 ArraySingleArgumentConstructorStub stub(holey_kind, override_mode);
3976 CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
3978 __ bind(&packed_case);
3981 ArraySingleArgumentConstructorStub stub(kind, override_mode);
3982 CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
3985 ArrayNArgumentsConstructorStub stub(kind, override_mode);
3986 CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
3991 void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
3992 CallRuntime(instr->function(), instr->arity(), instr);
3996 void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) {
3997 Register function = ToRegister(instr->function());
3998 Register code_object = ToRegister(instr->code_object());
3999 __ Addu(code_object, code_object,
4006 void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) {
4007 Register result = ToRegister(instr->result());
4008 Register base = ToRegister(instr->base_object());
4009 if (instr->offset()->IsConstantOperand()) {
4010 LConstantOperand* offset = LConstantOperand::cast(instr->offset());
4013 Register offset = ToRegister(instr->offset());
4014 __ Addu(result, base, offset);
4019 void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
4020 Representation representation = instr->representation();
4022 Register object = ToRegister(instr->object());
4023 Register scratch = scratch0();
4024 HObjectAccess access = instr->hydrogen()->access();
4025 int offset = access.offset();
4027 if (access.IsExternalMemory()) {
4030 __ Store(value, operand, representation);
4034 Handle<Map> transition = instr->transition();
4036 instr->hydrogen()->value()->IsHeapObject()
4039 ASSERT(!(representation.IsSmi() &&
4040 instr->value()->IsConstantOperand() &&
4041 !IsSmi(LConstantOperand::cast(instr->value()))));
4042 if (representation.IsHeapObject()) {
4044 if (!instr->hydrogen()->value()->type().IsHeapObject()) {
4045 __ SmiTst(value, scratch);
4046 DeoptimizeIf(eq, instr->environment(), scratch, Operand(zero_reg));
4051 } else if (representation.IsDouble()) {
4052 ASSERT(transition.is_null());
4053 ASSERT(access.IsInobject());
4054 ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
4060 if (!transition.is_null()) {
4061 __ li(scratch, Operand(transition));
4063 if (instr->hydrogen()->NeedsWriteBarrierForMap()) {
4066 __ RecordWriteField(object,
4079 if (access.IsInobject()) {
4081 __ Store(value, operand, representation);
4082 if (instr->hydrogen()->NeedsWriteBarrier()) {
4084 __ RecordWriteField(object,
4096 __ Store(value, operand, representation);
4097 if (instr->hydrogen()->NeedsWriteBarrier()) {
4100 __ RecordWriteField(scratch,
4113 void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
4119 __ li(a2, Operand(instr->name()));
4121 CallCode(ic, RelocInfo::CODE_TARGET, instr);
4125 void LCodeGen::ApplyCheckIf(Condition condition,
4126 LBoundsCheck* check,
4128 const Operand& src2) {
4129 if (FLAG_debug_code && check->hydrogen()->skip_check()) {
4132 __ stop("eliminated bounds check failed");
4135 DeoptimizeIf(condition, check->environment(), src1, src2);
4140 void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
4141 if (instr->hydrogen()->skip_check()) return;
4143 Condition condition = instr->hydrogen()->allow_equality() ? hi : hs;
4144 if (instr->index()->IsConstantOperand()) {
4145 int constant_index =
4146 ToInteger32(LConstantOperand::cast(instr->index()));
4147 if (instr->hydrogen()->length()->representation().IsSmi()) {
4150 __ li(at, Operand(constant_index));
4152 ApplyCheckIf(condition,
4152 ApplyCheckIf(condition,
4157 ApplyCheckIf(condition,
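// --- Illustrative sketch (not part of the original file) ---------------------
// DoBoundsCheck above picks `hi` (unsigned >) when equality with the length is
// allowed and `hs` (unsigned >=) otherwise, and deoptimizes when the condition
// holds for (index, length).  The same predicate in plain C++:
static bool BoundsCheckFails(uint32_t index, uint32_t length,
                             bool allow_equality) {
  return allow_equality ? (index > length) : (index >= length);
}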
4165 void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
4166 Register external_pointer = ToRegister(instr->elements());
4169 bool key_is_constant = instr->key()->IsConstantOperand();
4170 int constant_key = 0;
4171 if (key_is_constant) {
4172 constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
4173 if (constant_key & 0xF0000000) {
4174 Abort(kArrayIndexConstantValueTooBig);
4180 int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
4181 ? (element_size_shift - kSmiTagSize) : element_size_shift;
4191 (instr->additional_index() << element_size_shift) + additional_offset;
4192 Register address = scratch0();
4194 if (key_is_constant) {
4195 if (constant_key != 0) {
4196 __ Addu(address, external_pointer,
4197 Operand(constant_key << element_size_shift));
4199 address = external_pointer;
4202 __ sll(address, key, shift_size);
4203 __ Addu(address, external_pointer, address);
4208 __ cvt_s_d(double_scratch0(), value);
4209 __ swc1(double_scratch0(), MemOperand(address, base_offset));
4216 key, external_pointer, key_is_constant, constant_key,
4217 element_size_shift, shift_size,
4218 instr->additional_index(), additional_offset);
4219 switch (elements_kind) {
4226 __ sb(value, mem_operand);
4232 __ sh(value, mem_operand);
4238 __ sw(value, mem_operand);
4259 void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
4261 Register elements = ToRegister(instr->elements());
4262 Register scratch = scratch0();
4264 bool key_is_constant = instr->key()->IsConstantOperand();
4265 Label not_nan, done;
4270 if (key_is_constant) {
4271 int constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
4272 if (constant_key & 0xF0000000) {
4273 Abort(kArrayIndexConstantValueTooBig);
4275 __ Addu(scratch, elements,
4276 Operand((constant_key << element_size_shift) +
4279 int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
4280 ? (element_size_shift - kSmiTagSize) : element_size_shift;
4281 __ Addu(scratch, elements,
4284 __ Addu(scratch, scratch, at);
4287 if (instr->NeedsCanonicalization()) {
4290 __ BranchF(NULL, &is_nan, eq, value, value);
4291 __ Branch(&not_nan);
4295 __ LoadRoot(at, Heap::kNanValueRootIndex);
4297 __ sdc1(double_scratch, MemOperand(scratch, instr->additional_index() <<
4298 element_size_shift));
4303 __ sdc1(value, MemOperand(scratch, instr->additional_index() <<
4304 element_size_shift));
4309 void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
4311 Register elements = ToRegister(instr->elements());
4314 Register scratch = scratch0();
4315 Register store_base = scratch;
4319 if (instr->key()->IsConstantOperand()) {
4320 ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
4321 LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
4323 instr->additional_index());
4324 store_base = elements;
4330 if (instr->hydrogen()->key()->representation().IsSmi()) {
4332 __ addu(scratch, elements, scratch);
4335 __ addu(scratch, elements, scratch);
4341 if (instr->hydrogen()->NeedsWriteBarrier()) {
4343 instr->hydrogen()->value()->IsHeapObject()
4347 __ RecordWrite(elements,
4358 void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) {
4360 if (instr->is_typed_elements()) {
4361 DoStoreKeyedExternalArray(instr);
4362 } else if (instr->hydrogen()->value()->representation().IsDouble()) {
4363 DoStoreKeyedFixedDoubleArray(instr);
4365 DoStoreKeyedFixedArray(instr);
4370 void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
4376 Handle<Code> ic = (instr->strict_mode() == STRICT)
4377 ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
4378 : isolate()->builtins()->KeyedStoreIC_Initialize();
4379 CallCode(ic, RelocInfo::CODE_TARGET, instr);
4383 void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
4384 Register object_reg = ToRegister(instr->object());
4385 Register scratch = scratch0();
4387 Handle<Map> from_map = instr->original_map();
4388 Handle<Map> to_map = instr->transitioned_map();
4392 Label not_applicable;
4394 __ Branch(&not_applicable, ne, scratch, Operand(from_map));
4397 Register new_map_reg = ToRegister(instr->new_map_temp());
4398 __ li(new_map_reg, Operand(to_map));
4405 PushSafepointRegistersScope scope(
4406 this, Safepoint::kWithRegistersAndDoubles);
4407 __ mov(a0, object_reg);
4408 __ li(a1, Operand(to_map));
4409 bool is_js_array = from_map->instance_type() == JS_ARRAY_TYPE;
4410 TransitionElementsKindStub stub(from_kind, to_kind, is_js_array);
4412 RecordSafepointWithRegistersAndDoubles(
4413 instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
4415 __ bind(&not_applicable);
4419 void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
4420 Register object = ToRegister(instr->object());
4422 Label no_memento_found;
4423 __ TestJSArrayForAllocationMemento(object, temp, &no_memento_found,
4424 ne, &no_memento_found);
4425 DeoptimizeIf(al, instr->environment());
4426 __ bind(&no_memento_found);
4430 void LCodeGen::DoStringAdd(LStringAdd* instr) {
4434 StringAddStub stub(instr->hydrogen()->flags(),
4435 instr->hydrogen()->pretenure_flag());
4436 CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
4440 void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
4443 DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
4446 codegen()->DoDeferredStringCharCodeAt(instr_);
4448 virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
4450 LStringCharCodeAt* instr_;
4453 DeferredStringCharCodeAt* deferred =
4454 new(zone()) DeferredStringCharCodeAt(this, instr);
4460 __ bind(deferred->exit());
4465 Register string = ToRegister(instr->string());
4466 Register result = ToRegister(instr->result());
4467 Register scratch = scratch0();
4472 __ mov(result, zero_reg);
4474 PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
4478 if (instr->index()->IsConstantOperand()) {
4479 int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
4487 CallRuntimeFromDeferred(Runtime::kHiddenStringCharCodeAt, 2, instr,
4491 __ StoreToSafepointRegisterSlot(v0, result);
4495 void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
4498 DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
4501 codegen()->DoDeferredStringCharFromCode(instr_);
4503 virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
4505 LStringCharFromCode* instr_;
4508 DeferredStringCharFromCode* deferred =
4509 new(zone()) DeferredStringCharFromCode(this, instr);
4511 ASSERT(instr->hydrogen()->value()->representation().IsInteger32());
4512 Register char_code = ToRegister(instr->char_code());
4513 Register result = ToRegister(instr->result());
4514 Register scratch = scratch0();
4515 ASSERT(!char_code.is(result));
4517 __ Branch(deferred->entry(), hi,
4519 __ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex);
4521 __ Addu(result, result, scratch);
4523 __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
4524 __ Branch(deferred->entry(), eq, result, Operand(scratch));
4525 __ bind(deferred->exit());
4530 Register char_code = ToRegister(instr->char_code());
4531 Register result = ToRegister(instr->result());
4536 __ mov(result, zero_reg);
4538 PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
4539 __ SmiTag(char_code);
4541 CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr, instr->context());
4542 __ StoreToSafepointRegisterSlot(v0, result);
4546 void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
4547 LOperand* input = instr->value();
4548 ASSERT(input->IsRegister() || input->IsStackSlot());
4549 LOperand* output = instr->result();
4550 ASSERT(output->IsDoubleRegister());
4551 FPURegister single_scratch = double_scratch0().low();
4552 if (input->IsStackSlot()) {
4553 Register scratch = scratch0();
4555 __ mtc1(scratch, single_scratch);
4563 void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
4564 LOperand* input = instr->value();
4565 LOperand* output = instr->result();
4567 FPURegister dbl_scratch = double_scratch0();
4573 void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
4576 DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr)
4579 codegen()->DoDeferredNumberTagIU(instr_,
4585 virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
4587 LNumberTagI* instr_;
4592 Register overflow = scratch0();
4594 DeferredNumberTagI* deferred = new(zone()) DeferredNumberTagI(this, instr);
4595 __ SmiTagCheckOverflow(dst, src, overflow);
4596 __ BranchOnOverflow(deferred->entry(), overflow);
4597 __ bind(deferred->exit());
4601 void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
4604 DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr)
4607 codegen()->DoDeferredNumberTagIU(instr_,
4613 virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
4615 LNumberTagU* instr_;
4619 Register result = ToRegister(instr->result());
4621 DeferredNumberTagU* deferred = new(zone()) DeferredNumberTagU(this, instr);
4623 __ SmiTag(result, input);
4624 __ bind(deferred->exit());
4632 IntegerSignedness signedness) {
4636 Register tmp1 = scratch0();
4646 __ SmiUntag(src, dst);
4647 __ Xor(src, src, Operand(0x80000000));
4649 __ mtc1(src, dbl_scratch);
4650 __ cvt_d_w(dbl_scratch, dbl_scratch);
4652 __ mtc1(src, dbl_scratch);
4653 __ Cvt_d_uw(dbl_scratch, dbl_scratch, f22);
4656 if (FLAG_inline_new) {
4657 __ LoadRoot(tmp3, Heap::kHeapNumberMapRootIndex);
4668 __ mov(dst, zero_reg);
4671 PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
4679 __ CallRuntimeSaveDoubles(Runtime::kHiddenAllocateHeapNumber);
4680 RecordSafepointWithRegisters(
4681 instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
4683 __ StoreToSafepointRegisterSlot(v0, dst);
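// --- Illustrative sketch (not part of the original file) ---------------------
// DoDeferredNumberTagIU above is reached when the integer does not fit in a
// Smi: the value is converted to a double (Cvt_d_uw for the unsigned case), a
// HeapNumber is allocated (inline when FLAG_inline_new, otherwise via the
// runtime under a safepoint), and the double is stored into it.  The value
// conversion step, modeled standalone:
static double UntaggedIntToDouble(int32_t value, bool is_unsigned) {
  return is_unsigned ? static_cast<double>(static_cast<uint32_t>(value))
                     : static_cast<double>(value);
}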
4695 void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
4698 DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
4701 codegen()->DoDeferredNumberTagD(instr_);
4703 virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
4705 LNumberTagD* instr_;
4709 Register scratch = scratch0();
4714 DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr);
4715 if (FLAG_inline_new) {
4716 __ LoadRoot(scratch, Heap::kHeapNumberMapRootIndex);
4718 __ AllocateHeapNumber(reg, temp1, temp2, scratch, deferred->entry(),
4721 __ Branch(deferred->entry());
4723 __ bind(deferred->exit());
4735 __ mov(reg, zero_reg);
4737 PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
4744 __ CallRuntimeSaveDoubles(Runtime::kHiddenAllocateHeapNumber);
4745 RecordSafepointWithRegisters(
4746 instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
4748 __ StoreToSafepointRegisterSlot(v0, reg);
4752 void LCodeGen::DoSmiTag(LSmiTag* instr) {
4753 HChange* hchange = instr->hydrogen();
4755 Register output = ToRegister(instr->result());
4758 __ And(at, input, Operand(0xc0000000));
4759 DeoptimizeIf(ne, instr->environment(), at, Operand(zero_reg));
4763 __ SmiTagCheckOverflow(output, input, at);
4764 DeoptimizeIf(lt, instr->environment(), at, Operand(zero_reg));
4766 __ SmiTag(output, input);
4771 void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
4772 Register scratch = scratch0();
4774 Register result = ToRegister(instr->result());
4775 if (instr->needs_check()) {
4779 __ SmiUntag(result, input);
4780 DeoptimizeIf(ne, instr->environment(), scratch, Operand(zero_reg));
4782 __ SmiUntag(result, input);
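// --- Illustrative sketch (not part of the original file) ---------------------
// On 32-bit targets a Smi stores a 31-bit integer shifted left by one with a
// zero tag bit; SmiUntag is an arithmetic shift right, and the optional check
// above deoptimizes when the low bit is set (the value is a heap object
// pointer, not a Smi).  The bit manipulation standalone:
static int32_t SmiTagValue(int32_t n) {
  return static_cast<int32_t>(static_cast<uint32_t>(n) << 1);
}
static int32_t SmiUntagValue(int32_t smi) { return smi >> 1; }  // arithmetic shift
static bool IsSmiEncoded(int32_t v) { return (v & 1) == 0; }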
4787 void LCodeGen::EmitNumberUntagD(Register input_reg,
4789 bool can_convert_undefined_to_nan,
4790 bool deoptimize_on_minus_zero,
4793 Register scratch = scratch0();
4794 Label convert, load_smi, done;
4797 __ UntagAndJumpIfSmi(scratch, input_reg, &load_smi);
4800 __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
4801 if (can_convert_undefined_to_nan) {
4802 __ Branch(&convert, ne, scratch, Operand(at));
4804 DeoptimizeIf(ne, env, scratch, Operand(at));
4808 if (deoptimize_on_minus_zero) {
4809 __ mfc1(at, result_reg.low());
4810 __ Branch(&done, ne, at, Operand(zero_reg));
4811 __ mfc1(scratch, result_reg.high());
4815 if (can_convert_undefined_to_nan) {
4818 __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
4819 DeoptimizeIf(ne, env, input_reg, Operand(at));
4820 __ LoadRoot(scratch, Heap::kNanValueRootIndex);
4825 __ SmiUntag(scratch, input_reg);
4831 __ mtc1(scratch, result_reg);
4832 __ cvt_d_w(result_reg, result_reg);
4838 Register input_reg = ToRegister(instr->value());
4839 Register scratch1 = scratch0();
4840 Register scratch2 = ToRegister(instr->temp());
4844 ASSERT(!scratch1.is(input_reg) && !scratch1.is(scratch2));
4845 ASSERT(!scratch2.is(input_reg) && !scratch2.is(scratch1));
4852 __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
4856 if (instr->truncating()) {
4859 Label no_heap_number, check_bools, check_false;
4862 __ mov(scratch2, input_reg);
4863 __ TruncateHeapNumberToI(input_reg, scratch2);
4868 __ bind(&no_heap_number);
4869 __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
4870 __ Branch(&check_bools, ne, input_reg, Operand(at));
4873 __ mov(input_reg, zero_reg);
4875 __ bind(&check_bools);
4876 __ LoadRoot(at, Heap::kTrueValueRootIndex);
4877 __ Branch(&check_false, ne, scratch2, Operand(at));
4879 __ li(input_reg, Operand(1));
4881 __ bind(&check_false);
4882 __ LoadRoot(at, Heap::kFalseValueRootIndex);
4883 DeoptimizeIf(ne, instr->environment(), scratch2, Operand(at));
4885 __ mov(input_reg, zero_reg);
4888 DeoptimizeIf(ne, instr->environment(), scratch1, Operand(at));
4891 __ ldc1(double_scratch,
4894 Register except_flag = scratch2;
4904 DeoptimizeIf(ne, instr->environment(), except_flag, Operand(zero_reg));
4907 __ Branch(&done, ne, input_reg, Operand(zero_reg));
4909 __ mfc1(scratch1, double_scratch.high());
4911 DeoptimizeIf(ne, instr->environment(), scratch1, Operand(zero_reg));
4918 void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
4921 DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
4924 codegen()->DoDeferredTaggedToI(instr_);
4926 virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
4931 LOperand* input = instr->value();
4932 ASSERT(input->IsRegister());
4933 ASSERT(input->Equals(instr->result()));
4937 if (instr->hydrogen()->value()->representation().IsSmi()) {
4938 __ SmiUntag(input_reg);
4940 DeferredTaggedToI* deferred = new(zone()) DeferredTaggedToI(this, instr);
4943 __ JumpIfNotSmi(input_reg, deferred->entry());
4946 __ SmiUntag(input_reg);
4947 __ bind(deferred->exit());
4952 void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
4953 LOperand* input = instr->value();
4954 ASSERT(input->IsRegister());
4955 LOperand* result = instr->result();
4956 ASSERT(result->IsDoubleRegister());
4961 HValue* value = instr->hydrogen()->value();
4965 EmitNumberUntagD(input_reg, result_reg,
4966 instr->hydrogen()->can_convert_undefined_to_nan(),
4967 instr->hydrogen()->deoptimize_on_minus_zero(),
4968 instr->environment(),
4973 void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
4974 Register result_reg = ToRegister(instr->result());
4975 Register scratch1 = scratch0();
4978 if (instr->truncating()) {
4979 __ TruncateDoubleToI(result_reg, double_input);
4981 Register except_flag = LCodeGen::scratch1();
4992 DeoptimizeIf(ne, instr->environment(), except_flag, Operand(zero_reg));
4996 __ Branch(&done, ne, result_reg, Operand(zero_reg));
4997 __ mfc1(scratch1, double_input.high());
4999 DeoptimizeIf(ne, instr->environment(), scratch1, Operand(zero_reg));
5006 void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
5007 Register result_reg = ToRegister(instr->result());
5008 Register scratch1 = LCodeGen::scratch0();
5011 if (instr->truncating()) {
5012 __ TruncateDoubleToI(result_reg, double_input);
5014 Register except_flag = LCodeGen::scratch1();
5025 DeoptimizeIf(ne, instr->environment(), except_flag, Operand(zero_reg));
5029 __ Branch(&done, ne, result_reg, Operand(zero_reg));
5030 __ mfc1(scratch1, double_input.high());
5032 DeoptimizeIf(ne, instr->environment(), scratch1, Operand(zero_reg));
5036 __ SmiTagCheckOverflow(result_reg, result_reg, scratch1);
5037 DeoptimizeIf(lt, instr->environment(), scratch1, Operand(zero_reg));
5041 void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
5042 LOperand* input = instr->value();
5044 DeoptimizeIf(ne, instr->environment(), at, Operand(zero_reg));
5048 void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
5049 if (!instr->hydrogen()->value()->IsHeapObject()) {
5050 LOperand* input = instr->value();
5052 DeoptimizeIf(eq, instr->environment(), at, Operand(zero_reg));
5057 void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
5059 Register scratch = scratch0();
5061 __ GetObjectType(input, scratch, scratch);
5063 if (instr->hydrogen()->is_interval_check()) {
5066 instr->hydrogen()->GetCheckInterval(&first, &last);
5069 if (first == last) {
5070 DeoptimizeIf(ne, instr->environment(), scratch, Operand(first));
5072 DeoptimizeIf(lo, instr->environment(), scratch, Operand(first));
5075 DeoptimizeIf(hi, instr->environment(), scratch, Operand(last));
5081 instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);
5085 __ And(at, scratch, mask);
5086 DeoptimizeIf(tag == 0 ? ne : eq, instr->environment(),
5087 at, Operand(zero_reg));
5089 __ And(scratch, scratch, Operand(mask));
5090 DeoptimizeIf(ne, instr->environment(), scratch, Operand(tag));
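// --- Illustrative sketch (not part of the original file) ---------------------
// DoCheckInstanceType above supports two forms: an interval check (deoptimize
// when the instance type falls outside [first, last]) and a mask-and-tag check
// (deoptimize when (type & mask) != tag; when tag == 0 a single And plus
// branch suffices).  Both predicates standalone:
static bool InstanceTypeIntervalCheckFails(int type, int first, int last) {
  return type < first || type > last;
}
static bool InstanceTypeMaskCheckFails(int type, int mask, int tag) {
  return (type & mask) != tag;
}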
5096 void LCodeGen::DoCheckValue(LCheckValue* instr) {
5098 Handle<HeapObject> object = instr->hydrogen()->object().handle();
5100 if (isolate()->heap()->InNewSpace(*object)) {
5102 Handle<Cell> cell = isolate()->factory()->NewCell(object);
5103 __ li(at, Operand(Handle<Object>(cell)));
5105 DeoptimizeIf(ne, instr->environment(), reg,
5108 DeoptimizeIf(ne, instr->environment(), reg,
5116 PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
5118 __ mov(cp, zero_reg);
5119 __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance);
5120 RecordSafepointWithRegisters(
5121 instr->pointer_map(), 1, Safepoint::kNoLazyDeopt);
5122 __ StoreToSafepointRegisterSlot(v0, scratch0());
5124 __ SmiTst(scratch0(), at);
5125 DeoptimizeIf(eq, instr->environment(), at, Operand(zero_reg));
5129 void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
5132 DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object)
5134 SetExit(check_maps());
5137 codegen()->DoDeferredInstanceMigration(instr_, object_);
5139 Label* check_maps() { return &check_maps_; }
5140 virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
5147 if (instr->hydrogen()->CanOmitMapChecks()) return;
5148 Register map_reg = scratch0();
5149 LOperand* input = instr->value();
5150 ASSERT(input->IsRegister());
5154 DeferredCheckMaps* deferred = NULL;
5155 if (instr->hydrogen()->has_migration_target()) {
5156 deferred = new(zone()) DeferredCheckMaps(this, instr, reg);
5157 __ bind(deferred->check_maps());
5160 UniqueSet<Map> map_set = instr->hydrogen()->map_set();
5162 for (int i = 0; i < map_set.size() - 1; i++) {
5163 Handle<Map> map = map_set.at(i).handle();
5164 __ CompareMapAndBranch(map_reg, map, &success, eq, &success);
5166 Handle<Map> map = map_set.at(map_set.size() - 1).handle();
5168 if (instr->hydrogen()->has_migration_target()) {
5169 __ Branch(deferred->entry(), ne, map_reg, Operand(map));
5171 DeoptimizeIf(ne, instr->environment(), map_reg, Operand(map));
5178 void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
5180 Register result_reg = ToRegister(instr->result());
5182 __ ClampDoubleToUint8(result_reg, value_reg, temp_reg);
5186 void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
5187 Register unclamped_reg = ToRegister(instr->unclamped());
5188 Register result_reg = ToRegister(instr->result());
5189 __ ClampUint8(result_reg, unclamped_reg);
5193 void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
5194 Register scratch = scratch0();
5195 Register input_reg = ToRegister(instr->unclamped());
5196 Register result_reg = ToRegister(instr->result());
5198 Label is_smi, done, heap_number;
5201 __ UntagAndJumpIfSmi(scratch, input_reg, &is_smi);
5205 __ Branch(&heap_number, eq, scratch, Operand(factory()->heap_number_map()));
5209 DeoptimizeIf(ne, instr->environment(), input_reg,
5210 Operand(factory()->undefined_value()));
5211 __ mov(result_reg, zero_reg);
5215 __ bind(&heap_number);
5218 __ ClampDoubleToUint8(result_reg, double_scratch0(), temp_reg);
5222 __ ClampUint8(result_reg, scratch);
5228 void LCodeGen::DoDoubleBits(LDoubleBits* instr) {
5230 Register result_reg = ToRegister(instr->result());
5231 if (instr->hydrogen()->bits() == HDoubleBits::HIGH) {
5232 __ FmoveHigh(result_reg, value_reg);
5234 __ FmoveLow(result_reg, value_reg);
5239 void LCodeGen::DoConstructDouble(LConstructDouble* instr) {
5243 __ Move(result_reg, lo_reg, hi_reg);
5247 void LCodeGen::DoAllocate(LAllocate* instr) {
5250 DeferredAllocate(LCodeGen* codegen, LAllocate* instr)
5253 codegen()->DoDeferredAllocate(instr_);
5255 virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
5260 DeferredAllocate* deferred =
5261 new(zone()) DeferredAllocate(this, instr);
5263 Register result = ToRegister(instr->result());
5264 Register scratch = ToRegister(instr->temp1());
5265 Register scratch2 = ToRegister(instr->temp2());
5269 if (instr->hydrogen()->MustAllocateDoubleAligned()) {
5272 if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
5273 ASSERT(!instr->hydrogen()->IsOldDataSpaceAllocation());
5274 ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
5276 } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
5277 ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
5280 if (instr->size()->IsConstantOperand()) {
5283 __ Allocate(size, result, scratch, scratch2, deferred->entry(), flags);
5285 __ jmp(deferred->entry());
5297 __ bind(deferred->exit());
5299 if (instr->hydrogen()->MustPrefillWithFiller()) {
5300 if (instr->size()->IsConstantOperand()) {
5302 __ li(scratch, Operand(size));
5310 __ li(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
5311 __ Addu(at, result, Operand(scratch));
5314 __ Branch(&loop, ge, scratch, Operand(zero_reg));
5321 Register result = ToRegister(instr->result());
5326 __ mov(result, zero_reg);
5328 PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
5329 if (instr->size()->IsRegister()) {
5331 ASSERT(!size.is(result));
5340 instr->hydrogen()->MustAllocateDoubleAligned());
5341 if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
5342 ASSERT(!instr->hydrogen()->IsOldDataSpaceAllocation());
5343 ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
5345 } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
5346 ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
5353 CallRuntimeFromDeferred(
5354 Runtime::kHiddenAllocateInTargetSpace, 2, instr, instr->context());
5355 __ StoreToSafepointRegisterSlot(v0, result);
5359 void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
5363 CallRuntime(Runtime::kToFastProperties, 1, instr);
5367 void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
5375 int literal_offset =
5377 __ li(t3, instr->hydrogen()->literals());
5379 __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
5380 __ Branch(&materialized, ne, a1, Operand(at));
5384 __ li(t2, Operand(Smi::FromInt(instr->hydrogen()->literal_index())));
5385 __ li(t1, Operand(instr->hydrogen()->pattern()));
5386 __ li(t0, Operand(instr->hydrogen()->flags()));
5387 __ Push(t3, t2, t1, t0);
5388 CallRuntime(Runtime::kHiddenMaterializeRegExpLiteral, 4, instr);
5391 __ bind(&materialized);
5393 Label allocated, runtime_allocate;
5395 __ Allocate(size, v0, a2, a3, &runtime_allocate, TAG_OBJECT);
5398 __ bind(&runtime_allocate);
5401 CallRuntime(Runtime::kHiddenAllocateInNewSpace, 1, instr);
5404 __ bind(&allocated);
5413 if ((size % (2 * kPointerSize)) != 0) {
5420 void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
5424 bool pretenure = instr->hydrogen()->pretenure();
5425 if (!pretenure && instr->hydrogen()->has_no_literals()) {
5426 FastNewClosureStub stub(instr->hydrogen()->strict_mode(),
5427 instr->hydrogen()->is_generator());
5428 __ li(a2, Operand(instr->hydrogen()->shared_info()));
5429 CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
5431 __ li(a2, Operand(instr->hydrogen()->shared_info()));
5432 __ li(a1, Operand(pretenure ? factory()->true_value()
5433 : factory()->false_value()));
5434 __ Push(cp, a2, a1);
5435 CallRuntime(Runtime::kHiddenNewClosure, 3, instr);
5440 void LCodeGen::DoTypeof(LTypeof* instr) {
5444 CallRuntime(Runtime::kTypeof, 1, instr);
5448 void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
5452 Operand cmp2 = Operand(no_reg);
5454 Condition final_branch_condition = EmitTypeofIs(instr->TrueLabel(chunk_),
5455 instr->FalseLabel(chunk_),
5457 instr->type_literal(),
5462 ASSERT(!cmp2.is_reg() || cmp2.rm().is_valid());
5465 EmitBranch(instr, final_branch_condition, cmp1, cmp2);
5470 Condition LCodeGen::EmitTypeofIs(Label* true_label,
5473 Handle<String> type_name,
5480 Register scratch = scratch0();
5481 if (type_name->Equals(heap()->number_string())) {
5482 __ JumpIfSmi(input, true_label);
5484 __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
5487 final_branch_condition = eq;
5489 } else if (type_name->Equals(heap()->string_string())) {
5490 __ JumpIfSmi(input, false_label);
5491 __ GetObjectType(input, input, scratch);
5499 cmp2 = Operand(zero_reg);
5500 final_branch_condition = eq;
5502 } else if (type_name->Equals(heap()->symbol_string())) {
5503 __ JumpIfSmi(input, false_label);
5504 __ GetObjectType(input, input, scratch);
5507 final_branch_condition = eq;
5509 } else if (type_name->Equals(heap()->boolean_string())) {
5510 __ LoadRoot(at, Heap::kTrueValueRootIndex);
5512 __ LoadRoot(at, Heap::kFalseValueRootIndex);
5514 cmp2 = Operand(input);
5515 final_branch_condition = eq;
5517 } else if (FLAG_harmony_typeof && type_name->Equals(heap()->null_string())) {
5518 __ LoadRoot(at, Heap::kNullValueRootIndex);
5520 cmp2 = Operand(input);
5521 final_branch_condition = eq;
5523 } else if (type_name->Equals(heap()->undefined_string())) {
5524 __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
5528 __ JumpIfSmi(input, false_label);
5534 cmp2 = Operand(zero_reg);
5535 final_branch_condition = ne;
5537 } else if (type_name->Equals(heap()->function_string())) {
5539 __ JumpIfSmi(input, false_label);
5540 __ GetObjectType(input, scratch, input);
5544 final_branch_condition = eq;
5546 } else if (type_name->Equals(heap()->object_string())) {
5547 __ JumpIfSmi(input, false_label);
5548 if (!FLAG_harmony_typeof) {
5549 __ LoadRoot(at, Heap::kNullValueRootIndex);
5552 Register map = input;
5553 __ GetObjectType(input, map, scratch);
5554 __ Branch(false_label,
5563 cmp2 = Operand(zero_reg);
5564 final_branch_condition = eq;
5568 cmp2 = Operand(zero_reg);
5569 __ Branch(false_label);
5572 return final_branch_condition;
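// --- Illustrative sketch (not part of the original file) ---------------------
// EmitTypeofIs above compares against each possible typeof string literal and
// emits a specialized test (Smi or HeapNumber map for "number", string
// instance types for "string", the oddballs for "boolean"/"undefined", and so
// on), falling through to an always-false branch for unknown literals.  A
// high-level model of the dispatch, with the per-value classification passed
// in as precomputed booleans:
#include <string>
static bool TypeofMatches(const std::string& literal,
                          bool is_number, bool is_string, bool is_symbol,
                          bool is_boolean, bool is_undefined,
                          bool is_function, bool is_object) {
  if (literal == "number")    return is_number;
  if (literal == "string")    return is_string;
  if (literal == "symbol")    return is_symbol;
  if (literal == "boolean")   return is_boolean;
  if (literal == "undefined") return is_undefined;
  if (literal == "function")  return is_function;
  if (literal == "object")    return is_object;
  return false;  // unknown literal: always false, as in the fall-through above
}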
5576 void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
5579 EmitIsConstructCall(temp1, scratch0());
5581 EmitBranch(instr, eq, temp1,
5586 void LCodeGen::EmitIsConstructCall(Register temp1, Register temp2) {
5587 ASSERT(!temp1.is(temp2));
5592 Label check_frame_marker;
5594 __ Branch(&check_frame_marker, ne, temp2,
5599 __ bind(&check_frame_marker);
5604 void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
5605 if (!info()->IsStub()) {
5608 int current_pc = masm()->pc_offset();
5609 if (current_pc < last_lazy_deopt_pc_ + space_needed) {
5610 int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
5612 while (padding_size > 0) {
5618 last_lazy_deopt_pc_ = masm()->pc_offset();
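// --- Illustrative sketch (not part of the original file) ---------------------
// EnsureSpaceForLazyDeopt above guarantees that at least `space_needed` bytes
// of instructions separate two lazy-deopt call sites, padding with nops when
// the naturally emitted code is too short (each MIPS instruction is 4 bytes).
// The padding computation standalone:
static int LazyDeoptPaddingBytes(int current_pc, int last_lazy_deopt_pc,
                                 int space_needed) {
  int padding = last_lazy_deopt_pc + space_needed - current_pc;
  return padding > 0 ? padding : 0;
}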
5622 void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
5623 last_lazy_deopt_pc_ = masm()->pc_offset();
5624 ASSERT(instr->HasEnvironment());
5626 RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
5627 safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
5631 void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
5641 Comment(";;; deoptimize: %s", instr->hydrogen()->reason());
5642 DeoptimizeIf(al, instr->environment(), type, zero_reg, Operand(zero_reg));
5646 void LCodeGen::DoDummy(LDummy* instr) {
5651 void LCodeGen::DoDummyUse(LDummyUse* instr) {
5657 PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
5658 LoadContextFromDeferred(instr->context());
5659 __ CallRuntimeSaveDoubles(Runtime::kHiddenStackGuard);
5660 RecordSafepointWithLazyDeopt(
5661 instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
5662 ASSERT(instr->HasEnvironment());
5664 safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
5668 void LCodeGen::DoStackCheck(LStackCheck* instr) {
5671 DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
5674 codegen()->DoDeferredStackCheck(instr_);
5676 virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
5678 LStackCheck* instr_;
5681 ASSERT(instr->HasEnvironment());
5685 if (instr->hydrogen()->is_function_entry()) {
5688 __ LoadRoot(at, Heap::kStackLimitRootIndex);
5689 __ Branch(&done, hs, sp, Operand(at));
5690 ASSERT(instr->context()->IsRegister());
5692 CallCode(isolate()->builtins()->StackCheck(),
5693 RelocInfo::CODE_TARGET,
5697 ASSERT(instr->hydrogen()->is_backwards_branch());
5699 DeferredStackCheck* deferred_stack_check =
5700 new(zone()) DeferredStackCheck(this, instr);
5701 __ LoadRoot(at, Heap::kStackLimitRootIndex);
5702 __ Branch(deferred_stack_check->entry(), lo, sp, Operand(at));
5704 __ bind(instr->done_label());
5705 deferred_stack_check->SetExit(instr->done_label());
5706 RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
5714 void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
5722 ASSERT(!environment->HasBeenRegistered());
5723 RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
5725 GenerateOsrPrologue();
5729 void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
5730 Register result = ToRegister(instr->result());
5731 Register object = ToRegister(instr->object());
5732 __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
5733 DeoptimizeIf(eq, instr->environment(), object, Operand(at));
5735 Register null_value = t1;
5736 __ LoadRoot(null_value, Heap::kNullValueRootIndex);
5737 DeoptimizeIf(eq, instr->environment(), object, Operand(null_value));
5740 DeoptimizeIf(eq, instr->environment(), at, Operand(zero_reg));
5743 __ GetObjectType(object, a1, a1);
5746 Label use_cache, call_runtime;
5748 __ CheckEnumCache(null_value, &call_runtime);
5751 __ Branch(&use_cache);
5754 __ bind(&call_runtime);
5756 CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr);
5760 __ LoadRoot(at, Heap::kMetaMapRootIndex);
5761 DeoptimizeIf(ne, instr->environment(), a1, Operand(at));
5762 __ bind(&use_cache);
5766 void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
5768 Register result = ToRegister(instr->result());
5769 Label load_cache, done;
5770 __ EnumLength(result, map);
5772 __ li(result, Operand(isolate()->factory()->empty_fixed_array()));
5775 __ bind(&load_cache);
5776 __ LoadInstanceDescriptors(map, result);
5781 DeoptimizeIf(eq, instr->environment(), result, Operand(zero_reg));
5787 void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
5788 Register object = ToRegister(instr->value());
5791 DeoptimizeIf(ne, instr->environment(), map, Operand(scratch0()));
5795 void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
5796 Register object = ToRegister(instr->object());
5798 Register result = ToRegister(instr->result());
5799 Register scratch = scratch0();
5801 Label out_of_object, done;
5806 __ Addu(scratch, object, scratch);
5811 __ bind(&out_of_object);
5814 __ Subu(scratch, result, scratch);
static Handle< Code > initialize_stub(Isolate *isolate, StrictMode strict_mode)
enable upcoming ES6 features enable harmony block scoping enable harmony enable harmony proxies enable harmony generators enable harmony numeric enable harmony string enable harmony math functions harmony_scoping harmony_symbols harmony_collections harmony_iteration harmony_strings harmony_scoping harmony_maths tracks arrays with only smi values Optimize object Array DOM strings and string pretenure call new trace pretenuring decisions of HAllocate instructions track fields with only smi values track fields with heap values track_fields track_fields Enables optimizations which favor memory size over execution speed use string slices optimization filter maximum number of GVN fix point iterations use function inlining use allocation folding eliminate write barriers targeting allocations in optimized code maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining crankshaft harvests type feedback from stub cache trace check elimination phase hydrogen tracing filter NULL
static DwVfpRegister FromAllocationIndex(int index)
static const int kLengthOffset
void FinishCode(Handle< Code > code)
static const int kHashFieldOffset
static const int kBitFieldOffset
MemOperand ToHighMemOperand(LOperand *op) const
void DoDeferredStringCharFromCode(LStringCharFromCode *instr)
void DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal *instr, Label *map_check)
const intptr_t kSmiTagMask
static const int kCodeEntryOffset
static const int kPrototypeOrInitialMapOffset
static const int kValueOffset
enable upcoming ES6 features enable harmony block scoping enable harmony enable harmony proxies enable harmony generators enable harmony numeric enable harmony string enable harmony math functions harmony_scoping harmony_symbols harmony_collections harmony_iteration harmony_strings harmony_scoping harmony_maths tracks arrays with only smi values Optimize object Array DOM strings and string pretenure call new trace pretenuring decisions of HAllocate instructions track fields with only smi values track fields with heap values track_fields track_fields Enables optimizations which favor memory size over execution speed use string slices optimization filter maximum number of GVN fix point iterations use function inlining use allocation folding eliminate write barriers targeting allocations in optimized code maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining crankshaft harvests type feedback from stub cache trace check elimination phase hydrogen tracing filter trace hydrogen to given file name trace inlining decisions trace store elimination trace all use positions trace global value numbering trace hydrogen escape analysis trace the tracking of allocation sites trace map generalization environment for every instruction deoptimize every n garbage collections put a break point before deoptimizing deoptimize uncommon cases use on stack replacement trace array bounds check elimination perform array index dehoisting use load elimination use store elimination use constant folding eliminate unreachable code number of stress runs when picking a function to watch for shared function not JSFunction itself flushes the cache of optimized code for closures on every GC functions with arguments object maximum number of escape analysis fix point iterations allow uint32 values on optimize frames if they are used only in safe operations track concurrent recompilation artificial compilation delay in ms concurrent on stack replacement do not emit check maps for constant values that have a leaf map
static const int kEnumCacheOffset
const uint32_t kTwoByteStringTag
int StackSlotOffset(int index)
RegisterType type() const
static Smi * FromInt(int value)
Smi * ToSmi(LConstantOperand *op) const
bool NeedsEagerFrame() const
SafepointGenerator(LCodeGen *codegen, LPointerMap *pointers, Safepoint::DeoptMode mode)
Register EmitLoadRegister(LOperand *op, Register scratch)
static Handle< Code > GetUninitialized(Isolate *isolate, Token::Value op)
static const int kDataOffset
bool IsSmi(LConstantOperand *op) const
static Handle< T > cast(Handle< S > that)
static const int kGlobalReceiverOffset
static Representation Integer32()
static const int kExponentBias
static const unsigned int kContainsCachedArrayIndexMask
AllocationSiteOverrideMode
void DoDeferredMathAbsTaggedHeapNumber(LMathAbs *instr)
#define ASSERT(condition)
void DoDeferredStringCharCodeAt(LStringCharCodeAt *instr)
static const int kContextOffset
const int kPointerSizeLog2
static const int kInObjectFieldCount
const uint32_t kStringRepresentationMask
MemOperand GlobalObjectOperand()
static const int kCallerFPOffset
PerThreadAssertScopeDebugOnly< DEFERRED_HANDLE_DEREFERENCE_ASSERT, true > AllowDeferredHandleDereference
static Handle< Code > initialize_stub(Isolate *isolate, ExtraICState extra_state)
static const int kInstanceClassNameOffset
int WhichPowerOf2(uint32_t x)
bool IsSimpleMapChangeTransition(ElementsKind from_kind, ElementsKind to_kind)
Variable * parameter(int index) const
MemOperand ContextOperand(Register context, int index)
static const int kContextOffset
void DoDeferredStackCheck(LStackCheck *instr)
DwVfpRegister EmitLoadDoubleRegister(LOperand *op, SwVfpRegister flt_scratch, DwVfpRegister dbl_scratch)
int LookupDestination(int block_id) const
Condition ReverseCondition(Condition cond)
Operand ToOperand(LOperand *op)
const uint32_t kSlotsZapValue
int32_t WhichPowerOf2Abs(int32_t x)
enable upcoming ES6 features enable harmony block scoping enable harmony enable harmony proxies enable harmony generators enable harmony numeric enable harmony string enable harmony math functions harmony_scoping harmony_symbols harmony_collections harmony_iteration harmony_strings harmony_scoping harmony_maths tracks arrays with only smi values Optimize object Array DOM strings and string pretenure call new trace pretenuring decisions of HAllocate instructions track fields with only smi values track fields with heap values track_fields track_fields Enables optimizations which favor memory size over execution speed use string slices optimization filter maximum number of GVN fix point iterations use function inlining use allocation folding eliminate write barriers targeting allocations in optimized code maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining crankshaft harvests type feedback from stub cache trace check elimination phase hydrogen tracing filter trace hydrogen to given file name trace inlining decisions trace store elimination trace all use positions trace global value numbering trace hydrogen escape analysis trace the tracking of allocation sites trace map generalization environment for every instruction deoptimize every n garbage collections put a break point before deoptimizing deoptimize uncommon cases use on stack replacement trace array bounds check elimination perform array index dehoisting use load elimination use store elimination use constant folding eliminate unreachable code number of stress runs when picking a function to watch for shared function not JSFunction itself flushes the cache of optimized code for closures on every GC functions with arguments object maximum number of escape analysis fix point iterations allow uint32 values on optimize frames if they are used only in safe operations track concurrent recompilation artificial compilation delay in ms concurrent on stack replacement do not emit check maps for constant values that have a leaf deoptimize the optimized code if the layout of the maps changes number of stack frames inspected by the profiler percentage of ICs that must have type info to allow optimization extra verbose compilation tracing generate extra emit comments in code disassembly enable use of SSE3 instructions if available enable use of CMOV instruction if available enable use of VFP3 instructions if available enable use of NEON instructions if enable use of SDIV and UDIV instructions if enable loading bit constant by means of movw movt instruction enable unaligned accesses for enable use of d16 d31 registers on ARM this requires VFP3 force all emitted branches to be in long mode(MIPS only)") DEFINE_string(expose_natives_as
DwVfpRegister DoubleRegister
enable upcoming ES6 features enable harmony block scoping enable harmony enable harmony proxies enable harmony generators enable harmony numeric enable harmony string enable harmony math functions harmony_scoping harmony_symbols harmony_collections harmony_iteration harmony_strings harmony_scoping harmony_maths tracks arrays with only smi values Optimize object size
friend class LEnvironment
static const int kLengthOffset
STATIC_ASSERT(sizeof(CPURegister)==sizeof(Register))
static const int kExponentShift
static const int kValueOffset
bool IsFastPackedElementsKind(ElementsKind kind)
const uint32_t kHoleNanUpper32
static const int kDontAdaptArgumentsSentinel
void DoDeferredNumberTagD(LNumberTagD *instr)
static uint32_t update(uint32_t previous, AllocationSpace value)
static Address GetDeoptimizationEntry(Isolate *isolate, int id, BailoutType type, GetEntryMode mode=ENSURE_ENTRY_CODE)
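A minimal usage sketch, not copied from this file: how a code generator would typically resolve the eager deoptimization entry for a bailout id before emitting the branch to it (`isolate` and `bailout_id` are assumed names):
  // Ensure the deopt table entry exists and fetch its address.
  Address entry =
      Deoptimizer::GetDeoptimizationEntry(isolate, bailout_id, Deoptimizer::EAGER);
  // The code generator then emits a (possibly conditional) jump to `entry`.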
void check(i::Vector< const uint8_t > string)
static void MaybeCallEntryHook(MacroAssembler *masm)
DwVfpRegister ToDoubleRegister(LOperand *op) const
void DoDeferredAllocate(LAllocate *instr)
static int GetDeoptimizationId(Isolate *isolate, Address addr, BailoutType type)
const uint32_t kHoleNanLower32
static const int kMaxRegularHeapObjectSize
static Register FromAllocationIndex(int index)
static const int kCallerSPOffset
static const int kCacheStampOffset
bool IsFixedTypedArrayElementsKind(ElementsKind kind)
static const int kPropertiesOffset
int32_t ToInteger32(LConstantOperand *op) const
int num_parameters() const
bool IsInteger32(LConstantOperand *op) const
static const int kMarkerOffset
bool IsFastSmiElementsKind(ElementsKind kind)
static void Generate(MacroAssembler *masm, Register string, Register index, Register result, Label *call_runtime)
Handle< Object > ToHandle(LConstantOperand *op) const
static const int kHeaderSize
#define STATIC_ASCII_VECTOR(x)
friend class BlockTrampolinePoolScope
void DoDeferredInstanceMigration(LCheckMaps *instr, Register object)
void DoParallelMove(LParallelMove *move)
double ToDouble(LConstantOperand *op) const
int ElementsKindToShiftSize(ElementsKind elements_kind)
Vector< const char > CStrVector(const char *data)
static int OffsetOfElementAt(int index)
SwVfpRegister low() const
static int SizeFor(int length)
bool NeedsDeferredFrame() const
static const int kHeaderSize
void Load(const v8::FunctionCallbackInfo< v8::Value > &args)
static const int kMapOffset
static const int kValueOffset
static const int kFixedFrameSizeFromFp
virtual ~SafepointGenerator()
Handle< T > handle(T *t, Isolate *isolate)
MemOperand FieldMemOperand(Register object, int offset)
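FieldMemOperand compensates for heap-object pointer tagging; a sketch of the conventional definition (the real one lives in the macro-assembler headers, not in this file):
  // Heap object pointers carry a tag in their low bits, so a field access
  // subtracts kHeapObjectTag to address the field relative to the raw pointer.
  inline MemOperand FieldMemOperand(Register object, int offset) {
    return MemOperand(object, offset - kHeapObjectTag);
  }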
static const int kHasNonInstancePrototype
void WriteTranslation(LEnvironment *environment, Translation *translation)
static const int kFunctionOffset
static const uint32_t kSignMask
static const int kNotDeoptimizationEntry
int32_t ToRepresentation(LConstantOperand *op, const Representation &r) const
static const int kHeaderSize
static AllocationSiteMode GetMode(ElementsKind boilerplate_elements_kind)
Condition NegateCondition(Condition cond)
#define ASSERT_EQ(v1, v2)
MemOperand PrepareKeyedOperand(Register key, Register base, bool key_is_constant, int constant_key, int element_size, int shift_size, int additional_index, int additional_offset)
kInstanceClassNameOffset
kNeedsAccessCheckBit
kRemovePrototypeBit
kIsExpressionBit
kAllowLazyCompilation
kUsesArguments
formal_parameter_count
static const int kConstructorOffset
const uint32_t kOneByteStringTag
static const int kIsUndetectable
virtual void AfterCall() const V8_OVERRIDE
static const int kHeaderSize
Register ToRegister(LOperand *op) const
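A sketch of how ToRegister (and its double-register counterpart above) conventionally maps a register-allocated LOperand back to a machine register via Register::FromAllocationIndex, declared earlier in this listing; this is an illustrative assumption, not a verbatim copy of this file:
  Register LCodeGen::ToRegister(LOperand* op) const {
    ASSERT(op->IsRegister());  // constants and stack slots are handled by other accessors
    return Register::FromAllocationIndex(op->index());
  }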
void Add(const T &element, AllocationPolicy allocator=AllocationPolicy())
void DoDeferredTaggedToI(LTaggedToI *instr)
static const int kInstrSize
static const int kPrototypeOffset
#define RUNTIME_ENTRY(name, nargs, ressize)
void DoDeferredNumberTagIU(LInstruction *instr, LOperand *value, LOperand *temp1, LOperand *temp2, IntegerSignedness signedness)
static void EmitMathExp(MacroAssembler *masm, DwVfpRegister input, DwVfpRegister result, DwVfpRegister double_scratch1, DwVfpRegister double_scratch2, Register temp1, Register temp2, Register temp3)
MemOperand ToMemOperand(LOperand *op) const
bool IsNextEmittedBlock(int block_id) const
static const int kExponentBits
static const int kCompilerHintsOffset
RAStatus GetRAState() const
LCodeGen(LChunk *chunk, MacroAssembler *assembler, CompilationInfo *info)
static const int kSharedFunctionInfoOffset
static const int kMaxValue
friend class SafepointGenerator
ElementsKind GetHoleyElementsKind(ElementsKind packed_kind)
static const int32_t kMaxOneByteCharCode
static const int kExponentOffset
bool EvalComparison(Token::Value op, double op1, double op2)
static uint32_t encode(bool value)
const uint32_t kStringEncodingMask
static const int kInstanceTypeOffset
virtual void BeforeCall(int call_size) const V8_OVERRIDE
static const int kMantissaOffset
friend class LDeferredCode