class SafepointGenerator : public CallWrapper {
 public:
  SafepointGenerator(LCodeGen* codegen,
                     LPointerMap* pointers,
                     Safepoint::DeoptMode mode)
      : codegen_(codegen), pointers_(pointers), deopt_mode_(mode) { }

  virtual void AfterCall() const {
    codegen_->RecordSafepoint(pointers_, deopt_mode_);
  }

 private:
  LCodeGen* codegen_;
  LPointerMap* pointers_;
  Safepoint::DeoptMode deopt_mode_;
};
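// Code generation proceeds in phases: prologue, instruction body, deferred
// code, and finally the safepoint table (see GenerateCode below). A note on
// the class above: whenever generated code performs a call, AfterCall()
// records which stack slots and registers hold tagged pointers at that call
// site, so the GC can walk optimized frames.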
bool LCodeGen::GenerateCode() {
  HPhase phase("Z_Code generation", chunk());
  ASSERT(is_unused());
  status_ = GENERATING;
  CpuFeatures::Scope scope(FPU);

  CodeStub::GenerateFPStubs();

  return GeneratePrologue() &&
         GenerateBody() &&
         GenerateDeferredCode() &&
         GenerateSafepointTable();
}
void LCodeGen::FinishCode(Handle<Code> code) {
  ASSERT(is_done());
  code->set_stack_slots(GetStackSlotCount());
  code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
  PopulateDeoptimizationData(code);
}
void LChunkBuilder::Abort(const char* reason) {
  info()->set_bailout_reason(reason);
  status_ = ABORTED;
}
void LCodeGen::Comment(const char* format, ...) {
  if (!FLAG_code_comments) return;
  char buffer[4 * KB];
  StringBuilder builder(buffer, ARRAY_SIZE(buffer));
  va_list arguments;
  va_start(arguments, format);
  builder.AddFormattedList(format, arguments);
  va_end(arguments);

  // Copy the string before recording it in the assembler to avoid
  // issues when the stack allocated buffer goes out of scope.
  size_t length = builder.position();
  Vector<char> copy = Vector<char>::New(length + 1);
  memcpy(copy.start(), builder.Finalize(), copy.length());
  masm()->RecordComment(copy.start());
}
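// A note on the register conventions in the prologue below, as used by this
// MIPS port: a1 holds the callee's JSFunction, cp the context, fp the
// caller's frame pointer, and ra the return address. t1 is expected to be
// zero for method calls and non-zero for function calls, which drives the
// receiver replacement for strict-mode and native functions.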
bool LCodeGen::GeneratePrologue() {
  ASSERT(is_generating());

#ifdef DEBUG
  if (strlen(FLAG_stop_at) > 0 &&
      info_->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
    __ stop("stop_at");
  }
#endif

  // Strict mode functions and builtins need to replace the receiver
  // with undefined when called as functions (without an explicit
  // receiver object).
  if (!info_->is_classic_mode() || info_->is_native()) {
    Label ok;
    __ Branch(&ok, eq, t1, Operand(zero_reg));
    int receiver_offset = scope()->num_parameters() * kPointerSize;
    __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
    __ sw(a2, MemOperand(sp, receiver_offset));
    __ bind(&ok);
  }

  __ Push(ra, fp, cp, a1);
  __ Addu(fp, sp, Operand(2 * kPointerSize));  // Adjust FP to point to saved FP.

  // Reserve space for the stack slots needed by the code.
  int slots = GetStackSlotCount();
  if (slots > 0) {
    if (FLAG_debug_code) {
      __ li(a0, Operand(slots));
      __ li(a2, Operand(kSlotsZapValue));
      Label loop;
      __ bind(&loop);
      __ push(a2);
      __ Subu(a0, a0, 1);
      __ Branch(&loop, ne, a0, Operand(zero_reg));
    } else {
      __ Subu(sp, sp, Operand(slots * kPointerSize));
    }
  }

  // Possibly allocate a local context.
  int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
  if (heap_slots > 0) {
    Comment(";;; Allocate local context");
    // Argument to NewContext is the function, which is in a1.
    __ push(a1);
    if (heap_slots <= FastNewContextStub::kMaximumSlots) {
      FastNewContextStub stub(heap_slots);
      __ CallStub(&stub);
    } else {
      __ CallRuntime(Runtime::kNewFunctionContext, 1);
    }
    RecordSafepoint(Safepoint::kNoLazyDeopt);
    // Copy any necessary parameters into the context.
    int num_parameters = scope()->num_parameters();
    for (int i = 0; i < num_parameters; i++) {
      Variable* var = scope()->parameter(i);
      if (var->IsContextSlot()) {
        int parameter_offset = StandardFrameConstants::kCallerSPOffset +
            (num_parameters - 1 - i) * kPointerSize;
        // Load parameter from stack.
        __ lw(a0, MemOperand(fp, parameter_offset));
        // Store it in the context.
        MemOperand target = ContextOperand(cp, var->index());
        __ sw(a0, target);
        // Update the write barrier. This clobbers a3 and a0.
        __ RecordWriteContextSlot(
            cp, target.offset(), a0, a3, kRAHasBeenSaved, kSaveFPRegs);
      }
    }
    Comment(";;; End allocate local context");
  }

  // Trace the call.
  if (FLAG_trace) {
    __ CallRuntime(Runtime::kTraceEnter, 0);
  }
  EnsureSpaceForLazyDeopt();
  return !is_aborted();
}
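// GenerateBody below walks the LChunk's instruction list in order. Labels
// that were replaced during register allocation suppress emission of the
// unreachable instructions that follow them until the next live label is
// reached; everything else compiles itself via CompileToNative().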
bool LCodeGen::GenerateBody() {
  ASSERT(is_generating());
  bool emit_instructions = true;
  for (current_instruction_ = 0;
       !is_aborted() && current_instruction_ < instructions_->length();
       current_instruction_++) {
    LInstruction* instr = instructions_->at(current_instruction_);
    if (instr->IsLabel()) {
      LLabel* label = LLabel::cast(instr);
      emit_instructions = !label->HasReplacement();
    }

    if (emit_instructions) {
      Comment(";;; @%d: %s.", current_instruction_, instr->Mnemonic());
      instr->CompileToNative(this);
    }
  }
  return !is_aborted();
}
bool LCodeGen::GenerateDeferredCode() {
  ASSERT(is_generating());
  if (deferred_.length() > 0) {
    for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
      LDeferredCode* code = deferred_[i];
      __ bind(code->entry());
      Comment(";;; Deferred code @%d: %s.",
              code->instruction_index(),
              code->instr()->Mnemonic());
      code->Generate();
      __ jmp(code->exit());
    }
  }

  // Deferred code is the last part of the instruction sequence. Mark
  // the generated code as done unless we bailed out.
  if (!is_aborted()) status_ = DONE;
  return !is_aborted();
}
bool LCodeGen::GenerateDeoptJumpTable() {
  Abort("Unimplemented: GenerateDeoptJumpTable");
  return false;
}
bool LCodeGen::GenerateSafepointTable() {
  ASSERT(is_done());
  safepoints_.Emit(masm(), GetStackSlotCount());
  return !is_aborted();
}
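// The helpers that follow map LOperands onto machine locations: ToRegister
// and ToDoubleRegister for allocated registers, ToOperand for constants,
// and ToMemOperand for stack slots. The EmitLoad* variants go one step
// further and materialize the value into a scratch register or FPU register
// when the operand is a constant or lives on the stack.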
Register LCodeGen::EmitLoadRegister(LOperand* op, Register scratch) {
  if (op->IsRegister()) {
    return ToRegister(op->index());
  } else if (op->IsConstantOperand()) {
    LConstantOperand* const_op = LConstantOperand::cast(op);
    HConstant* constant = chunk_->LookupConstant(const_op);
    Handle<Object> literal = constant->handle();
    Representation r = chunk_->LookupLiteralRepresentation(const_op);
    if (r.IsInteger32()) {
      ASSERT(literal->IsNumber());
      __ li(scratch, Operand(static_cast<int32_t>(literal->Number())));
    } else if (r.IsDouble()) {
      Abort("EmitLoadRegister: Unsupported double immediate.");
    } else {
      ASSERT(r.IsTagged());
      if (literal->IsSmi()) {
        __ li(scratch, Operand(literal));
      } else {
        __ LoadHeapObject(scratch, Handle<HeapObject>::cast(literal));
      }
    }
    return scratch;
  } else if (op->IsStackSlot() || op->IsArgument()) {
    __ lw(scratch, ToMemOperand(op));
    return scratch;
  }
  UNREACHABLE();
  return scratch;
}


DoubleRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
  ASSERT(op->IsDoubleRegister());
  return ToDoubleRegister(op->index());
}
DoubleRegister LCodeGen::EmitLoadDoubleRegister(LOperand* op,
                                                FloatRegister flt_scratch,
                                                DoubleRegister dbl_scratch) {
  if (op->IsDoubleRegister()) {
    return ToDoubleRegister(op->index());
  } else if (op->IsConstantOperand()) {
    LConstantOperand* const_op = LConstantOperand::cast(op);
    HConstant* constant = chunk_->LookupConstant(const_op);
    Handle<Object> literal = constant->handle();
    Representation r = chunk_->LookupLiteralRepresentation(const_op);
    if (r.IsInteger32()) {
      ASSERT(literal->IsNumber());
      __ li(at, Operand(static_cast<int32_t>(literal->Number())));
      __ mtc1(at, flt_scratch);
      __ cvt_d_w(dbl_scratch, flt_scratch);
      return dbl_scratch;
    } else if (r.IsDouble()) {
      Abort("unsupported double immediate");
    } else if (r.IsTagged()) {
      Abort("unsupported tagged immediate");
    }
  } else if (op->IsStackSlot() || op->IsArgument()) {
    MemOperand mem_op = ToMemOperand(op);
    __ ldc1(dbl_scratch, mem_op);
    return dbl_scratch;
  }
  UNREACHABLE();
  return dbl_scratch;
}
Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  ASSERT(chunk_->LookupLiteralRepresentation(op).IsTagged());
  return constant->handle();
}


bool LCodeGen::IsInteger32(LConstantOperand* op) const {
  return chunk_->LookupLiteralRepresentation(op).IsInteger32();
}


int LCodeGen::ToInteger32(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  ASSERT(chunk_->LookupLiteralRepresentation(op).IsInteger32());
  ASSERT(constant->HasInteger32Value());
  return constant->Integer32Value();
}


double LCodeGen::ToDouble(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  ASSERT(constant->HasDoubleValue());
  return constant->DoubleValue();
}
Operand LCodeGen::ToOperand(LOperand* op) {
  if (op->IsConstantOperand()) {
    LConstantOperand* const_op = LConstantOperand::cast(op);
    HConstant* constant = chunk()->LookupConstant(const_op);
    Representation r = chunk_->LookupLiteralRepresentation(const_op);
    if (r.IsInteger32()) {
      ASSERT(constant->HasInteger32Value());
      return Operand(constant->Integer32Value());
    } else if (r.IsDouble()) {
      Abort("ToOperand Unsupported double immediate.");
    }
    ASSERT(r.IsTagged());
    return Operand(constant->handle());
  } else if (op->IsRegister()) {
    return Operand(ToRegister(op));
  } else if (op->IsDoubleRegister()) {
    Abort("ToOperand IsDoubleRegister unimplemented");
    return Operand(0);
  }
  // Stack slots not implemented, use ToMemOperand instead.
  UNREACHABLE();
  return Operand(0);
}
MemOperand LCodeGen::ToMemOperand(LOperand* op) const {
  ASSERT(!op->IsRegister());
  ASSERT(!op->IsDoubleRegister());
  ASSERT(op->IsStackSlot() || op->IsDoubleStackSlot());
  int index = op->index();
  if (index >= 0) {
    // Local or spill slot. Skip the frame pointer, function, and
    // context in the fixed part of the frame.
    return MemOperand(fp, -(index + 3) * kPointerSize);
  } else {
    // Incoming parameter. Skip the return address.
    return MemOperand(fp, -(index - 1) * kPointerSize);
  }
}


MemOperand LCodeGen::ToHighMemOperand(LOperand* op) const {
  ASSERT(op->IsDoubleStackSlot());
  int index = op->index();
  if (index >= 0) {
    // Local or spill slot. Skip the frame pointer, function, context,
    // and the first word of the double in the fixed part of the frame.
    return MemOperand(fp, -(index + 3) * kPointerSize + kPointerSize);
  } else {
    // Incoming parameter. Skip the return address and the first word
    // of the double.
    return MemOperand(fp, -(index - 1) * kPointerSize + kPointerSize);
  }
}
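// WriteTranslation below serializes an LEnvironment chain (outermost frame
// first, via the recursive call on environment->outer()) into a Translation
// record. The deoptimizer later replays that record to reconstruct the
// unoptimized frames, so every live value gets a Store* command saying
// where the value can be found (register, stack slot, or literal).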
void LCodeGen::WriteTranslation(LEnvironment* environment,
                                Translation* translation,
                                int* arguments_index,
                                int* arguments_count) {
  if (environment == NULL) return;

  // The translation includes one command per value in the environment.
  int translation_size = environment->values()->length();
  // The output frame height does not include the parameters.
  int height = translation_size - environment->parameter_count();

  // Function parameters are arguments to the outermost environment.
  *arguments_index = -environment->parameter_count();
  *arguments_count = environment->parameter_count();

  WriteTranslation(environment->outer(),
                   translation,
                   arguments_index,
                   arguments_count);
  int closure_id = *info()->closure() != *environment->closure()
      ? DefineDeoptimizationLiteral(environment->closure())
      : Translation::kSelfLiteralId;

  switch (environment->frame_type()) {
    case JS_FUNCTION:
      translation->BeginJSFrame(environment->ast_id(), closure_id, height);
      break;
    case JS_CONSTRUCT:
      translation->BeginConstructStubFrame(closure_id, translation_size);
      break;
    case JS_GETTER:
      ASSERT(translation_size == 1);
      ASSERT(height == 0);
      translation->BeginGetterStubFrame(closure_id);
      break;
    case JS_SETTER:
      ASSERT(translation_size == 2);
      ASSERT(height == 0);
      translation->BeginSetterStubFrame(closure_id);
      break;
    case ARGUMENTS_ADAPTOR:
      translation->BeginArgumentsAdaptorFrame(closure_id, translation_size);
      break;
  }

  // Inlined frames which push their arguments cause the index to be
  // bumped and another stack area to be used for materialization.
  if (environment->entry() != NULL &&
      environment->entry()->arguments_pushed()) {
    *arguments_index = *arguments_index < 0
        ? GetStackSlotCount()
        : *arguments_index + *arguments_count;
    *arguments_count = environment->entry()->arguments_count() + 1;
  }

  for (int i = 0; i < translation_size; ++i) {
    LOperand* value = environment->values()->at(i);
    // spilled_registers_ and spilled_double_registers_ are either
    // both NULL or both set.
    if (environment->spilled_registers() != NULL && value != NULL) {
      if (value->IsRegister() &&
          environment->spilled_registers()[value->index()] != NULL) {
        translation->MarkDuplicate();
        AddToTranslation(translation,
                         environment->spilled_registers()[value->index()],
                         environment->HasTaggedValueAt(i),
                         environment->HasUint32ValueAt(i),
                         *arguments_index,
                         *arguments_count);
      } else if (
          value->IsDoubleRegister() &&
          environment->spilled_double_registers()[value->index()] != NULL) {
        translation->MarkDuplicate();
        AddToTranslation(
            translation,
            environment->spilled_double_registers()[value->index()],
            false,
            false,
            *arguments_index,
            *arguments_count);
      }
    }

    AddToTranslation(translation,
                     value,
                     environment->HasTaggedValueAt(i),
                     environment->HasUint32ValueAt(i),
                     *arguments_index,
                     *arguments_count);
  }
}
void LCodeGen::AddToTranslation(Translation* translation,
                                LOperand* op,
                                bool is_tagged,
                                bool is_uint32,
                                int arguments_index,
                                int arguments_count) {
  if (op == NULL) {
    // A NULL operand is currently only used for the arguments object,
    // which the deoptimizer must reconstruct.
    translation->StoreArgumentsObject(arguments_index, arguments_count);
  } else if (op->IsStackSlot()) {
    if (is_tagged) {
      translation->StoreStackSlot(op->index());
    } else if (is_uint32) {
      translation->StoreUint32StackSlot(op->index());
    } else {
      translation->StoreInt32StackSlot(op->index());
    }
  } else if (op->IsDoubleStackSlot()) {
    translation->StoreDoubleStackSlot(op->index());
  } else if (op->IsArgument()) {
    ASSERT(is_tagged);
    int src_index = GetStackSlotCount() + op->index();
    translation->StoreStackSlot(src_index);
  } else if (op->IsRegister()) {
    Register reg = ToRegister(op);
    if (is_tagged) {
      translation->StoreRegister(reg);
    } else if (is_uint32) {
      translation->StoreUint32Register(reg);
    } else {
      translation->StoreInt32Register(reg);
    }
  } else if (op->IsDoubleRegister()) {
    DoubleRegister reg = ToDoubleRegister(op);
    translation->StoreDoubleRegister(reg);
  } else if (op->IsConstantOperand()) {
    HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op));
    int src_index = DefineDeoptimizationLiteral(constant->handle());
    translation->StoreLiteral(src_index);
  } else {
    UNREACHABLE();
  }
}
void LCodeGen::CallCode(Handle<Code> code,
                        RelocInfo::Mode mode,
                        LInstruction* instr) {
  CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT);
}


void LCodeGen::CallCodeGeneric(Handle<Code> code,
                               RelocInfo::Mode mode,
                               LInstruction* instr,
                               SafepointMode safepoint_mode) {
  ASSERT(instr != NULL);
  LPointerMap* pointers = instr->pointer_map();
  RecordPosition(pointers->position());
  __ Call(code, mode);
  RecordSafepointWithLazyDeopt(instr, safepoint_mode);
}


void LCodeGen::CallRuntime(const Runtime::Function* function,
                           int num_arguments,
                           LInstruction* instr) {
  ASSERT(instr != NULL);
  LPointerMap* pointers = instr->pointer_map();
  ASSERT(pointers != NULL);
  RecordPosition(pointers->position());

  __ CallRuntime(function, num_arguments);
  RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
}


void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
                                       int argc,
                                       LInstruction* instr) {
  __ CallRuntimeSaveDoubles(id);
  RecordSafepointWithRegisters(
      instr->pointer_map(), argc, Safepoint::kNoLazyDeopt);
}
void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
                                                    Safepoint::DeoptMode mode) {
  if (!environment->HasBeenRegistered()) {
    // Count how many frames (and how many JS frames) the environment
    // chain contains, then serialize it into a Translation.
    int frame_count = 0;
    int jsframe_count = 0;
    int args_index = 0;
    int args_count = 0;
    for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
      ++frame_count;
      if (e->frame_type() == JS_FUNCTION) {
        ++jsframe_count;
      }
    }
    Translation translation(&translations_, frame_count, jsframe_count, zone());
    WriteTranslation(environment, &translation, &args_index, &args_count);
    int deoptimization_index = deoptimizations_.length();
    int pc_offset = masm()->pc_offset();
    environment->Register(deoptimization_index,
                          translation.index(),
                          (mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
    deoptimizations_.Add(environment, zone());
  }
}
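// DeoptimizeIf below emits a conditional jump to the eager deoptimization
// entry registered for the environment. FLAG_trap_on_deopt and
// FLAG_deopt_every_n_times are debugging aids: the former stops execution
// instead of silently deoptimizing, the latter forces deopts at a chosen
// optimization count.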
void LCodeGen::DeoptimizeIf(Condition cc,
                            LEnvironment* environment,
                            Register src1,
                            const Operand& src2) {
  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
  ASSERT(environment->HasBeenRegistered());
  int id = environment->deoptimization_index();
  Address entry = Deoptimizer::GetDeoptimizationEntry(id, Deoptimizer::EAGER);
  if (entry == NULL) {
    Abort("bailout was not prepared");
    return;
  }

  ASSERT(FLAG_deopt_every_n_times < 2);  // Other values not supported on MIPS.

  if (FLAG_deopt_every_n_times == 1 &&
      info_->shared_info()->opt_count() == id) {
    __ Jump(entry, RelocInfo::RUNTIME_ENTRY);
    return;
  }

  if (FLAG_trap_on_deopt) {
    Label skip;
    if (cc != al) {
      __ Branch(&skip, NegateCondition(cc), src1, src2);
    }
    __ stop("trap_on_deopt");
    __ bind(&skip);
  }

  __ Jump(entry, RelocInfo::RUNTIME_ENTRY, cc, src1, src2);
}
void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
  int length = deoptimizations_.length();
  if (length == 0) return;
  Handle<DeoptimizationInputData> data =
      factory()->NewDeoptimizationInputData(length, TENURED);

  Handle<ByteArray> translations = translations_.CreateByteArray();
  data->SetTranslationByteArray(*translations);
  data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));

  Handle<FixedArray> literals =
      factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
  for (int i = 0; i < deoptimization_literals_.length(); i++) {
    literals->set(i, *deoptimization_literals_[i]);
  }
  data->SetLiteralArray(*literals);

  data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id().ToInt()));
  data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_));

  // Populate the deoptimization entries.
  for (int i = 0; i < length; i++) {
    LEnvironment* env = deoptimizations_[i];
    data->SetAstId(i, env->ast_id());
    data->SetTranslationIndex(i, Smi::FromInt(env->translation_index()));
    data->SetArgumentsStackHeight(i,
                                  Smi::FromInt(env->arguments_stack_height()));
    data->SetPc(i, Smi::FromInt(env->pc_offset()));
  }
  code->set_deoptimization_data(*data);
}
int LCodeGen::DefineDeoptimizationLiteral(Handle<Object> literal) {
  int result = deoptimization_literals_.length();
  for (int i = 0; i < deoptimization_literals_.length(); ++i) {
    if (deoptimization_literals_[i].is_identical_to(literal)) return i;
  }
  deoptimization_literals_.Add(literal, zone());
  return result;
}
void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
  ASSERT(deoptimization_literals_.length() == 0);

  const ZoneList<Handle<JSFunction> >* inlined_closures =
      chunk()->inlined_closures();

  for (int i = 0, length = inlined_closures->length();
       i < length;
       i++) {
    DefineDeoptimizationLiteral(inlined_closures->at(i));
  }

  inlined_function_count_ = deoptimization_literals_.length();
}
void LCodeGen::RecordSafepointWithLazyDeopt(
    LInstruction* instr, SafepointMode safepoint_mode) {
  if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
    RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt);
  } else {
    ASSERT(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
    RecordSafepointWithRegisters(
        instr->pointer_map(), 0, Safepoint::kLazyDeopt);
  }
}
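// The RecordSafepoint overloads below differ only in the Safepoint::Kind
// they pass along: kSimple for plain calls, kWithRegisters when registers
// were pushed into the safepoint register area, and kWithRegistersAndDoubles
// when FPU registers were saved as well. Every tagged stack slot (and, where
// applicable, register) in the pointer map is registered so the GC can
// update it.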
void LCodeGen::RecordSafepoint(
    LPointerMap* pointers,
    Safepoint::Kind kind,
    int arguments,
    Safepoint::DeoptMode deopt_mode) {
  ASSERT(expected_safepoint_kind_ == kind);

  const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();
  Safepoint safepoint = safepoints_.DefineSafepoint(masm(),
      kind, arguments, deopt_mode);
  for (int i = 0; i < operands->length(); i++) {
    LOperand* pointer = operands->at(i);
    if (pointer->IsStackSlot()) {
      safepoint.DefinePointerSlot(pointer->index(), zone());
    } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
      safepoint.DefinePointerRegister(ToRegister(pointer), zone());
    }
  }
  if (kind & Safepoint::kWithRegisters) {
    // Register cp always contains a pointer to the context.
    safepoint.DefinePointerRegister(cp, zone());
  }
}
void LCodeGen::RecordSafepoint(LPointerMap* pointers,
                               Safepoint::DeoptMode deopt_mode) {
  RecordSafepoint(pointers, Safepoint::kSimple, 0, deopt_mode);
}


void LCodeGen::RecordSafepoint(Safepoint::DeoptMode deopt_mode) {
  LPointerMap empty_pointers(RelocInfo::kNoPosition, zone());
  RecordSafepoint(&empty_pointers, deopt_mode);
}


void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
                                            int arguments,
                                            Safepoint::DeoptMode deopt_mode) {
  RecordSafepoint(
      pointers, Safepoint::kWithRegisters, arguments, deopt_mode);
}


void LCodeGen::RecordSafepointWithRegistersAndDoubles(
    LPointerMap* pointers,
    int arguments,
    Safepoint::DeoptMode deopt_mode) {
  RecordSafepoint(
      pointers, Safepoint::kWithRegistersAndDoubles, arguments, deopt_mode);
}
void LCodeGen::RecordPosition(int position) {
  if (position == RelocInfo::kNoPosition) return;
  masm()->positions_recorder()->RecordPosition(position);
}
void LCodeGen::DoLabel(LLabel* label) {
  if (label->is_loop_header()) {
    Comment(";;; B%d - LOOP entry", label->block_id());
  } else {
    Comment(";;; B%d", label->block_id());
  }
  __ bind(label->label());
  current_block_ = label->block_id();
  DoGap(label);
}
void LCodeGen::DoParallelMove(LParallelMove* move) {
  resolver_.Resolve(move);
}


void LCodeGen::DoGap(LGap* gap) {
  for (int i = LGap::FIRST_INNER_POSITION;
       i <= LGap::LAST_INNER_POSITION;
       i++) {
    LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i);
    LParallelMove* move = gap->GetParallelMove(inner_pos);
    if (move != NULL) DoParallelMove(move);
  }
}


void LCodeGen::DoInstructionGap(LInstructionGap* instr) {
  DoGap(instr);
}


void LCodeGen::DoParameter(LParameter* instr) {
  // Nothing to do.
}
void LCodeGen::DoCallStub(LCallStub* instr) {
  ASSERT(ToRegister(instr->result()).is(v0));
  switch (instr->hydrogen()->major_key()) {
    case CodeStub::RegExpConstructResult: {
      RegExpConstructResultStub stub;
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
      break;
    }
    case CodeStub::RegExpExec: {
      RegExpExecStub stub;
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
      break;
    }
    case CodeStub::SubString: {
      SubStringStub stub;
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
      break;
    }
    case CodeStub::NumberToString: {
      NumberToStringStub stub;
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
      break;
    }
    case CodeStub::StringAdd: {
      StringAddStub stub(NO_STRING_ADD_FLAGS);
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
      break;
    }
    case CodeStub::StringCompare: {
      StringCompareStub stub;
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
      break;
    }
    case CodeStub::TranscendentalCache: {
      __ lw(a0, MemOperand(sp, 0));
      TranscendentalCacheStub stub(instr->transcendental_type(),
                                   TranscendentalCacheStub::TAGGED);
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
      break;
    }
    default:
      UNREACHABLE();
  }
}
void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
  // Record the address of the first unknown OSR value as the place to enter.
  if (osr_pc_offset_ == -1) osr_pc_offset_ = masm()->pc_offset();
}
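// The integer instructions that follow (mod, div, mul, bitwise ops, shifts,
// sub, add) share one pattern: compute the result with MIPS integer
// arithmetic, then DeoptimizeIf on the conditions Hydrogen could not rule
// out statically - division by zero, a -0 result that must be boxed, or
// signed overflow.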
void LCodeGen::DoModI(LModI* instr) {
  Register scratch = scratch0();
  const Register left = ToRegister(instr->left());
  const Register result = ToRegister(instr->result());

  Label done;

  if (instr->hydrogen()->HasPowerOf2Divisor()) {
    Register scratch = scratch0();
    ASSERT(!left.is(scratch));
    __ mov(scratch, left);
    int32_t p2constant = HConstant::cast(
        instr->hydrogen()->right())->Integer32Value();
    // The result always takes the sign of the dividend (left).
    p2constant = abs(p2constant);

    Label positive_dividend;
    __ Branch(&positive_dividend, ge, left, Operand(zero_reg));
    __ subu(result, zero_reg, left);
    __ And(result, result, p2constant - 1);
    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
      DeoptimizeIf(eq, instr->environment(), result, Operand(zero_reg));
    }
    __ Branch(&done);
    __ subu(result, zero_reg, result);
    __ bind(&positive_dividend);
    __ And(result, scratch, p2constant - 1);
  } else {
    // div runs in the background while we check for special cases.
    Register right = EmitLoadRegister(instr->right(), scratch);
    __ div(left, right);

    // Check for x % 0.
    if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
      DeoptimizeIf(eq, instr->environment(), right, Operand(zero_reg));
    }

    __ Branch(&done, ge, left, Operand(zero_reg));
    __ mfhi(result);

    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
      DeoptimizeIf(eq, instr->environment(), result, Operand(zero_reg));
    }
    __ mfhi(result);
  }
  __ bind(&done);
}
void LCodeGen::DoDivI(LDivI* instr) {
  const Register left = ToRegister(instr->left());
  const Register right = ToRegister(instr->right());
  const Register result = ToRegister(instr->result());

  // On MIPS div is asynchronous - it will run in the background while we
  // check for special cases.
  __ div(left, right);

  // Check for x / 0.
  if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
    DeoptimizeIf(eq, instr->environment(), right, Operand(zero_reg));
  }

  // Check for (0 / -x) that will produce negative zero.
  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    Label left_not_zero;
    __ Branch(&left_not_zero, ne, left, Operand(zero_reg));
    DeoptimizeIf(lt, instr->environment(), right, Operand(zero_reg));
    __ bind(&left_not_zero);
  }

  // Check for (-kMinInt / -1).
  if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
    Label left_not_min_int;
    __ Branch(&left_not_min_int, ne, left, Operand(kMinInt));
    DeoptimizeIf(eq, instr->environment(), right, Operand(-1));
    __ bind(&left_not_min_int);
  }

  // The result must be an integer: deoptimize if there is a remainder.
  __ mfhi(result);
  DeoptimizeIf(ne, instr->environment(), result, Operand(zero_reg));
  __ mflo(result);
}
void LCodeGen::DoMulI(LMulI* instr) {
  Register scratch = scratch0();
  Register result = ToRegister(instr->result());
  // Note that result may alias left.
  Register left = ToRegister(instr->left());
  LOperand* right_op = instr->right();

  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
  bool bailout_on_minus_zero =
      instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero);

  if (right_op->IsConstantOperand() && !can_overflow) {
    // Use optimized code for specific constants.
    int32_t constant = ToInteger32(LConstantOperand::cast(right_op));

    if (bailout_on_minus_zero && (constant < 0)) {
      // If the constant is negative and the left operand is zero, the
      // result would be -0. Deoptimize in that case.
      DeoptimizeIf(eq, instr->environment(), left, Operand(zero_reg));
    }

    switch (constant) {
      case -1:
        __ Subu(result, zero_reg, left);
        break;
      case 0:
        if (bailout_on_minus_zero) {
          // If the left operand is strictly negative and the constant is
          // zero, the result is -0. Deoptimize if required.
          DeoptimizeIf(lt, instr->environment(), left, Operand(zero_reg));
        }
        __ mov(result, zero_reg);
        break;
      case 1:
        __ Move(result, left);
        break;
      default:
        // Multiplying by powers of two and powers of two plus or minus
        // one can be done faster with shifted operands.
        int32_t mask = constant >> 31;
        uint32_t constant_abs = (constant + mask) ^ mask;

        if (IsPowerOf2(constant_abs)) {
          int32_t shift = WhichPowerOf2(constant_abs);
          __ sll(result, left, shift);
        } else if (IsPowerOf2(constant_abs - 1)) {
          int32_t shift = WhichPowerOf2(constant_abs - 1);
          __ sll(result, left, shift);
          __ Addu(result, result, left);
        } else if (IsPowerOf2(constant_abs + 1)) {
          int32_t shift = WhichPowerOf2(constant_abs + 1);
          __ sll(result, left, shift);
          __ Subu(result, result, left);
        } else {
          // Generate standard code.
          __ li(at, constant);
          __ Mul(result, left, at);
          break;
        }

        // Correct the sign of the result if the constant is negative.
        if (constant < 0) {
          __ Subu(result, zero_reg, result);
        }
    }
  } else {
    Register right = EmitLoadRegister(right_op, scratch);
    if (bailout_on_minus_zero) {
      __ Or(ToRegister(instr->temp()), left, right);
    }

    if (can_overflow) {
      // hi:lo = left * right. Deoptimize if hi is not the sign extension
      // of lo, i.e. the product overflowed 32 bits.
      __ mult(left, right);
      __ mfhi(scratch);
      __ mflo(result);
      __ sra(at, result, 31);
      DeoptimizeIf(ne, instr->environment(), scratch, Operand(at));
    } else {
      __ Mul(result, left, right);
    }

    if (bailout_on_minus_zero) {
      // Bail out if the result is supposed to be negative zero.
      Label done;
      __ Branch(&done, ne, result, Operand(zero_reg));
      DeoptimizeIf(lt,
                   instr->environment(),
                   ToRegister(instr->temp()),
                   Operand(zero_reg));
      __ bind(&done);
    }
  }
}
void LCodeGen::DoBitI(LBitI* instr) {
  LOperand* left_op = instr->left();
  LOperand* right_op = instr->right();
  ASSERT(left_op->IsRegister());
  Register left = ToRegister(left_op);
  Register result = ToRegister(instr->result());
  Operand right(no_reg);

  if (right_op->IsStackSlot() || right_op->IsArgument()) {
    right = Operand(EmitLoadRegister(right_op, at));
  } else {
    ASSERT(right_op->IsRegister() || right_op->IsConstantOperand());
    right = ToOperand(right_op);
  }

  switch (instr->op()) {
    case Token::BIT_AND:
      __ And(result, left, right);
      break;
    case Token::BIT_OR:
      __ Or(result, left, right);
      break;
    case Token::BIT_XOR:
      __ Xor(result, left, right);
      break;
    default:
      UNREACHABLE();
      break;
  }
}
void LCodeGen::DoShiftI(LShiftI* instr) {
  // Both 'left' and 'right' are "used at start", so the result may alias
  // either of them.
  LOperand* right_op = instr->right();
  Register left = ToRegister(instr->left());
  Register result = ToRegister(instr->result());

  if (right_op->IsRegister()) {
    // No need to mask the right operand on MIPS, it is built into the
    // variable shift instructions.
    switch (instr->op()) {
      case Token::SAR:
        __ srav(result, left, ToRegister(right_op));
        break;
      case Token::SHR:
        __ srlv(result, left, ToRegister(right_op));
        if (instr->can_deopt()) {
          DeoptimizeIf(lt, instr->environment(), result, Operand(zero_reg));
        }
        break;
      case Token::SHL:
        __ sllv(result, left, ToRegister(right_op));
        break;
      default:
        UNREACHABLE();
        break;
    }
  } else {
    // Mask the right operand.
    int value = ToInteger32(LConstantOperand::cast(right_op));
    uint8_t shift_count = static_cast<uint8_t>(value & 0x1F);
    switch (instr->op()) {
      case Token::SAR:
        if (shift_count != 0) {
          __ sra(result, left, shift_count);
        } else {
          __ Move(result, left);
        }
        break;
      case Token::SHR:
        if (shift_count != 0) {
          __ srl(result, left, shift_count);
        } else {
          if (instr->can_deopt()) {
            __ And(at, left, Operand(0x80000000));
            DeoptimizeIf(ne, instr->environment(), at, Operand(zero_reg));
          }
          __ Move(result, left);
        }
        break;
      case Token::SHL:
        if (shift_count != 0) {
          __ sll(result, left, shift_count);
        } else {
          __ Move(result, left);
        }
        break;
      default:
        UNREACHABLE();
        break;
    }
  }
}
void LCodeGen::DoSubI(LSubI* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();
  LOperand* result = instr->result();
  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);

  if (!can_overflow) {
    if (right->IsStackSlot() || right->IsArgument()) {
      Register right_reg = EmitLoadRegister(right, at);
      __ Subu(ToRegister(result), ToRegister(left), Operand(right_reg));
    } else {
      ASSERT(right->IsRegister() || right->IsConstantOperand());
      __ Subu(ToRegister(result), ToRegister(left), ToOperand(right));
    }
  } else {  // can_overflow.
    Register overflow = scratch0();
    Register scratch = scratch1();
    if (right->IsStackSlot() ||
        right->IsArgument() ||
        right->IsConstantOperand()) {
      Register right_reg = EmitLoadRegister(right, scratch);
      __ SubuAndCheckForOverflow(ToRegister(result),
                                 ToRegister(left),
                                 right_reg,
                                 overflow);  // Reg at also used as scratch.
    } else {
      ASSERT(right->IsRegister());
      // Overflow-check macros do not support constant operands, so the
      // IsConstantOperand case is handled in the clause above.
      __ SubuAndCheckForOverflow(ToRegister(result),
                                 ToRegister(left),
                                 ToRegister(right),
                                 overflow);  // Reg at also used as scratch.
    }
    DeoptimizeIf(lt, instr->environment(),
                 overflow, Operand(zero_reg));
  }
}
void LCodeGen::DoConstantI(LConstantI* instr) {
  ASSERT(instr->result()->IsRegister());
  __ li(ToRegister(instr->result()), Operand(instr->value()));
}


void LCodeGen::DoConstantD(LConstantD* instr) {
  ASSERT(instr->result()->IsDoubleRegister());
  DoubleRegister result = ToDoubleRegister(instr->result());
  double v = instr->value();
  __ Move(result, v);
}


void LCodeGen::DoConstantT(LConstantT* instr) {
  Handle<Object> value = instr->value();
  if (value->IsSmi()) {
    __ li(ToRegister(instr->result()), Operand(value));
  } else {
    __ LoadHeapObject(ToRegister(instr->result()),
                      Handle<HeapObject>::cast(value));
  }
}


void LCodeGen::DoJSArrayLength(LJSArrayLength* instr) {
  Register result = ToRegister(instr->result());
  Register array = ToRegister(instr->value());
  __ lw(result, FieldMemOperand(array, JSArray::kLengthOffset));
}


void LCodeGen::DoFixedArrayBaseLength(LFixedArrayBaseLength* instr) {
  Register result = ToRegister(instr->result());
  Register array = ToRegister(instr->value());
  __ lw(result, FieldMemOperand(array, FixedArrayBase::kLengthOffset));
}


void LCodeGen::DoMapEnumLength(LMapEnumLength* instr) {
  Register result = ToRegister(instr->result());
  Register map = ToRegister(instr->value());
  __ EnumLength(result, map);
}


void LCodeGen::DoElementsKind(LElementsKind* instr) {
  Register result = ToRegister(instr->result());
  Register input = ToRegister(instr->value());

  // Load map into result, then extract the elements kind from bit field 2.
  __ lw(result, FieldMemOperand(input, HeapObject::kMapOffset));
  __ lbu(result, FieldMemOperand(result, Map::kBitField2Offset));
  __ Ext(result, result, Map::kElementsKindShift, Map::kElementsKindBitCount);
}
void LCodeGen::DoValueOf(LValueOf* instr) {
  Register input = ToRegister(instr->value());
  Register result = ToRegister(instr->result());
  Register map = ToRegister(instr->temp());
  Label done;

  // If the object is a smi return the object.
  __ Move(result, input);
  __ JumpIfSmi(input, &done);

  // If the object is not a value type, return the object.
  __ GetObjectType(input, map, map);
  __ Branch(&done, ne, map, Operand(JS_VALUE_TYPE));
  __ lw(result, FieldMemOperand(input, JSValue::kValueOffset));

  __ bind(&done);
}
void LCodeGen::DoDateField(LDateField* instr) {
  Register object = ToRegister(instr->date());
  Register result = ToRegister(instr->result());
  Register scratch = ToRegister(instr->temp());
  Smi* index = instr->index();
  Label runtime, done;
  ASSERT(object.is(a0));
  ASSERT(result.is(v0));
  ASSERT(!scratch.is(scratch0()));
  ASSERT(!scratch.is(object));

  // Deoptimize if the receiver is not a JSDate.
  __ And(at, object, Operand(kSmiTagMask));
  DeoptimizeIf(eq, instr->environment(), at, Operand(zero_reg));
  __ GetObjectType(object, scratch, scratch);
  DeoptimizeIf(ne, instr->environment(), scratch, Operand(JS_DATE_TYPE));

  if (index->value() == 0) {
    __ lw(result, FieldMemOperand(object, JSDate::kValueOffset));
  } else {
    if (index->value() < JSDate::kFirstUncachedField) {
      ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
      __ li(scratch, Operand(stamp));
      __ lw(scratch, MemOperand(scratch));
      __ lw(scratch0(), FieldMemOperand(object, JSDate::kCacheStampOffset));
      __ Branch(&runtime, ne, scratch, Operand(scratch0()));
      __ lw(result, FieldMemOperand(object, JSDate::kValueOffset +
                                            kPointerSize * index->value()));
      __ jmp(&done);
    }
    __ bind(&runtime);
    __ PrepareCallCFunction(2, scratch);
    __ li(a1, Operand(index));
    __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
    __ bind(&done);
  }
}
void LCodeGen::DoBitNotI(LBitNotI* instr) {
  Register input = ToRegister(instr->value());
  Register result = ToRegister(instr->result());
  __ Nor(result, zero_reg, Operand(input));
}


void LCodeGen::DoThrow(LThrow* instr) {
  Register input_reg = EmitLoadRegister(instr->value(), at);
  __ push(input_reg);
  CallRuntime(Runtime::kThrow, 1, instr);

  if (FLAG_debug_code) {
    __ stop("Unreachable code.");
  }
}
void LCodeGen::DoAddI(LAddI* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();
  LOperand* result = instr->result();
  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);

  if (!can_overflow) {
    if (right->IsStackSlot() || right->IsArgument()) {
      Register right_reg = EmitLoadRegister(right, at);
      __ Addu(ToRegister(result), ToRegister(left), Operand(right_reg));
    } else {
      ASSERT(right->IsRegister() || right->IsConstantOperand());
      __ Addu(ToRegister(result), ToRegister(left), ToOperand(right));
    }
  } else {  // can_overflow.
    Register overflow = scratch0();
    Register scratch = scratch1();
    if (right->IsStackSlot() ||
        right->IsArgument() ||
        right->IsConstantOperand()) {
      Register right_reg = EmitLoadRegister(right, scratch);
      __ AdduAndCheckForOverflow(ToRegister(result),
                                 ToRegister(left),
                                 right_reg,
                                 overflow);  // Reg at also used as scratch.
    } else {
      ASSERT(right->IsRegister());
      // Overflow-check macros do not support constant operands, so the
      // IsConstantOperand case is handled in the clause above.
      __ AdduAndCheckForOverflow(ToRegister(result),
                                 ToRegister(left),
                                 ToRegister(right),
                                 overflow);  // Reg at also used as scratch.
    }
    DeoptimizeIf(lt, instr->environment(),
                 overflow, Operand(zero_reg));
  }
}
void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();
  HMathMinMax::Operation operation = instr->hydrogen()->operation();
  Condition condition = (operation == HMathMinMax::kMathMin) ? le : ge;
  if (instr->hydrogen()->representation().IsInteger32()) {
    Register left_reg = ToRegister(left);
    Operand right_op = (right->IsRegister() || right->IsConstantOperand())
        ? ToOperand(right)
        : Operand(EmitLoadRegister(right, at));
    Register result_reg = ToRegister(instr->result());
    Label return_right, done;
    if (!result_reg.is(left_reg)) {
      __ Branch(&return_right, NegateCondition(condition), left_reg, right_op);
      __ mov(result_reg, left_reg);
      __ Branch(&done);
    }
    __ Branch(&done, condition, left_reg, right_op);
    __ bind(&return_right);
    __ Addu(result_reg, zero_reg, right_op);
    __ bind(&done);
  } else {
    ASSERT(instr->hydrogen()->representation().IsDouble());
    FPURegister left_reg = ToDoubleRegister(left);
    FPURegister right_reg = ToDoubleRegister(right);
    FPURegister result_reg = ToDoubleRegister(instr->result());
    Label check_nan_left, check_zero, return_left, return_right, done;
    __ BranchF(&check_zero, &check_nan_left, eq, left_reg, right_reg);
    __ BranchF(&return_left, NULL, condition, left_reg, right_reg);
    __ Branch(&return_right);

    __ bind(&check_zero);
    // left == right != 0.
    __ BranchF(&return_left, NULL, ne, left_reg, kDoubleRegZero);
    // At this point, both left and right are either 0 or -0.
    if (operation == HMathMinMax::kMathMin) {
      // Compute min(0, -0) as -(0 - (-0)) to get the sign right.
      __ neg_d(left_reg, left_reg);
      __ sub_d(result_reg, left_reg, right_reg);
      __ neg_d(result_reg, result_reg);
    } else {
      __ add_d(result_reg, left_reg, right_reg);
    }
    __ Branch(&done);

    __ bind(&check_nan_left);
    // If left == NaN, return it; the unordered compare falls through to
    // return_right otherwise.
    __ BranchF(NULL, &return_left, eq, left_reg, left_reg);
    __ bind(&return_right);
    if (!right_reg.is(result_reg)) {
      __ mov_d(result_reg, right_reg);
    }
    __ Branch(&done);

    __ bind(&return_left);
    if (!left_reg.is(result_reg)) {
      __ mov_d(result_reg, left_reg);
    }
    __ bind(&done);
  }
}
void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
  DoubleRegister left = ToDoubleRegister(instr->left());
  DoubleRegister right = ToDoubleRegister(instr->right());
  DoubleRegister result = ToDoubleRegister(instr->result());
  switch (instr->op()) {
    case Token::ADD:
      __ add_d(result, left, right);
      break;
    case Token::SUB:
      __ sub_d(result, left, right);
      break;
    case Token::MUL:
      __ mul_d(result, left, right);
      break;
    case Token::DIV:
      __ div_d(result, left, right);
      break;
    case Token::MOD: {
      // Save a0-a3 on the stack.
      RegList saved_regs = a0.bit() | a1.bit() | a2.bit() | a3.bit();
      __ MultiPush(saved_regs);

      __ PrepareCallCFunction(0, 2, scratch0());
      __ SetCallCDoubleArguments(left, right);
      __ CallCFunction(
          ExternalReference::double_fp_operation(Token::MOD, isolate()),
          0, 2);
      // Move the result into the double result register.
      __ GetCFunctionDoubleResult(result);

      // Restore saved registers.
      __ MultiPop(saved_regs);
      break;
    }
    default:
      UNREACHABLE();
      break;
  }
}


void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
  ASSERT(ToRegister(instr->left()).is(a1));
  ASSERT(ToRegister(instr->right()).is(a0));
  ASSERT(ToRegister(instr->result()).is(v0));

  BinaryOpStub stub(instr->op(), NO_OVERWRITE);
  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}
int LCodeGen::GetNextEmittedBlock(int block) {
  for (int i = block + 1; i < graph()->blocks()->length(); ++i) {
    LLabel* label = chunk_->GetLabel(i);
    if (!label->HasReplacement()) return i;
  }
  return -1;
}
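// EmitBranch and EmitBranchF receive both successor blocks and compare them
// against the next block in emission order: if one successor is the
// fall-through block, only a single conditional branch is emitted, and if
// both map to the same block the branch degenerates to an unconditional
// goto.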
void LCodeGen::EmitBranch(int left_block, int right_block,
                          Condition cc, Register src1, const Operand& src2) {
  int next_block = GetNextEmittedBlock(current_block_);
  right_block = chunk_->LookupDestination(right_block);
  left_block = chunk_->LookupDestination(left_block);
  if (right_block == left_block) {
    EmitGoto(left_block);
  } else if (left_block == next_block) {
    __ Branch(chunk_->GetAssemblyLabel(right_block),
              NegateCondition(cc), src1, src2);
  } else if (right_block == next_block) {
    __ Branch(chunk_->GetAssemblyLabel(left_block), cc, src1, src2);
  } else {
    __ Branch(chunk_->GetAssemblyLabel(left_block), cc, src1, src2);
    __ Branch(chunk_->GetAssemblyLabel(right_block));
  }
}
void LCodeGen::EmitBranchF(int left_block, int right_block,
                           Condition cc, FPURegister src1, FPURegister src2) {
  int next_block = GetNextEmittedBlock(current_block_);
  right_block = chunk_->LookupDestination(right_block);
  left_block = chunk_->LookupDestination(left_block);
  if (right_block == left_block) {
    EmitGoto(left_block);
  } else if (left_block == next_block) {
    __ BranchF(chunk_->GetAssemblyLabel(right_block), NULL,
               NegateCondition(cc), src1, src2);
  } else if (right_block == next_block) {
    __ BranchF(chunk_->GetAssemblyLabel(left_block), NULL, cc, src1, src2);
  } else {
    __ BranchF(chunk_->GetAssemblyLabel(left_block), NULL, cc, src1, src2);
    __ Branch(chunk_->GetAssemblyLabel(right_block));
  }
}
void LCodeGen::DoBranch(LBranch* instr) {
  int true_block = chunk_->LookupDestination(instr->true_block_id());
  int false_block = chunk_->LookupDestination(instr->false_block_id());

  Representation r = instr->hydrogen()->value()->representation();
  if (r.IsInteger32()) {
    Register reg = ToRegister(instr->value());
    EmitBranch(true_block, false_block, ne, reg, Operand(zero_reg));
  } else if (r.IsDouble()) {
    DoubleRegister reg = ToDoubleRegister(instr->value());
    // Test the double value. Zero and NaN are false.
    EmitBranchF(true_block, false_block, ne, reg, kDoubleRegZero);
  } else {
    ASSERT(r.IsTagged());
    Register reg = ToRegister(instr->value());
    HType type = instr->hydrogen()->value()->type();
    if (type.IsBoolean()) {
      __ LoadRoot(at, Heap::kTrueValueRootIndex);
      EmitBranch(true_block, false_block, eq, reg, Operand(at));
    } else if (type.IsSmi()) {
      EmitBranch(true_block, false_block, ne, reg, Operand(zero_reg));
    } else {
      Label* true_label = chunk_->GetAssemblyLabel(true_block);
      Label* false_label = chunk_->GetAssemblyLabel(false_block);

      ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types();
      // Avoid deopts in the case where we've never executed this path before.
      if (expected.IsEmpty()) expected = ToBooleanStub::all_types();

      if (expected.Contains(ToBooleanStub::UNDEFINED)) {
        // undefined -> false.
        __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
        __ Branch(false_label, eq, reg, Operand(at));
      }
      if (expected.Contains(ToBooleanStub::BOOLEAN)) {
        // Boolean -> its value.
        __ LoadRoot(at, Heap::kTrueValueRootIndex);
        __ Branch(true_label, eq, reg, Operand(at));
        __ LoadRoot(at, Heap::kFalseValueRootIndex);
        __ Branch(false_label, eq, reg, Operand(at));
      }
      if (expected.Contains(ToBooleanStub::NULL_TYPE)) {
        // 'null' -> false.
        __ LoadRoot(at, Heap::kNullValueRootIndex);
        __ Branch(false_label, eq, reg, Operand(at));
      }

      if (expected.Contains(ToBooleanStub::SMI)) {
        // Smis: 0 -> false, all other -> true.
        __ Branch(false_label, eq, reg, Operand(zero_reg));
        __ JumpIfSmi(reg, true_label);
      } else if (expected.NeedsMap()) {
        // If we need a map later and have a Smi -> deopt.
        __ And(at, reg, Operand(kSmiTagMask));
        DeoptimizeIf(eq, instr->environment(), at, Operand(zero_reg));
      }

      const Register map = scratch0();
      if (expected.NeedsMap()) {
        __ lw(map, FieldMemOperand(reg, HeapObject::kMapOffset));
        if (expected.CanBeUndetectable()) {
          // Undetectable -> false.
          __ lbu(at, FieldMemOperand(map, Map::kBitFieldOffset));
          __ And(at, at, Operand(1 << Map::kIsUndetectable));
          __ Branch(false_label, ne, at, Operand(zero_reg));
        }
      }

      if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) {
        // Spec object -> true.
        __ lbu(at, FieldMemOperand(map, Map::kInstanceTypeOffset));
        __ Branch(true_label, ge, at, Operand(FIRST_SPEC_OBJECT_TYPE));
      }

      if (expected.Contains(ToBooleanStub::STRING)) {
        // String value -> false iff empty.
        Label not_string;
        __ lbu(at, FieldMemOperand(map, Map::kInstanceTypeOffset));
        __ Branch(&not_string, ge, at, Operand(FIRST_NONSTRING_TYPE));
        __ lw(at, FieldMemOperand(reg, String::kLengthOffset));
        __ Branch(true_label, ne, at, Operand(zero_reg));
        __ Branch(false_label);
        __ bind(&not_string);
      }

      if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
        // Heap number -> false iff +0, -0, or NaN.
        DoubleRegister dbl_scratch = double_scratch0();
        Label not_heap_number;
        __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
        __ Branch(&not_heap_number, ne, map, Operand(at));
        __ ldc1(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset));
        __ BranchF(true_label, false_label, ne, dbl_scratch, kDoubleRegZero);
        // Falls through if dbl_scratch == 0.
        __ Branch(false_label);
        __ bind(&not_heap_number);
      }

      // We've seen something for the first time -> deopt.
      DeoptimizeIf(al, instr->environment(), zero_reg, Operand(zero_reg));
    }
  }
}
void LCodeGen::EmitGoto(int block) {
  block = chunk_->LookupDestination(block);
  int next_block = GetNextEmittedBlock(current_block_);
  if (block != next_block) {
    __ jmp(chunk_->GetAssemblyLabel(block));
  }
}


void LCodeGen::DoGoto(LGoto* instr) {
  EmitGoto(instr->block_id());
}
Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
  Condition cond = kNoCondition;
  switch (op) {
    case Token::EQ:
    case Token::EQ_STRICT:
      cond = eq;
      break;
    case Token::LT:
      cond = is_unsigned ? lo : lt;
      break;
    case Token::GT:
      cond = is_unsigned ? hi : gt;
      break;
    case Token::LTE:
      cond = is_unsigned ? ls : le;
      break;
    case Token::GTE:
      cond = is_unsigned ? hs : ge;
      break;
    case Token::IN:
    case Token::INSTANCEOF:
    default:
      UNREACHABLE();
  }
  return cond;
}
void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();
  int false_block = chunk_->LookupDestination(instr->false_block_id());
  int true_block = chunk_->LookupDestination(instr->true_block_id());

  Condition cond = TokenToCondition(instr->op(), false);

  if (left->IsConstantOperand() && right->IsConstantOperand()) {
    // We can statically evaluate the comparison.
    double left_val = ToDouble(LConstantOperand::cast(left));
    double right_val = ToDouble(LConstantOperand::cast(right));
    int next_block =
        EvalComparison(instr->op(), left_val, right_val) ? true_block
                                                         : false_block;
    EmitGoto(next_block);
  } else {
    if (instr->is_double()) {
      // Compare left and right as doubles.
      FPURegister left_reg = ToDoubleRegister(left);
      FPURegister right_reg = ToDoubleRegister(right);

      // If a NaN is involved, i.e. the result is unordered,
      // jump to false block label.
      __ BranchF(NULL, chunk_->GetAssemblyLabel(false_block), eq,
                 left_reg, right_reg);

      EmitBranchF(true_block, false_block, cond, left_reg, right_reg);
    } else {
      Register cmp_left;
      Operand cmp_right = Operand(0);

      if (right->IsConstantOperand()) {
        cmp_left = ToRegister(left);
        cmp_right = Operand(ToInteger32(LConstantOperand::cast(right)));
      } else if (left->IsConstantOperand()) {
        cmp_left = ToRegister(right);
        cmp_right = Operand(ToInteger32(LConstantOperand::cast(left)));
        // We transposed the operands, so reverse the condition.
        cond = ReverseCondition(cond);
      } else {
        cmp_left = ToRegister(left);
        cmp_right = Operand(ToRegister(right));
      }

      EmitBranch(true_block, false_block, cond, cmp_left, cmp_right);
    }
  }
}
void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
  Register left = ToRegister(instr->left());
  Register right = ToRegister(instr->right());
  int false_block = chunk_->LookupDestination(instr->false_block_id());
  int true_block = chunk_->LookupDestination(instr->true_block_id());

  EmitBranch(true_block, false_block, eq, left, Operand(right));
}


void LCodeGen::DoCmpConstantEqAndBranch(LCmpConstantEqAndBranch* instr) {
  Register left = ToRegister(instr->left());
  int true_block = chunk_->LookupDestination(instr->true_block_id());
  int false_block = chunk_->LookupDestination(instr->false_block_id());

  EmitBranch(true_block, false_block, eq, left,
             Operand(instr->hydrogen()->right()));
}
void LCodeGen::DoIsNilAndBranch(LIsNilAndBranch* instr) {
  Register scratch = scratch0();
  Register reg = ToRegister(instr->value());
  int false_block = chunk_->LookupDestination(instr->false_block_id());

  // If the expression is known to be untagged or a smi, then it's definitely
  // not null, and it can't be an undetectable object.
  if (instr->hydrogen()->representation().IsSpecialization() ||
      instr->hydrogen()->type().IsSmi()) {
    EmitGoto(false_block);
    return;
  }

  int true_block = chunk_->LookupDestination(instr->true_block_id());

  Heap::RootListIndex nil_value = instr->nil() == kNullValue ?
      Heap::kNullValueRootIndex :
      Heap::kUndefinedValueRootIndex;
  __ LoadRoot(at, nil_value);
  if (instr->kind() == kStrictEquality) {
    EmitBranch(true_block, false_block, eq, reg, Operand(at));
  } else {
    Heap::RootListIndex other_nil_value = instr->nil() == kNullValue ?
        Heap::kUndefinedValueRootIndex :
        Heap::kNullValueRootIndex;
    Label* true_label = chunk_->GetAssemblyLabel(true_block);
    Label* false_label = chunk_->GetAssemblyLabel(false_block);
    __ Branch(true_label, eq, reg, Operand(at));
    __ LoadRoot(at, other_nil_value);
    __ Branch(true_label, eq, reg, Operand(at));
    __ JumpIfSmi(reg, false_label);
    // Check for undetectable objects by looking in the bit field in
    // the map. The object has already been smi checked.
    __ lw(scratch, FieldMemOperand(reg, HeapObject::kMapOffset));
    __ lbu(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
    __ And(scratch, scratch, 1 << Map::kIsUndetectable);
    EmitBranch(true_block, false_block, ne, scratch, Operand(zero_reg));
  }
}
Condition LCodeGen::EmitIsObject(Register input,
                                 Register temp1,
                                 Register temp2,
                                 Label* is_not_object,
                                 Label* is_object) {
  __ JumpIfSmi(input, is_not_object);

  __ LoadRoot(temp2, Heap::kNullValueRootIndex);
  __ Branch(is_object, eq, input, Operand(temp2));

  // Load map.
  __ lw(temp1, FieldMemOperand(input, HeapObject::kMapOffset));
  // Undetectable objects behave like undefined.
  __ lbu(temp2, FieldMemOperand(temp1, Map::kBitFieldOffset));
  __ And(temp2, temp2, Operand(1 << Map::kIsUndetectable));
  __ Branch(is_not_object, ne, temp2, Operand(zero_reg));

  // Load instance type and check that it is in object type range.
  __ lbu(temp2, FieldMemOperand(temp1, Map::kInstanceTypeOffset));
  __ Branch(is_not_object,
            lt, temp2, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));

  return le;
}


void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
  Register reg = ToRegister(instr->value());
  Register temp1 = ToRegister(instr->temp());
  Register temp2 = scratch0();

  int true_block = chunk_->LookupDestination(instr->true_block_id());
  int false_block = chunk_->LookupDestination(instr->false_block_id());
  Label* true_label = chunk_->GetAssemblyLabel(true_block);
  Label* false_label = chunk_->GetAssemblyLabel(false_block);

  Condition true_cond =
      EmitIsObject(reg, temp1, temp2, false_label, true_label);

  EmitBranch(true_block, false_block, true_cond, temp2,
             Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
}
Condition LCodeGen::EmitIsString(Register input,
                                 Register temp1,
                                 Label* is_not_string) {
  __ JumpIfSmi(input, is_not_string);
  __ GetObjectType(input, temp1, temp1);

  return lt;
}


void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
  Register reg = ToRegister(instr->value());
  Register temp1 = ToRegister(instr->temp());

  int true_block = chunk_->LookupDestination(instr->true_block_id());
  int false_block = chunk_->LookupDestination(instr->false_block_id());
  Label* false_label = chunk_->GetAssemblyLabel(false_block);

  Condition true_cond =
      EmitIsString(reg, temp1, false_label);

  EmitBranch(true_block, false_block, true_cond, temp1,
             Operand(FIRST_NONSTRING_TYPE));
}


void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
  int true_block = chunk_->LookupDestination(instr->true_block_id());
  int false_block = chunk_->LookupDestination(instr->false_block_id());

  Register input_reg = EmitLoadRegister(instr->value(), at);
  __ And(at, input_reg, kSmiTagMask);
  EmitBranch(true_block, false_block, eq, at, Operand(zero_reg));
}
void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
  Register input = ToRegister(instr->value());
  Register temp = ToRegister(instr->temp());

  int true_block = chunk_->LookupDestination(instr->true_block_id());
  int false_block = chunk_->LookupDestination(instr->false_block_id());

  __ JumpIfSmi(input, chunk_->GetAssemblyLabel(false_block));
  __ lw(temp, FieldMemOperand(input, HeapObject::kMapOffset));
  __ lbu(temp, FieldMemOperand(temp, Map::kBitFieldOffset));
  __ And(at, temp, Operand(1 << Map::kIsUndetectable));
  EmitBranch(true_block, false_block, ne, at, Operand(zero_reg));
}


static Condition ComputeCompareCondition(Token::Value op) {
  switch (op) {
    case Token::EQ_STRICT:
    case Token::EQ:
      return eq;
    case Token::LT:
      return lt;
    case Token::GT:
      return gt;
    case Token::LTE:
      return le;
    case Token::GTE:
      return ge;
    default:
      UNREACHABLE();
      return kNoCondition;
  }
}
void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
  Token::Value op = instr->op();
  int true_block = chunk_->LookupDestination(instr->true_block_id());
  int false_block = chunk_->LookupDestination(instr->false_block_id());

  Handle<Code> ic = CompareIC::GetUninitialized(op);
  CallCode(ic, RelocInfo::CODE_TARGET, instr);

  Condition condition = ComputeCompareCondition(op);

  EmitBranch(true_block, false_block, condition, v0, Operand(zero_reg));
}


static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
  InstanceType from = instr->from();
  InstanceType to = instr->to();
  if (from == FIRST_TYPE) return to;
  ASSERT(from == to || to == LAST_TYPE);
  return from;
}


static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
  InstanceType from = instr->from();
  InstanceType to = instr->to();
  if (from == to) return eq;
  if (to == LAST_TYPE) return hs;
  if (from == FIRST_TYPE) return ls;
  UNREACHABLE();
  return eq;
}
void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
  Register scratch = scratch0();
  Register input = ToRegister(instr->value());

  int true_block = chunk_->LookupDestination(instr->true_block_id());
  int false_block = chunk_->LookupDestination(instr->false_block_id());

  Label* false_label = chunk_->GetAssemblyLabel(false_block);

  __ JumpIfSmi(input, false_label);

  __ GetObjectType(input, scratch, scratch);
  EmitBranch(true_block,
             false_block,
             BranchCondition(instr->hydrogen()),
             scratch,
             Operand(TestType(instr->hydrogen())));
}
void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
  Register input = ToRegister(instr->value());
  Register result = ToRegister(instr->result());

  __ AssertString(input);

  __ lw(result, FieldMemOperand(input, String::kHashFieldOffset));
  __ IndexFromHash(result, result);
}


void LCodeGen::DoHasCachedArrayIndexAndBranch(
    LHasCachedArrayIndexAndBranch* instr) {
  Register input = ToRegister(instr->value());
  Register scratch = scratch0();

  int true_block = chunk_->LookupDestination(instr->true_block_id());
  int false_block = chunk_->LookupDestination(instr->false_block_id());

  __ lw(scratch, FieldMemOperand(input, String::kHashFieldOffset));
  __ And(at, scratch, Operand(String::kContainsCachedArrayIndexMask));
  EmitBranch(true_block, false_block, eq, at, Operand(zero_reg));
}
// Branches to a label or falls through with the answer in flags. Trashes
// the temp registers, but not the input.
void LCodeGen::EmitClassOfTest(Label* is_true,
                               Label* is_false,
                               Handle<String> class_name,
                               Register input,
                               Register temp,
                               Register temp2) {
  ASSERT(!input.is(temp));
  ASSERT(!input.is(temp2));
  ASSERT(!temp.is(temp2));

  __ JumpIfSmi(input, is_false);

  if (class_name->IsEqualTo(CStrVector("Function"))) {
    // For the "Function" class, callable spec object types map to true.
    __ GetObjectType(input, temp, temp2);
    __ Branch(is_false, lt, temp2, Operand(FIRST_SPEC_OBJECT_TYPE));
    __ Branch(is_true, eq, temp2, Operand(FIRST_SPEC_OBJECT_TYPE));
    __ Branch(is_true, eq, temp2, Operand(LAST_SPEC_OBJECT_TYPE));
  } else {
    // Check that the instance type is in the non-callable object range.
    __ GetObjectType(input, temp, temp2);
    __ Branch(is_false, lt, temp2,
              Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
    __ Branch(is_false, gt, temp2,
              Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
  }

  // Check if the constructor in the map is a function.
  __ lw(temp, FieldMemOperand(temp, Map::kConstructorOffset));

  // Objects with a non-function constructor have class 'Object'.
  __ GetObjectType(temp, temp2, temp2);
  if (class_name->IsEqualTo(CStrVector("Object"))) {
    __ Branch(is_true, ne, temp2, Operand(JS_FUNCTION_TYPE));
  } else {
    __ Branch(is_false, ne, temp2, Operand(JS_FUNCTION_TYPE));
  }

  // temp now contains the constructor function. Grab the instance class
  // name from there and leave it in temp for the caller to compare.
  __ lw(temp, FieldMemOperand(temp, JSFunction::kSharedFunctionInfoOffset));
  __ lw(temp, FieldMemOperand(temp,
                              SharedFunctionInfo::kInstanceClassNameOffset));
}
void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
  Register input = ToRegister(instr->value());
  Register temp = scratch0();
  Register temp2 = ToRegister(instr->temp());
  Handle<String> class_name = instr->hydrogen()->class_name();

  int true_block = chunk_->LookupDestination(instr->true_block_id());
  int false_block = chunk_->LookupDestination(instr->false_block_id());

  Label* true_label = chunk_->GetAssemblyLabel(true_block);
  Label* false_label = chunk_->GetAssemblyLabel(false_block);

  EmitClassOfTest(true_label, false_label, class_name, input, temp, temp2);

  EmitBranch(true_block, false_block, eq, temp, Operand(class_name));
}


void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
  Register reg = ToRegister(instr->value());
  Register temp = ToRegister(instr->temp());
  int true_block = instr->true_block_id();
  int false_block = instr->false_block_id();

  __ lw(temp, FieldMemOperand(reg, HeapObject::kMapOffset));
  EmitBranch(true_block, false_block, eq, temp, Operand(instr->map()));
}
void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
  Label true_label, done;
  ASSERT(ToRegister(instr->left()).is(a0));  // Object is in a0.
  ASSERT(ToRegister(instr->right()).is(a1));  // Function is in a1.
  Register result = ToRegister(instr->result());
  ASSERT(result.is(v0));

  InstanceofStub stub(InstanceofStub::kArgsInRegisters);
  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);

  __ Branch(&true_label, eq, result, Operand(zero_reg));
  __ li(result, Operand(factory()->false_value()));
  __ Branch(&done);
  __ bind(&true_label);
  __ li(result, Operand(factory()->true_value()));
  __ bind(&done);
}
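// DoInstanceOfKnownGlobal below inlines a map check against a patchable
// cell. The deferred path (further down) calls the InstanceofStub with a
// pc-relative delta - see kAdditionalDelta there - so the stub can locate
// and patch the inlined map check and cached answer once the real map is
// known.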
void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
  class DeferredInstanceOfKnownGlobal: public LDeferredCode {
   public:
    DeferredInstanceOfKnownGlobal(LCodeGen* codegen,
                                  LInstanceOfKnownGlobal* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() {
      codegen()->DoDeferredInstanceOfKnownGlobal(instr_, &map_check_);
    }
    virtual LInstruction* instr() { return instr_; }
    Label* map_check() { return &map_check_; }

   private:
    LInstanceOfKnownGlobal* instr_;
    Label map_check_;
  };

  DeferredInstanceOfKnownGlobal* deferred;
  deferred = new(zone()) DeferredInstanceOfKnownGlobal(this, instr);

  Label done, false_result;
  Register object = ToRegister(instr->value());
  Register temp = ToRegister(instr->temp());
  Register result = ToRegister(instr->result());

  ASSERT(object.is(a0));
  ASSERT(result.is(v0));

  // A Smi is not instance of anything.
  __ JumpIfSmi(object, &false_result);

  // This is the inlined call site instanceof cache. The two occurences of
  // the hole value will be patched to the last map/result pair generated by
  // the instanceof stub.
  Label cache_miss;
  Register map = temp;
  __ lw(map, FieldMemOperand(object, HeapObject::kMapOffset));

  __ bind(deferred->map_check());  // Label for calculating code patching.
  // We use Factory::the_hole_value() on purpose instead of loading from the
  // root array to force relocation, so that the cached map can be patched in
  // later.
  Handle<JSGlobalPropertyCell> cell =
      factory()->NewJSGlobalPropertyCell(factory()->the_hole_value());
  __ li(at, Operand(Handle<Object>(cell)));
  __ lw(at, FieldMemOperand(at, JSGlobalPropertyCell::kValueOffset));
  __ Branch(&cache_miss, ne, map, Operand(at));
  // The cached answer is patched in the same way.
  __ li(result, Operand(factory()->the_hole_value()));
  __ Branch(&done);

  // The inlined call site cache did not match. Check null and string before
  // calling the deferred code.
  __ bind(&cache_miss);
  // Null is not instance of anything.
  __ LoadRoot(temp, Heap::kNullValueRootIndex);
  __ Branch(&false_result, eq, object, Operand(temp));

  // String values are not instances of anything.
  Condition cc = __ IsObjectStringType(object, temp, temp);
  __ Branch(&false_result, cc, temp, Operand(zero_reg));

  // Go to the deferred code.
  __ Branch(deferred->entry());

  __ bind(&false_result);
  __ LoadRoot(result, Heap::kFalseValueRootIndex);

  // Here result has either true or false. Deferred code also produces true
  // or false object.
  __ bind(deferred->exit());
  __ bind(&done);
}
void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
                                               Label* map_check) {
  Register result = ToRegister(instr->result());
  ASSERT(result.is(v0));

  InstanceofStub::Flags flags = InstanceofStub::kNoFlags;
  flags = static_cast<InstanceofStub::Flags>(
      flags | InstanceofStub::kArgsInRegisters);
  flags = static_cast<InstanceofStub::Flags>(
      flags | InstanceofStub::kCallSiteInlineCheck);
  flags = static_cast<InstanceofStub::Flags>(
      flags | InstanceofStub::kReturnTrueFalseObject);
  InstanceofStub stub(flags);

  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);

  // Get the temp register reserved by the instruction. Its safepoint slot is
  // used to communicate the offset of the map check to the stub.
  Register temp = ToRegister(instr->temp());
  __ LoadHeapObject(InstanceofStub::right(), instr->function());
  static const int kAdditionalDelta = 7;
  int delta = masm_->InstructionsGeneratedSince(map_check) + kAdditionalDelta;
  Label before_push_delta;
  __ bind(&before_push_delta);
  __ li(temp, Operand(delta * kPointerSize));
  __ StoreToSafepointRegisterSlot(temp, temp);
  CallCodeGeneric(stub.GetCode(),
                  RelocInfo::CODE_TARGET,
                  instr,
                  RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
  LEnvironment* env = instr->GetDeferredLazyDeoptimizationEnvironment();
  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
  // Put the result value into the result register slot and
  // restore all registers.
  __ StoreToSafepointRegisterSlot(result, result);
}
void LCodeGen::DoCmpT(LCmpT* instr) {
  Token::Value op = instr->op();

  Handle<Code> ic = CompareIC::GetUninitialized(op);
  CallCode(ic, RelocInfo::CODE_TARGET, instr);

  Condition condition = ComputeCompareCondition(op);
  // A minor optimization that relies on LoadRoot always emitting one
  // instruction.
  Label done;
  __ Branch(USE_DELAY_SLOT, &done, condition, v0, Operand(zero_reg));
  __ LoadRoot(ToRegister(instr->result()), Heap::kTrueValueRootIndex);
  __ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex);
  ASSERT_EQ(3, masm()->InstructionsGeneratedSince(&done));
  __ bind(&done);
}


void LCodeGen::DoReturn(LReturn* instr) {
  if (FLAG_trace) {
    // Push the return value on the stack as the parameter.
    // Runtime::TraceExit returns its parameter in v0.
    __ push(v0);
    __ CallRuntime(Runtime::kTraceExit, 1);
  }
  int32_t sp_delta = (GetParameterCount() + 1) * kPointerSize;
  __ mov(sp, fp);
  __ Pop(ra, fp);
  __ Addu(sp, sp, Operand(sp_delta));
  __ Jump(ra);
}
void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
  Register result = ToRegister(instr->result());
  __ li(at, Operand(Handle<Object>(instr->hydrogen()->cell())));
  __ lw(result, FieldMemOperand(at, JSGlobalPropertyCell::kValueOffset));
  if (instr->hydrogen()->RequiresHoleCheck()) {
    __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
    DeoptimizeIf(eq, instr->environment(), result, Operand(at));
  }
}


void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
  ASSERT(ToRegister(instr->global_object()).is(a0));
  ASSERT(ToRegister(instr->result()).is(v0));

  __ li(a2, Operand(instr->name()));
  RelocInfo::Mode mode = instr->for_typeof() ? RelocInfo::CODE_TARGET
                                             : RelocInfo::CODE_TARGET_CONTEXT;
  Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
  CallCode(ic, mode, instr);
}
void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
  Register value = ToRegister(instr->value());
  Register cell = scratch0();

  // Load the cell.
  __ li(cell, Operand(instr->hydrogen()->cell()));

  // If the cell we are storing to contains the hole it could have
  // been deleted from the property dictionary. In that case, we need
  // to update the property details in the property dictionary to mark
  // it as no longer deleted.
  if (instr->hydrogen()->RequiresHoleCheck()) {
    // We use a temp to check the payload.
    Register payload = ToRegister(instr->temp());
    __ lw(payload, FieldMemOperand(cell, JSGlobalPropertyCell::kValueOffset));
    __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
    DeoptimizeIf(eq, instr->environment(), payload, Operand(at));
  }

  // Store the value.
  __ sw(value, FieldMemOperand(cell, JSGlobalPropertyCell::kValueOffset));
  // Cells are always rescanned, so no write barrier here.
}


void LCodeGen::DoStoreGlobalGeneric(LStoreGlobalGeneric* instr) {
  ASSERT(ToRegister(instr->global_object()).is(a1));
  ASSERT(ToRegister(instr->value()).is(a0));

  __ li(a2, Operand(instr->name()));
  Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
      ? isolate()->builtins()->StoreIC_Initialize_Strict()
      : isolate()->builtins()->StoreIC_Initialize();
  CallCode(ic, RelocInfo::CODE_TARGET_CONTEXT, instr);
}
void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
  Register context = ToRegister(instr->context());
  Register result = ToRegister(instr->result());

  __ lw(result, ContextOperand(context, instr->slot_index()));
  if (instr->hydrogen()->RequiresHoleCheck()) {
    __ LoadRoot(at, Heap::kTheHoleValueRootIndex);

    if (instr->hydrogen()->DeoptimizesOnHole()) {
      DeoptimizeIf(eq, instr->environment(), result, Operand(at));
    } else {
      Label is_not_hole;
      __ Branch(&is_not_hole, ne, result, Operand(at));
      __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
      __ bind(&is_not_hole);
    }
  }
}


void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
  Register context = ToRegister(instr->context());
  Register value = ToRegister(instr->value());
  Register scratch = scratch0();
  MemOperand target = ContextOperand(context, instr->slot_index());

  Label skip_assignment;

  if (instr->hydrogen()->RequiresHoleCheck()) {
    __ lw(scratch, target);
    __ LoadRoot(at, Heap::kTheHoleValueRootIndex);

    if (instr->hydrogen()->DeoptimizesOnHole()) {
      DeoptimizeIf(eq, instr->environment(), scratch, Operand(at));
    } else {
      __ Branch(&skip_assignment, ne, scratch, Operand(at));
    }
  }

  __ sw(value, target);
  if (instr->hydrogen()->NeedsWriteBarrier()) {
    HType type = instr->hydrogen()->value()->type();
    SmiCheck check_needed =
        type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
    __ RecordWriteContextSlot(context,
                              target.offset(),
                              value,
                              scratch,
                              kRAHasBeenSaved,
                              kSaveFPRegs,
                              EMIT_REMEMBERED_SET,
                              check_needed);
  }

  __ bind(&skip_assignment);
}
void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
  Register object = ToRegister(instr->object());
  Register result = ToRegister(instr->result());
  if (instr->hydrogen()->is_in_object()) {
    __ lw(result, FieldMemOperand(object, instr->hydrogen()->offset()));
  } else {
    __ lw(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
    __ lw(result, FieldMemOperand(result, instr->hydrogen()->offset()));
  }
}


void LCodeGen::EmitLoadFieldOrConstantFunction(Register result,
                                               Register object,
                                               Handle<Map> type,
                                               Handle<String> name,
                                               LEnvironment* env) {
  LookupResult lookup(isolate());
  type->LookupDescriptor(NULL, *name, &lookup);
  ASSERT(lookup.IsFound() || lookup.IsCacheable());
  if (lookup.IsField()) {
    int index = lookup.GetLocalFieldIndexFromMap(*type);
    int offset = index * kPointerSize;
    if (index < 0) {
      // Negative property indices are in-object properties, indexed
      // from the end of the fixed part of the object.
      __ lw(result, FieldMemOperand(object, offset + type->instance_size()));
    } else {
      // Non-negative property indices are in the properties array.
      __ lw(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
      __ lw(result, FieldMemOperand(result, offset + FixedArray::kHeaderSize));
    }
  } else if (lookup.IsConstantFunction()) {
    Handle<JSFunction> function(lookup.GetConstantFunctionFromMap(*type));
    __ LoadHeapObject(result, function);
  } else {
    // Negative lookup. Check prototypes.
    Handle<HeapObject> current(HeapObject::cast((*type)->prototype()));
    Heap* heap = type->GetHeap();
    while (*current != heap->null_value()) {
      __ LoadHeapObject(result, current);
      __ lw(result, FieldMemOperand(result, HeapObject::kMapOffset));
      DeoptimizeIf(ne, env, result, Operand(Handle<Map>(current->map())));
      current =
          Handle<HeapObject>(HeapObject::cast(current->map()->prototype()));
    }
    __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
  }
}
void LCodeGen::DoLoadNamedFieldPolymorphic(LLoadNamedFieldPolymorphic* instr) {
  Register object = ToRegister(instr->object());
  Register result = ToRegister(instr->result());
  Register object_map = scratch0();

  int map_count = instr->hydrogen()->types()->length();
  bool need_generic = instr->hydrogen()->need_generic();

  if (map_count == 0 && !need_generic) {
    DeoptimizeIf(al, instr->environment());
    return;
  }
  Handle<String> name = instr->hydrogen()->name();
  Label done;
  __ lw(object_map, FieldMemOperand(object, HeapObject::kMapOffset));
  for (int i = 0; i < map_count; ++i) {
    bool last = (i == map_count - 1);
    Handle<Map> map = instr->hydrogen()->types()->at(i);
    Label check_passed;
    __ CompareMapAndBranch(
        object_map, map, &check_passed, eq, &check_passed);
    if (last && !need_generic) {
      DeoptimizeIf(al, instr->environment());
      __ bind(&check_passed);
      EmitLoadFieldOrConstantFunction(
          result, object, map, name, instr->environment());
    } else {
      Label next;
      __ Branch(&next);
      __ bind(&check_passed);
      EmitLoadFieldOrConstantFunction(
          result, object, map, name, instr->environment());
      __ Branch(&done);
      __ bind(&next);
    }
  }
  if (need_generic) {
    __ li(a2, Operand(name));
    Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
    CallCode(ic, RelocInfo::CODE_TARGET, instr);
  }
  __ bind(&done);
}
void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
  ASSERT(ToRegister(instr->object()).is(a0));
  ASSERT(ToRegister(instr->result()).is(v0));

  // Name is always in a2.
  __ li(a2, Operand(instr->name()));
  Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
}


void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
  Register scratch = scratch0();
  Register function = ToRegister(instr->function());
  Register result = ToRegister(instr->result());

  // Check that the function really is a function. Load map into the
  // result register.
  __ GetObjectType(function, result, scratch);
  DeoptimizeIf(ne, instr->environment(), scratch, Operand(JS_FUNCTION_TYPE));

  // Make sure that the function has an instance prototype.
  Label non_instance;
  __ lbu(scratch, FieldMemOperand(result, Map::kBitFieldOffset));
  __ And(scratch, scratch, Operand(1 << Map::kHasNonInstancePrototype));
  __ Branch(&non_instance, ne, scratch, Operand(zero_reg));

  // Get the prototype or initial map from the function.
  __ lw(result,
        FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));

  // Check that the function has a prototype or an initial map.
  __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
  DeoptimizeIf(eq, instr->environment(), result, Operand(at));

  // If the function does not have an initial map, we're done.
  Label done;
  __ GetObjectType(result, scratch, scratch);
  __ Branch(&done, ne, scratch, Operand(MAP_TYPE));

  // Get the prototype from the initial map.
  __ lw(result, FieldMemOperand(result, Map::kPrototypeOffset));
  __ Branch(&done);

  // Non-instance prototype: Fetch prototype from constructor field
  // in initial map.
  __ bind(&non_instance);
  __ lw(result, FieldMemOperand(result, Map::kConstructorOffset));

  // All done.
  __ bind(&done);
}
void LCodeGen::DoLoadElements(LLoadElements* instr) {
  Register result = ToRegister(instr->result());
  Register input = ToRegister(instr->object());
  Register scratch = scratch0();

  __ lw(result, FieldMemOperand(input, JSObject::kElementsOffset));
  if (FLAG_debug_code) {
    Label done, fail;
    __ lw(scratch, FieldMemOperand(result, HeapObject::kMapOffset));
    __ LoadRoot(at, Heap::kFixedArrayMapRootIndex);
    __ Branch(&done, eq, scratch, Operand(at));
    __ LoadRoot(at, Heap::kFixedCOWArrayMapRootIndex);
    __ Branch(&done, eq, scratch, Operand(at));
    // |scratch| still contains the elements' map. Extract its elements kind
    // and check for fast or external elements.
    __ lbu(scratch, FieldMemOperand(scratch, Map::kBitField2Offset));
    __ Ext(scratch, scratch, Map::kElementsKindShift,
           Map::kElementsKindBitCount);
    __ Branch(&fail, lt, scratch,
              Operand(GetInitialFastElementsKind()));
    __ Branch(&done, le, scratch,
              Operand(TERMINAL_FAST_ELEMENTS_KIND));
    __ Branch(&fail, lt, scratch,
              Operand(FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND));
    __ Branch(&done, le, scratch,
              Operand(LAST_EXTERNAL_ARRAY_ELEMENTS_KIND));
    __ bind(&fail);
    __ Abort("Check for fast or external elements failed.");
    __ bind(&done);
  }
}


void LCodeGen::DoLoadExternalArrayPointer(
    LLoadExternalArrayPointer* instr) {
  Register to_reg = ToRegister(instr->result());
  Register from_reg = ToRegister(instr->object());
  __ lw(to_reg, FieldMemOperand(from_reg,
                                ExternalArray::kExternalPointerOffset));
}
void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
  Register arguments = ToRegister(instr->arguments());
  Register length = ToRegister(instr->length());
  Register index = ToRegister(instr->index());
  Register result = ToRegister(instr->result());

  // The argument at index is at arguments + (length - index + 1) words,
  // counting from the frame pointer side.
  __ subu(length, length, index);
  __ Addu(length, length, Operand(1));
  __ sll(length, length, kPointerSizeLog2);
  __ Addu(at, arguments, Operand(length));
  __ lw(result, MemOperand(at, 0));
}


void LCodeGen::DoLoadKeyedFastElement(LLoadKeyedFastElement* instr) {
  Register elements = ToRegister(instr->elements());
  Register result = ToRegister(instr->result());
  Register scratch = scratch0();
  Register store_base = scratch;
  int offset = 0;

  if (instr->key()->IsConstantOperand()) {
    LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
    offset = FixedArray::OffsetOfElementAt(ToInteger32(const_operand) +
                                           instr->additional_index());
    store_base = elements;
  } else {
    Register key = EmitLoadRegister(instr->key(), scratch);
    // The key can be tagged (a smi) or untagged, depending on what bounds
    // check elimination substituted for it; handle both cases.
    if (instr->hydrogen()->key()->representation().IsTagged()) {
      __ sll(scratch, key, kPointerSizeLog2 - kSmiTagSize);
      __ addu(scratch, elements, scratch);
    } else {
      __ sll(scratch, key, kPointerSizeLog2);
      __ addu(scratch, elements, scratch);
    }
    offset = FixedArray::OffsetOfElementAt(instr->additional_index());
  }
  __ lw(result, FieldMemOperand(store_base, offset));

  // Check for the hole value.
  if (instr->hydrogen()->RequiresHoleCheck()) {
    if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
      __ And(scratch, result, Operand(kSmiTagMask));
      DeoptimizeIf(ne, instr->environment(), scratch, Operand(zero_reg));
    } else {
      __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
      DeoptimizeIf(eq, instr->environment(), result, Operand(scratch));
    }
  }
}
void LCodeGen::DoLoadKeyedFastDoubleElement(
    LLoadKeyedFastDoubleElement* instr) {
  Register elements = ToRegister(instr->elements());
  bool key_is_constant = instr->key()->IsConstantOperand();
  Register key = no_reg;
  DoubleRegister result = ToDoubleRegister(instr->result());
  Register scratch = scratch0();

  int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
  int shift_size = (instr->hydrogen()->key()->representation().IsTagged())
      ? (element_size_shift - kSmiTagSize) : element_size_shift;
  int constant_key = 0;
  if (key_is_constant) {
    constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
    if (constant_key & 0xF0000000) {
      Abort("array index constant value too big.");
    }
  } else {
    key = ToRegister(instr->key());
  }

  if (key_is_constant) {
    __ Addu(elements, elements,
        Operand(((constant_key + instr->additional_index()) <<
                 element_size_shift) +
                FixedDoubleArray::kHeaderSize - kHeapObjectTag));
  } else {
    __ sll(scratch, key, shift_size);
    __ Addu(elements, elements, Operand(scratch));
    __ Addu(elements, elements,
        Operand((FixedDoubleArray::kHeaderSize - kHeapObjectTag) +
                (instr->additional_index() << element_size_shift)));
  }

  if (instr->hydrogen()->RequiresHoleCheck()) {
    __ lw(scratch, MemOperand(elements, sizeof(kHoleNanLower32)));
    DeoptimizeIf(eq, instr->environment(), scratch, Operand(kHoleNanUpper32));
  }

  __ ldc1(result, MemOperand(elements));
}
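// PrepareKeyedOperand below folds the key, the element-size shift, and any
// additional index or offset into a single MemOperand. MIPS has no scaled
// addressing mode, so the shifted key is first absorbed into scratch0(); a
// negative shift_size means the key is a smi that still carries its tag and
// must be shifted right by one instead.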
MemOperand LCodeGen::PrepareKeyedOperand(Register key,
                                         Register base,
                                         bool key_is_constant,
                                         int constant_key,
                                         int element_size,
                                         int shift_size,
                                         int additional_index,
                                         int additional_offset) {
  if (additional_index != 0 && !key_is_constant) {
    additional_index *= 1 << (element_size - shift_size);
    __ Addu(scratch0(), key, Operand(additional_index));
  }

  if (key_is_constant) {
    return MemOperand(base,
                      (constant_key << element_size) + additional_offset);
  }

  if (additional_index == 0) {
    if (shift_size >= 0) {
      __ sll(scratch0(), key, shift_size);
      __ Addu(scratch0(), base, scratch0());
      return MemOperand(scratch0());
    } else {
      ASSERT_EQ(-1, shift_size);
      __ srl(scratch0(), key, 1);
      __ Addu(scratch0(), base, scratch0());
      return MemOperand(scratch0());
    }
  }

  if (shift_size >= 0) {
    __ sll(scratch0(), scratch0(), shift_size);
    __ Addu(scratch0(), base, scratch0());
    return MemOperand(scratch0());
  } else {
    ASSERT_EQ(-1, shift_size);
    __ srl(scratch0(), scratch0(), 1);
    __ Addu(scratch0(), base, scratch0());
    return MemOperand(scratch0());
  }
}
void LCodeGen::DoLoadKeyedSpecializedArrayElement(
    LLoadKeyedSpecializedArrayElement* instr) {
  Register external_pointer = ToRegister(instr->external_pointer());
  Register key = no_reg;
  ElementsKind elements_kind = instr->elements_kind();
  bool key_is_constant = instr->key()->IsConstantOperand();
  int constant_key = 0;
  if (key_is_constant) {
    constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
    if (constant_key & 0xF0000000) {
      Abort("array index constant value too big.");
    }
  } else {
    key = ToRegister(instr->key());
  }
  int element_size_shift = ElementsKindToShiftSize(elements_kind);
  int shift_size = (instr->hydrogen()->key()->representation().IsTagged())
      ? (element_size_shift - kSmiTagSize) : element_size_shift;
  int additional_offset = instr->additional_index() << element_size_shift;

  if (elements_kind == EXTERNAL_FLOAT_ELEMENTS ||
      elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
    FPURegister result = ToDoubleRegister(instr->result());
    if (key_is_constant) {
      __ Addu(scratch0(), external_pointer,
              constant_key << element_size_shift);
    } else {
      __ sll(scratch0(), key, shift_size);
      __ Addu(scratch0(), scratch0(), external_pointer);
    }

    if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
      __ lwc1(result, MemOperand(scratch0(), additional_offset));
      __ cvt_d_s(result, result);
    } else {  // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS
      __ ldc1(result, MemOperand(scratch0(), additional_offset));
    }
  } else {
    Register result = ToRegister(instr->result());
    MemOperand mem_operand = PrepareKeyedOperand(
        key, external_pointer, key_is_constant, constant_key,
        element_size_shift, shift_size,
        instr->additional_index(), additional_offset);
    switch (elements_kind) {
      case EXTERNAL_BYTE_ELEMENTS:
        __ lb(result, mem_operand);
        break;
      case EXTERNAL_PIXEL_ELEMENTS:
      case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
        __ lbu(result, mem_operand);
        break;
      case EXTERNAL_SHORT_ELEMENTS:
        __ lh(result, mem_operand);
        break;
      case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
        __ lhu(result, mem_operand);
        break;
      case EXTERNAL_INT_ELEMENTS:
        __ lw(result, mem_operand);
        break;
      case EXTERNAL_UNSIGNED_INT_ELEMENTS:
        __ lw(result, mem_operand);
        if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
          DeoptimizeIf(Ugreater_equal, instr->environment(),
                       result, Operand(0x80000000));
        }
        break;
      default:
        UNREACHABLE();
        break;
    }
  }
}
void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
  ASSERT(ToRegister(instr->object()).is(a1));
  ASSERT(ToRegister(instr->key()).is(a0));

  Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
}


void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
  Register scratch = scratch0();
  Register temp = scratch1();
  Register result = ToRegister(instr->result());

  if (instr->hydrogen()->from_inlined()) {
    __ Subu(result, sp, 2 * kPointerSize);
  } else {
    // Check if the calling frame is an arguments adaptor frame.
    Label done, adapted;
    __ lw(scratch, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
    __ lw(result, MemOperand(scratch, StandardFrameConstants::kContextOffset));
    __ Xor(temp, result,
           Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));

    // Result is the frame pointer for the frame if not adapted and for the
    // real frame below the adaptor frame if adapted.
    __ Movn(result, fp, temp);  // Move only if temp is not equal to zero (ne).
    __ Movz(result, scratch, temp);  // Move only if temp is equal to zero (eq).
  }
}
void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
  Register elem = ToRegister(instr->elements());
  Register result = ToRegister(instr->result());

  Label done;

  // If no arguments adaptor frame the number of arguments is fixed.
  __ Addu(result, zero_reg, Operand(scope()->num_parameters()));
  __ Branch(&done, eq, fp, Operand(elem));

  // Arguments adaptor frame present. Get argument length from there.
  __ lw(result, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
  __ lw(result,
        MemOperand(result, ArgumentsAdaptorFrameConstants::kLengthOffset));
  __ SmiUntag(result);

  // Argument length is in result register.
  __ bind(&done);
}
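// Receiver wrapping rules, implemented by DoWrapReceiver below: classic
// mode, non-native functions replace a null or undefined receiver with the
// global receiver object and deoptimize if the receiver is not a JS object;
// strict-mode and native functions take the receiver unchanged.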
void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
  Register receiver = ToRegister(instr->receiver());
  Register function = ToRegister(instr->function());
  Register scratch = scratch0();

  // If the receiver is null or undefined, we have to pass the global
  // object as a receiver to normal functions. Values have to be
  // passed unchanged to builtins and strict-mode functions.
  Label global_object, receiver_ok;

  // Do not transform the receiver to object for strict mode
  // functions or builtins.
  __ lw(scratch,
        FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
  __ lw(scratch,
        FieldMemOperand(scratch, SharedFunctionInfo::kCompilerHintsOffset));
  int32_t strict_mode_function_mask =
      1 << (SharedFunctionInfo::kStrictModeFunction + kSmiTagSize);
  int32_t native_mask = 1 << (SharedFunctionInfo::kNative + kSmiTagSize);
  __ And(scratch, scratch, Operand(strict_mode_function_mask | native_mask));
  __ Branch(&receiver_ok, ne, scratch, Operand(zero_reg));

  // Normal function. Replace undefined or null with global receiver.
  __ LoadRoot(scratch, Heap::kNullValueRootIndex);
  __ Branch(&global_object, eq, receiver, Operand(scratch));
  __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
  __ Branch(&global_object, eq, receiver, Operand(scratch));

  // Deoptimize if the receiver is not a JS object.
  __ And(scratch, receiver, Operand(kSmiTagMask));
  DeoptimizeIf(eq, instr->environment(), scratch, Operand(zero_reg));

  __ GetObjectType(receiver, scratch, scratch);
  DeoptimizeIf(lt, instr->environment(),
               scratch, Operand(FIRST_SPEC_OBJECT_TYPE));
  __ Branch(&receiver_ok);

  __ bind(&global_object);
  __ lw(receiver, GlobalObjectOperand());
  __ lw(receiver,
        FieldMemOperand(receiver, JSGlobalObject::kGlobalReceiverOffset));
  __ bind(&receiver_ok);
}
void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
  Register receiver = ToRegister(instr->receiver());
  Register function = ToRegister(instr->function());
  Register length = ToRegister(instr->length());
  Register elements = ToRegister(instr->elements());
  Register scratch = scratch0();
  ASSERT(receiver.is(a0));  // Used for parameter count.
  ASSERT(function.is(a1));  // Required by InvokeFunction.
  ASSERT(ToRegister(instr->result()).is(v0));

  // Copy the arguments to this function possibly from the
  // adaptor frame below it.
  const uint32_t kArgumentsLimit = 1 * KB;
  DeoptimizeIf(hi, instr->environment(), length, Operand(kArgumentsLimit));

  // Push the receiver and use the register to keep the original
  // number of arguments.
  __ push(receiver);
  __ Move(receiver, length);
  // The arguments are at a one pointer size offset from elements.
  __ Addu(elements, elements, Operand(1 * kPointerSize));

  // Loop through the arguments pushing them onto the execution
  // stack.
  Label invoke, loop;
  // length is a small non-negative integer, due to the test above.
  __ Branch(&invoke, eq, length, Operand(zero_reg));
  __ bind(&loop);
  __ sll(scratch, length, 2);
  __ Addu(scratch, elements, scratch);
  __ lw(scratch, MemOperand(scratch));
  __ push(scratch);
  __ Subu(length, length, Operand(1));
  __ Branch(&loop, ne, length, Operand(zero_reg));

  __ bind(&invoke);
  ASSERT(instr->HasPointerMap());
  LPointerMap* pointers = instr->pointer_map();
  RecordPosition(pointers->position());
  SafepointGenerator safepoint_generator(
      this, pointers, Safepoint::kLazyDeopt);
  // The number of arguments is stored in receiver which is a0, as expected
  // by InvokeFunction.
  ParameterCount actual(receiver);
  __ InvokeFunction(function, actual, CALL_FUNCTION,
                    safepoint_generator, CALL_AS_METHOD);
  __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
}
void LCodeGen::DoPushArgument(LPushArgument* instr) {
  LOperand* argument = instr->value();
  if (argument->IsDoubleRegister() || argument->IsDoubleStackSlot()) {
    Abort("DoPushArgument not implemented for double type.");
  } else {
    Register argument_reg = EmitLoadRegister(argument, at);
    __ push(argument_reg);
  }
}


void LCodeGen::DoDrop(LDrop* instr) {
  __ Drop(instr->count());
}


void LCodeGen::DoThisFunction(LThisFunction* instr) {
  Register result = ToRegister(instr->result());
  __ lw(result, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
}


void LCodeGen::DoContext(LContext* instr) {
  Register result = ToRegister(instr->result());
  __ mov(result, cp);
}


void LCodeGen::DoOuterContext(LOuterContext* instr) {
  Register context = ToRegister(instr->context());
  Register result = ToRegister(instr->result());
  __ lw(result,
        MemOperand(context, Context::SlotOffset(Context::PREVIOUS_INDEX)));
}


void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
  __ LoadHeapObject(scratch0(), instr->hydrogen()->pairs());
  __ li(scratch1(), Operand(Smi::FromInt(instr->hydrogen()->flags())));
  // The context is the first argument.
  __ Push(cp, scratch0(), scratch1());
  CallRuntime(Runtime::kDeclareGlobals, 3, instr);
}


void LCodeGen::DoGlobalObject(LGlobalObject* instr) {
  Register result = ToRegister(instr->result());
  __ lw(result, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
}


void LCodeGen::DoGlobalReceiver(LGlobalReceiver* instr) {
  Register global = ToRegister(instr->global_object());
  Register result = ToRegister(instr->result());
  __ lw(result, FieldMemOperand(global, GlobalObject::kGlobalReceiverOffset));
}
void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
                                 int arity,
                                 LInstruction* instr,
                                 CallKind call_kind,
                                 A1State a1_state) {
  bool can_invoke_directly = !function->NeedsArgumentsAdaption() ||
      function->shared()->formal_parameter_count() == arity;

  LPointerMap* pointers = instr->pointer_map();
  RecordPosition(pointers->position());

  if (can_invoke_directly) {
    if (a1_state == A1_UNINITIALIZED) {
      __ LoadHeapObject(a1, function);
    }

    // ... (change the context to the function's context)

    // Set a0 to the number of arguments if adaption is not needed. Assumes
    // that a0 states the number of arguments.
    if (!function->NeedsArgumentsAdaption()) {
      __ li(a0, Operand(arity));
    }

    // Invoke the function directly through its code entry.
    __ SetCallKind(t1, call_kind);
    // ... (load the code entry from the function object and call it)
    RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
  } else {
    SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
    ParameterCount count(arity);
    __ InvokeFunction(function, count, CALL_FUNCTION, generator, call_kind);
  }

  // ... (restore the context register from the frame)
}


void LCodeGen::DoCallConstantFunction(LCallConstantFunction* instr) {
  ASSERT(ToRegister(instr->result()).is(v0));
  CallKnownFunction(instr->function(),
                    instr->arity(),
                    instr,
                    CALL_AS_METHOD,
                    A1_UNINITIALIZED);
}
void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr) {
  Register input = ToRegister(instr->value());
  Register result = ToRegister(instr->result());
  Register scratch = scratch0();

  // Deoptimize if the input is not a heap number.
  __ lw(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
  __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
  DeoptimizeIf(ne, instr->environment(), scratch, Operand(at));

  Label done;
  Register exponent = scratch0();
  __ lw(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));
  // Check the sign of the argument. If the argument is positive, just
  // return it.
  __ Move(result, input);
  __ And(at, exponent, Operand(HeapNumber::kSignMask));
  __ Branch(&done, eq, at, Operand(zero_reg));

  // Input is negative. Reverse its sign. Preserve the value of all
  // registers.
  {
    PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);

    // Registers were saved at the safepoint, so we can use many scratch
    // registers.
    Register tmp1 = input.is(a1) ? a0 : a1;
    Register tmp2 = input.is(a2) ? a0 : a2;
    Register tmp3 = input.is(a3) ? a0 : a3;
    Register tmp4 = input.is(t0) ? a0 : t0;

    Label allocated, slow;
    __ LoadRoot(tmp4, Heap::kHeapNumberMapRootIndex);
    __ AllocateHeapNumber(tmp1, tmp2, tmp3, tmp4, &slow);
    __ Branch(&allocated);

    // Slow case: call the runtime system to do the number allocation.
    __ bind(&slow);
    CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
    // ... (move the allocation result into tmp1)
    // Restore input after the call to the runtime.
    __ LoadFromSafepointRegisterSlot(input, input);

    __ bind(&allocated);
    // ... (copy the mantissa and the sign-cleared exponent word from the
    //      input into the freshly allocated heap number in tmp1)
    __ StoreToSafepointRegisterSlot(tmp1, result);
  }

  __ bind(&done);
}
void LCodeGen::EmitIntegerMathAbs(LUnaryMathOperation* instr) {
  Register input = ToRegister(instr->value());
  Register result = ToRegister(instr->result());
  Label done;
  __ Branch(USE_DELAY_SLOT, &done, ge, input, Operand(zero_reg));
  __ mov(result, input);  // In delay slot: taken when input >= 0.
  ASSERT_EQ(2, masm()->InstructionsGeneratedSince(&done));
  __ subu(result, zero_reg, input);
  // Overflow if result is still negative, i.e. 0x80000000.
  DeoptimizeIf(lt, instr->environment(), result, Operand(zero_reg));
  __ bind(&done);
}
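// The sequence above computes the integer absolute value with a negate and
// a single deopt check afterwards. A plain C sketch (illustration only):
//
//   int32_t abs32(int32_t x) {      // INT_MIN has no positive twin, so
//     int32_t r = x < 0 ? -x : x;   // -0x80000000 wraps back to itself;
//     if (r < 0) deoptimize();      // the 'lt' deopt catches exactly that.
//     return r;
//   }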
void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) {
  // Class for the deferred case.
  class DeferredMathAbsTaggedHeapNumber: public LDeferredCode {
   public:
    DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen,
                                    LUnaryMathOperation* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() {
      codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
    }
    virtual LInstruction* instr() { return instr_; }
   private:
    LUnaryMathOperation* instr_;
  };

  Representation r = instr->hydrogen()->value()->representation();
  if (r.IsDouble()) {
    FPURegister input = ToDoubleRegister(instr->value());
    FPURegister result = ToDoubleRegister(instr->result());
    __ abs_d(result, input);
  } else if (r.IsInteger32()) {
    EmitIntegerMathAbs(instr);
  } else {
    // Representation is tagged.
    DeferredMathAbsTaggedHeapNumber* deferred =
        new(zone()) DeferredMathAbsTaggedHeapNumber(this, instr);
    Register input = ToRegister(instr->value());
    // Smi check.
    __ JumpIfNotSmi(input, deferred->entry());
    // If smi, handle it directly.
    EmitIntegerMathAbs(instr);
    __ bind(deferred->exit());
  }
}
void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) {
  DoubleRegister input = ToDoubleRegister(instr->value());
  Register result = ToRegister(instr->result());
  FPURegister single_scratch = double_scratch0().low();
  Register scratch1 = scratch0();
  Register except_flag = ToRegister(instr->temp());

  __ EmitFPUTruncate(kRoundToMinusInf,
                     single_scratch,
                     input,
                     scratch1,
                     except_flag);

  // Deopt if the operation did not succeed.
  DeoptimizeIf(ne, instr->environment(), except_flag, Operand(zero_reg));

  // Load the result.
  __ mfc1(result, single_scratch);

  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    // Test for -0.
    Label done;
    __ Branch(&done, ne, result, Operand(zero_reg));
    __ mfc1(scratch1, input.high());
    __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
    DeoptimizeIf(ne, instr->environment(), scratch1, Operand(zero_reg));
    __ bind(&done);
  }
}
void LCodeGen::DoMathRound(LUnaryMathOperation* instr) {
  DoubleRegister input = ToDoubleRegister(instr->value());
  Register result = ToRegister(instr->result());
  Register scratch = scratch0();
  Label done, check_sign_on_zero;

  // Extract the exponent bits from the high word.
  __ mfc1(result, input.high());
  __ Ext(scratch, result,
         HeapNumber::kExponentShift, HeapNumber::kExponentBits);

  // If the number is in ]-0.5, +0.5[, the result is +/- 0.
  Label skip1;
  __ Branch(&skip1, gt, scratch, Operand(HeapNumber::kExponentBias - 2));
  __ mov(result, zero_reg);
  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    __ Branch(&check_sign_on_zero);
  } else {
    __ Branch(&done);
  }
  __ bind(&skip1);

  // The following conversion will not work with numbers
  // outside of ]-2^32, 2^32[.
  DeoptimizeIf(ge, instr->environment(), scratch,
               Operand(HeapNumber::kExponentBias + 32));

  // Save the original sign for the later comparison.
  __ And(scratch, result, Operand(HeapNumber::kSignMask));

  __ Move(double_scratch0(), 0.5);
  __ add_d(double_scratch0(), input, double_scratch0());

  // Check sign of the result: if the sign changed, the input
  // value was in ]-0.5, 0[ and the result should be -0.
  __ mfc1(result, double_scratch0().high());
  __ Xor(result, result, Operand(scratch));
  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    DeoptimizeIf(lt, instr->environment(), result, Operand(zero_reg));
  } else {
    Label skip2;
    __ Branch(&skip2, ge, result, Operand(zero_reg));
    __ mov(result, zero_reg);
    __ Branch(&done);
    __ bind(&skip2);
  }

  Register except_flag = scratch;
  __ EmitFPUTruncate(kRoundToMinusInf,
                     double_scratch0().low(),
                     double_scratch0(),
                     result,
                     except_flag);
  DeoptimizeIf(ne, instr->environment(), except_flag, Operand(zero_reg));
  __ mfc1(result, double_scratch0().low());

  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    // Test for -0.
    __ Branch(&done, ne, result, Operand(zero_reg));
    __ bind(&check_sign_on_zero);
    __ mfc1(scratch, input.high());
    __ And(scratch, scratch, Operand(HeapNumber::kSignMask));
    DeoptimizeIf(ne, instr->environment(), scratch, Operand(zero_reg));
  }
  __ bind(&done);
}
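// Rounding above is floor(input + 0.5), plus two fix-ups the plain formula
// misses (a sketch of the intent, not code from this file):
//
//   round(2.5)  == floor(3.0) == 3    // ties round toward +infinity
//   round(-0.4) == floor(0.1) == 0    // but JS requires -0 here, hence the
//                                     // sign-change (Xor) check above
//
// Inputs with exponent >= 32 cannot survive the int32 conversion, so they
// deoptimize up front instead of producing a wrong answer.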
void LCodeGen::DoMathSqrt(LUnaryMathOperation* instr) {
  DoubleRegister input = ToDoubleRegister(instr->value());
  DoubleRegister result = ToDoubleRegister(instr->result());
  __ sqrt_d(result, input);
}


void LCodeGen::DoMathPowHalf(LUnaryMathOperation* instr) {
  DoubleRegister input = ToDoubleRegister(instr->value());
  DoubleRegister result = ToDoubleRegister(instr->result());
  DoubleRegister temp = ToDoubleRegister(instr->temp());

  ASSERT(!input.is(result));

  // Note that according to ECMA-262 15.8.2.13:
  //   Math.pow(-Infinity, 0.5) == Infinity
  //   Math.sqrt(-Infinity)     == NaN
  Label done;
  __ Move(temp, -V8_INFINITY);
  __ BranchF(USE_DELAY_SLOT, &done, NULL, eq, temp, input);
  // Set up Infinity in the delay slot; result is overwritten if the
  // branch is not taken.
  __ neg_d(result, temp);

  // Add +0 to convert -0 to +0.
  __ add_d(result, input, kDoubleRegZero);
  __ sqrt_d(result, result);
  __ bind(&done);
}


void LCodeGen::DoPower(LPower* instr) {
  Representation exponent_type = instr->hydrogen()->right()->representation();
  // Having marked this as a call, we can use any registers.
  // Just make sure that the input/output registers are the expected ones.
  ASSERT(!instr->right()->IsDoubleRegister() ||
         ToDoubleRegister(instr->right()).is(f4));
  ASSERT(!instr->right()->IsRegister() ||
         ToRegister(instr->right()).is(a2));
  ASSERT(ToDoubleRegister(instr->left()).is(f2));
  ASSERT(ToDoubleRegister(instr->result()).is(f0));

  if (exponent_type.IsTagged()) {
    Label no_deopt;
    __ JumpIfSmi(a2, &no_deopt);
    __ lw(t3, FieldMemOperand(a2, HeapObject::kMapOffset));
    __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
    DeoptimizeIf(ne, instr->environment(), t3, Operand(at));
    __ bind(&no_deopt);
    MathPowStub stub(MathPowStub::TAGGED);
    __ CallStub(&stub);
  } else if (exponent_type.IsInteger32()) {
    MathPowStub stub(MathPowStub::INTEGER);
    __ CallStub(&stub);
  } else {
    ASSERT(exponent_type.IsDouble());
    MathPowStub stub(MathPowStub::DOUBLE);
    __ CallStub(&stub);
  }
}
void LCodeGen::DoRandom(LRandom* instr) {
  class DeferredDoRandom: public LDeferredCode {
   public:
    DeferredDoRandom(LCodeGen* codegen, LRandom* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() { codegen()->DoDeferredRandom(instr_); }
    virtual LInstruction* instr() { return instr_; }
   private:
    LRandom* instr_;
  };

  DeferredDoRandom* deferred = new(zone()) DeferredDoRandom(this, instr);
  // Having marked this instruction as a call we can use any registers.
  ASSERT(ToDoubleRegister(instr->result()).is(f0));

  static const int kSeedSize = sizeof(uint32_t);
  STATIC_ASSERT(kPointerSize == kSeedSize);

  // Load the random seed array from the native context.
  static const int kRandomSeedOffset =
      FixedArray::kHeaderSize + Context::RANDOM_SEED_INDEX * kPointerSize;
  // ... (load the seed array into a2, then state[0] into a1)
  __ Branch(deferred->entry(), eq, a1, Operand(zero_reg));
  // ... (load state[1] into a0)

  // state[0] = 18273 * (state[0] & 0xFFFF) + (state[0] >> 16)
  __ And(a3, a1, Operand(0xFFFF));
  __ li(t0, Operand(18273));
  __ Mul(a3, a3, t0);
  __ srl(a1, a1, 16);
  __ Addu(a1, a3, a1);
  // ... (store state[0] back to the seed array)

  // state[1] = 36969 * (state[1] & 0xFFFF) + (state[1] >> 16)
  __ And(a3, a0, Operand(0xFFFF));
  __ li(t0, Operand(36969));
  __ Mul(a3, a3, t0);
  __ srl(a0, a0, 16);
  __ Addu(a0, a3, a0);
  // ... (store state[1] back to the seed array)

  // Random bit pattern = (state[0] << 14) + (state[1] & 0x3FFFF)
  __ And(a0, a0, Operand(0x3FFFF));
  __ sll(a1, a1, 14);
  __ Addu(v0, a0, a1);

  __ bind(deferred->exit());

  // 0x41300000 is the top half of 1.0 x 2^20 as a double.
  __ li(a2, Operand(0x41300000));
  // Move 0x41300000'xxxxxxxx (x = random bits in v0) to FPU.
  __ Move(f12, v0, a2);
  // Move 0x41300000'00000000 to FPU.
  __ Move(f14, zero_reg, a2);
  // Subtract to get the result in [0, 1).
  __ sub_d(f0, f12, f14);
}
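// The generator above is two coupled 16-bit multiply-with-carry sequences;
// a C sketch of the same computation (illustration, not part of this file):
//
//   state0 = 18273u * (state0 & 0xFFFF) + (state0 >> 16);
//   state1 = 36969u * (state1 & 0xFFFF) + (state1 >> 16);
//   uint32_t bits = (state0 << 14) + (state1 & 0x3FFFF);
//
// The bits are then turned into a double without an int-to-float convert:
// 0x41300000 is the upper half of 2^20, so placing the random bits in the
// low mantissa word yields the double 2^20 + bits * 2^-32; subtracting 2^20
// leaves a uniform value in [0, 1).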
void LCodeGen::DoDeferredRandom(LRandom* instr) {
  __ PrepareCallCFunction(1, scratch0());
  __ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1);
  // Return value is in v0.
}


void LCodeGen::DoMathLog(LUnaryMathOperation* instr) {
  ASSERT(ToDoubleRegister(instr->result()).is(f4));
  TranscendentalCacheStub stub(TranscendentalCache::LOG,
                               TranscendentalCacheStub::UNTAGGED);
  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}


void LCodeGen::DoMathTan(LUnaryMathOperation* instr) {
  ASSERT(ToDoubleRegister(instr->result()).is(f4));
  TranscendentalCacheStub stub(TranscendentalCache::TAN,
                               TranscendentalCacheStub::UNTAGGED);
  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}


void LCodeGen::DoMathCos(LUnaryMathOperation* instr) {
  ASSERT(ToDoubleRegister(instr->result()).is(f4));
  TranscendentalCacheStub stub(TranscendentalCache::COS,
                               TranscendentalCacheStub::UNTAGGED);
  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}


void LCodeGen::DoMathSin(LUnaryMathOperation* instr) {
  ASSERT(ToDoubleRegister(instr->result()).is(f4));
  TranscendentalCacheStub stub(TranscendentalCache::SIN,
                               TranscendentalCacheStub::UNTAGGED);
  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}


void LCodeGen::DoUnaryMathOperation(LUnaryMathOperation* instr) {
  switch (instr->op()) {
    // ... (cases dispatching to DoMathAbs, DoMathFloor, DoMathRound,
    //      DoMathSqrt, DoMathLog, DoMathTan, DoMathCos and DoMathSin)
    case kMathPowHalf:
      DoMathPowHalf(instr);
      break;
    default:
      Abort("Unimplemented type of LUnaryMathOperation.");
  }
}
void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
  ASSERT(ToRegister(instr->function()).is(a1));
  ASSERT(instr->HasPointerMap());

  if (instr->known_function().is_null()) {
    LPointerMap* pointers = instr->pointer_map();
    RecordPosition(pointers->position());
    SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
    ParameterCount count(instr->arity());
    __ InvokeFunction(a1, count, CALL_FUNCTION, generator, CALL_AS_METHOD);
    // ... (restore the context register from the frame)
  } else {
    CallKnownFunction(instr->known_function(),
                      instr->arity(),
                      instr,
                      CALL_AS_METHOD,
                      A1_CONTAINS_TARGET);
  }
}


void LCodeGen::DoCallKeyed(LCallKeyed* instr) {
  ASSERT(ToRegister(instr->result()).is(v0));

  int arity = instr->arity();
  Handle<Code> ic =
      isolate()->stub_cache()->ComputeKeyedCallInitialize(arity);
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
}


void LCodeGen::DoCallNamed(LCallNamed* instr) {
  ASSERT(ToRegister(instr->result()).is(v0));

  int arity = instr->arity();
  RelocInfo::Mode mode = RelocInfo::CODE_TARGET;
  Handle<Code> ic =
      isolate()->stub_cache()->ComputeCallInitialize(arity, mode);
  __ li(a2, Operand(instr->name()));
  CallCode(ic, mode, instr);
}


void LCodeGen::DoCallFunction(LCallFunction* instr) {
  ASSERT(ToRegister(instr->function()).is(a1));
  ASSERT(ToRegister(instr->result()).is(v0));

  int arity = instr->arity();
  CallFunctionStub stub(arity, NO_CALL_FUNCTION_FLAGS);
  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}


void LCodeGen::DoCallGlobal(LCallGlobal* instr) {
  ASSERT(ToRegister(instr->result()).is(v0));

  int arity = instr->arity();
  RelocInfo::Mode mode = RelocInfo::CODE_TARGET_CONTEXT;
  Handle<Code> ic =
      isolate()->stub_cache()->ComputeCallInitialize(arity, mode);
  __ li(a2, Operand(instr->name()));
  CallCode(ic, mode, instr);
}


void LCodeGen::DoCallKnownGlobal(LCallKnownGlobal* instr) {
  ASSERT(ToRegister(instr->result()).is(v0));
  CallKnownFunction(instr->target(),
                    instr->arity(),
                    instr,
                    CALL_AS_FUNCTION,
                    A1_UNINITIALIZED);
}


void LCodeGen::DoCallNew(LCallNew* instr) {
  ASSERT(ToRegister(instr->constructor()).is(a1));
  ASSERT(ToRegister(instr->result()).is(v0));

  CallConstructStub stub(NO_CALL_FUNCTION_FLAGS);
  __ li(a0, Operand(instr->arity()));
  CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
}


void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
  CallRuntime(instr->function(), instr->arity(), instr);
}
void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
  Register object = ToRegister(instr->object());
  Register value = ToRegister(instr->value());
  Register scratch = scratch0();
  int offset = instr->offset();

  ASSERT(!object.is(value));

  if (!instr->transition().is_null()) {
    __ li(scratch, Operand(instr->transition()));
    __ sw(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
    if (instr->hydrogen()->NeedsWriteBarrierForMap()) {
      Register temp = ToRegister(instr->temp());
      // Update the write barrier for the map field.
      __ RecordWriteField(object, HeapObject::kMapOffset, scratch, temp,
                          kRAHasBeenSaved, kSaveFPRegs,
                          OMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
    }
  }

  // Do the store.
  HType type = instr->hydrogen()->value()->type();
  SmiCheck check_needed =
      type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
  if (instr->is_in_object()) {
    __ sw(value, FieldMemOperand(object, offset));
    if (instr->hydrogen()->NeedsWriteBarrier()) {
      // Update the write barrier for in-object properties.
      __ RecordWriteField(object, offset, value, scratch,
                          kRAHasBeenSaved, kSaveFPRegs,
                          EMIT_REMEMBERED_SET, check_needed);
    }
  } else {
    __ lw(scratch, FieldMemOperand(object, JSObject::kPropertiesOffset));
    __ sw(value, FieldMemOperand(scratch, offset));
    if (instr->hydrogen()->NeedsWriteBarrier()) {
      // Update the write barrier for the properties array.
      __ RecordWriteField(scratch, offset, value, object,
                          kRAHasBeenSaved, kSaveFPRegs,
                          EMIT_REMEMBERED_SET, check_needed);
    }
  }
}


void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
  ASSERT(ToRegister(instr->object()).is(a1));
  ASSERT(ToRegister(instr->value()).is(a0));

  // The name is always in a2.
  __ li(a2, Operand(instr->name()));
  Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
      ? isolate()->builtins()->StoreIC_Initialize_Strict()
      : isolate()->builtins()->StoreIC_Initialize();
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
void LCodeGen::DeoptIfTaggedButNotSmi(LEnvironment* environment,
                                      HValue* value,
                                      LOperand* operand) {
  if (value->representation().IsTagged() && !value->type().IsSmi()) {
    if (operand->IsRegister()) {
      __ And(at, ToRegister(operand), Operand(kSmiTagMask));
      DeoptimizeIf(ne, environment, at, Operand(zero_reg));
    } else {
      __ li(at, ToOperand(operand));
      __ And(at, at, Operand(kSmiTagMask));
      DeoptimizeIf(ne, environment, at, Operand(zero_reg));
    }
  }
}
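// Background (the tagging scheme assumed by the check above): on 32-bit
// targets a small integer n is stored as (n << 1), so its tag bit 0 is 0,
// while heap object pointers carry tag bit 0 == 1. A single And with
// kSmiTagMask (== 1) therefore distinguishes the two. C sketch:
//
//   bool is_smi(uint32_t tagged)   { return (tagged & 1) == 0; }
//   int32_t untag(uint32_t tagged) { return (int32_t)tagged >> 1; }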
void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
  DeoptIfTaggedButNotSmi(instr->environment(),
                         instr->hydrogen()->length(),
                         instr->length());
  DeoptIfTaggedButNotSmi(instr->environment(),
                         instr->hydrogen()->index(),
                         instr->index());
  if (instr->index()->IsConstantOperand()) {
    int constant_index =
        ToInteger32(LConstantOperand::cast(instr->index()));
    if (instr->hydrogen()->length()->representation().IsTagged()) {
      __ li(at, Operand(Smi::FromInt(constant_index)));
    } else {
      __ li(at, Operand(constant_index));
    }
    DeoptimizeIf(ls, instr->environment(),
                 ToRegister(instr->length()), Operand(at));
  } else {
    DeoptimizeIf(hs, instr->environment(),
                 ToRegister(instr->index()),
                 Operand(ToRegister(instr->length())));
  }
}
void LCodeGen::DoStoreKeyedFastElement(LStoreKeyedFastElement* instr) {
  Register value = ToRegister(instr->value());
  Register elements = ToRegister(instr->object());
  Register key = instr->key()->IsRegister() ? ToRegister(instr->key())
                                            : no_reg;
  Register scratch = scratch0();
  Register store_base = scratch;
  int offset = 0;

  // Do the store.
  if (instr->key()->IsConstantOperand()) {
    ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
    LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
    offset = FixedArray::OffsetOfElementAt(ToInteger32(const_operand) +
                                           instr->additional_index());
    store_base = elements;
  } else {
    // The key can still be tagged after bounds-check elimination, so both
    // representations have to be handled here.
    if (instr->hydrogen()->key()->representation().IsTagged()) {
      __ sll(scratch, key, kPointerSizeLog2 - kSmiTagSize);
      __ addu(scratch, elements, scratch);
    } else {
      __ sll(scratch, key, kPointerSizeLog2);
      __ addu(scratch, elements, scratch);
    }
    offset = FixedArray::OffsetOfElementAt(instr->additional_index());
  }
  __ sw(value, FieldMemOperand(store_base, offset));

  if (instr->hydrogen()->NeedsWriteBarrier()) {
    HType type = instr->hydrogen()->value()->type();
    SmiCheck check_needed =
        type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
    // Compute the address of the modified element and record the write.
    __ Addu(key, store_base, Operand(offset - kHeapObjectTag));
    __ RecordWrite(elements, key, value, kRAHasBeenSaved, kSaveFPRegs,
                   EMIT_REMEMBERED_SET, check_needed);
  }
}
void LCodeGen::DoStoreKeyedFastDoubleElement(
    LStoreKeyedFastDoubleElement* instr) {
  DoubleRegister value = ToDoubleRegister(instr->value());
  Register elements = ToRegister(instr->elements());
  Register key = no_reg;
  Register scratch = scratch0();
  bool key_is_constant = instr->key()->IsConstantOperand();
  int constant_key = 0;
  Label not_nan;

  // Calculate the effective address of the slot in the array to store the
  // double value.
  if (key_is_constant) {
    constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
    if (constant_key & 0xF0000000) {
      Abort("array index constant value too big.");
    }
  } else {
    key = ToRegister(instr->key());
  }
  int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
  int shift_size = (instr->hydrogen()->key()->representation().IsTagged())
      ? (element_size_shift - kSmiTagSize) : element_size_shift;
  if (key_is_constant) {
    __ Addu(scratch, elements, Operand((constant_key << element_size_shift) +
            FixedDoubleArray::kHeaderSize - kHeapObjectTag));
  } else {
    __ sll(scratch, key, shift_size);
    __ Addu(scratch, elements, Operand(scratch));
    __ Addu(scratch, scratch,
            Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
  }

  if (instr->NeedsCanonicalization()) {
    Label is_nan;
    // Check for NaN. All NaNs must be canonicalized.
    __ BranchF(NULL, &is_nan, eq, value, value);
    __ Branch(&not_nan);

    // Only load the canonical NaN if the comparison above was unordered.
    __ bind(&is_nan);
    __ Move(value, FixedDoubleArray::canonical_not_the_hole_nan_as_double());
  }

  __ bind(&not_nan);
  __ sdc1(value, MemOperand(scratch, instr->additional_index() <<
                            element_size_shift));
}
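// Canonicalization note: the self-comparison above relies on the IEEE-754
// rule that a NaN compares unordered with everything, itself included, so
// the nan label fires exactly for NaN inputs. Storing one canonical NaN bit
// pattern ensures the hole sentinel (a specific NaN payload) can never be
// forged by user code. C sketch of the test (illustration only):
//
//   bool is_nan(double d) { return d != d; }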
void LCodeGen::DoStoreKeyedSpecializedArrayElement(
    LStoreKeyedSpecializedArrayElement* instr) {
  Register external_pointer = ToRegister(instr->external_pointer());
  Register key = no_reg;
  ElementsKind elements_kind = instr->elements_kind();
  bool key_is_constant = instr->key()->IsConstantOperand();
  int constant_key = 0;
  if (key_is_constant) {
    constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
    if (constant_key & 0xF0000000) {
      Abort("array index constant value too big.");
    }
  } else {
    key = ToRegister(instr->key());
  }
  int element_size_shift = ElementsKindToShiftSize(elements_kind);
  int shift_size = (instr->hydrogen()->key()->representation().IsTagged())
      ? (element_size_shift - kSmiTagSize) : element_size_shift;
  int additional_offset = instr->additional_index() << element_size_shift;

  if (elements_kind == EXTERNAL_FLOAT_ELEMENTS ||
      elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
    FPURegister value(ToDoubleRegister(instr->value()));
    if (key_is_constant) {
      __ Addu(scratch0(), external_pointer,
              constant_key << element_size_shift);
    } else {
      __ sll(scratch0(), key, shift_size);
      __ Addu(scratch0(), scratch0(), external_pointer);
    }
    if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
      __ cvt_s_d(double_scratch0(), value);
      __ swc1(double_scratch0(), MemOperand(scratch0(), additional_offset));
    } else {  // elements_kind == EXTERNAL_DOUBLE_ELEMENTS
      __ sdc1(value, MemOperand(scratch0(), additional_offset));
    }
  } else {
    Register value(ToRegister(instr->value()));
    MemOperand mem_operand = PrepareKeyedOperand(
        key, external_pointer, key_is_constant, constant_key,
        element_size_shift, shift_size,
        instr->additional_index(), additional_offset);
    switch (elements_kind) {
      // ... (byte-sized external element kinds)
      __ sb(value, mem_operand);
      // ... (16-bit external element kinds)
      __ sh(value, mem_operand);
      // ... (32-bit external element kinds)
      __ sw(value, mem_operand);
      // ... (remaining kinds are unreachable here)
    }
  }
}


void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
  ASSERT(ToRegister(instr->object()).is(a2));
  ASSERT(ToRegister(instr->key()).is(a1));
  ASSERT(ToRegister(instr->value()).is(a0));

  Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
      ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
      : isolate()->builtins()->KeyedStoreIC_Initialize();
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
  Register object_reg = ToRegister(instr->object());
  Register new_map_reg = ToRegister(instr->new_map_temp());
  Register scratch = scratch0();

  Handle<Map> from_map = instr->original_map();
  Handle<Map> to_map = instr->transitioned_map();
  ElementsKind from_kind = from_map->elements_kind();
  ElementsKind to_kind = to_map->elements_kind();

  Label not_applicable;
  __ lw(scratch, FieldMemOperand(object_reg, HeapObject::kMapOffset));
  __ Branch(&not_applicable, ne, scratch, Operand(from_map));

  __ li(new_map_reg, Operand(to_map));
  if (IsFastSmiElementsKind(from_kind) && IsFastObjectElementsKind(to_kind)) {
    // A simple map change: store the new map and record the write.
    __ sw(new_map_reg, FieldMemOperand(object_reg, HeapObject::kMapOffset));
    // ... (write barrier for the map field)
  } else if (IsFastSmiElementsKind(from_kind) &&
             IsFastDoubleElementsKind(to_kind)) {
    Register fixed_object_reg = ToRegister(instr->temp());
    ASSERT(fixed_object_reg.is(a2));
    ASSERT(new_map_reg.is(a3));
    __ mov(fixed_object_reg, object_reg);
    CallCode(isolate()->builtins()->TransitionElementsSmiToDouble(),
             RelocInfo::CODE_TARGET, instr);
  } else if (IsFastDoubleElementsKind(from_kind) &&
             IsFastObjectElementsKind(to_kind)) {
    Register fixed_object_reg = ToRegister(instr->temp());
    ASSERT(fixed_object_reg.is(a2));
    ASSERT(new_map_reg.is(a3));
    __ mov(fixed_object_reg, object_reg);
    CallCode(isolate()->builtins()->TransitionElementsDoubleToObject(),
             RelocInfo::CODE_TARGET, instr);
  } else {
    UNREACHABLE();
  }
  __ bind(&not_applicable);
}


void LCodeGen::DoStringAdd(LStringAdd* instr) {
  __ push(ToRegister(instr->left()));
  __ push(ToRegister(instr->right()));
  StringAddStub stub(NO_STRING_CHECK_IN_STUB);
  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}
void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
  class DeferredStringCharCodeAt: public LDeferredCode {
   public:
    DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() { codegen()->DoDeferredStringCharCodeAt(instr_); }
    virtual LInstruction* instr() { return instr_; }
   private:
    LStringCharCodeAt* instr_;
  };

  DeferredStringCharCodeAt* deferred =
      new(zone()) DeferredStringCharCodeAt(this, instr);
  StringCharLoadGenerator::Generate(masm(),
                                    ToRegister(instr->string()),
                                    ToRegister(instr->index()),
                                    ToRegister(instr->result()),
                                    deferred->entry());
  __ bind(deferred->exit());
}


void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
  Register string = ToRegister(instr->string());
  Register result = ToRegister(instr->result());
  Register scratch = scratch0();

  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  __ mov(result, zero_reg);

  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
  __ push(string);
  // Push the index as a smi.
  if (instr->index()->IsConstantOperand()) {
    int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
    __ Addu(scratch, zero_reg, Operand(Smi::FromInt(const_index)));
    __ push(scratch);
  } else {
    Register index = ToRegister(instr->index());
    __ SmiTag(index);
    __ push(index);
  }
  CallRuntimeFromDeferred(Runtime::kStringCharCodeAt, 2, instr);
  __ SmiUntag(v0);
  __ StoreToSafepointRegisterSlot(v0, result);
}
void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
  class DeferredStringCharFromCode: public LDeferredCode {
   public:
    DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() {
      codegen()->DoDeferredStringCharFromCode(instr_);
    }
    virtual LInstruction* instr() { return instr_; }
   private:
    LStringCharFromCode* instr_;
  };

  DeferredStringCharFromCode* deferred =
      new(zone()) DeferredStringCharFromCode(this, instr);

  ASSERT(instr->hydrogen()->value()->representation().IsInteger32());
  Register char_code = ToRegister(instr->char_code());
  Register result = ToRegister(instr->result());
  Register scratch = scratch0();
  ASSERT(!char_code.is(result));

  // Use the single-character string cache for codes in the ASCII range;
  // everything else takes the deferred (runtime) path.
  __ Branch(deferred->entry(), hi,
            char_code, Operand(String::kMaxAsciiCharCode));
  __ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex);
  __ sll(scratch, char_code, kPointerSizeLog2);
  __ Addu(result, result, scratch);
  __ lw(result, FieldMemOperand(result, FixedArray::kHeaderSize));
  __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
  __ Branch(deferred->entry(), eq, result, Operand(scratch));
  __ bind(deferred->exit());
}


void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
  Register char_code = ToRegister(instr->char_code());
  Register result = ToRegister(instr->result());

  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  __ mov(result, zero_reg);

  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
  __ SmiTag(char_code);
  __ push(char_code);
  CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr);
  __ StoreToSafepointRegisterSlot(v0, result);
}
void LCodeGen::DoStringLength(LStringLength* instr) {
  Register string = ToRegister(instr->string());
  Register result = ToRegister(instr->result());
  __ lw(result, FieldMemOperand(string, String::kLengthOffset));
}


void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
  LOperand* input = instr->value();
  ASSERT(input->IsRegister() || input->IsStackSlot());
  LOperand* output = instr->result();
  ASSERT(output->IsDoubleRegister());
  FPURegister single_scratch = double_scratch0().low();
  if (input->IsStackSlot()) {
    Register scratch = scratch0();
    __ lw(scratch, ToMemOperand(input));
    __ mtc1(scratch, single_scratch);
  } else {
    __ mtc1(ToRegister(input), single_scratch);
  }
  __ cvt_d_w(ToDoubleRegister(output), single_scratch);
}


void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
  LOperand* input = instr->value();
  LOperand* output = instr->result();

  FPURegister dbl_scratch = double_scratch0();
  __ mtc1(ToRegister(input), dbl_scratch);
  __ Cvt_d_uw(ToDoubleRegister(output), dbl_scratch, f22);
}
void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
  class DeferredNumberTagI: public LDeferredCode {
   public:
    DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() {
      codegen()->DoDeferredNumberTagI(instr_,
                                      instr_->value(),
                                      SIGNED_INT32);
    }
    virtual LInstruction* instr() { return instr_; }
   private:
    LNumberTagI* instr_;
  };

  Register src = ToRegister(instr->value());
  Register dst = ToRegister(instr->result());
  Register overflow = scratch0();

  DeferredNumberTagI* deferred = new(zone()) DeferredNumberTagI(this, instr);
  __ SmiTagCheckOverflow(dst, src, overflow);
  __ BranchOnOverflow(deferred->entry(), overflow);
  __ bind(deferred->exit());
}


void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
  class DeferredNumberTagU: public LDeferredCode {
   public:
    DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() {
      codegen()->DoDeferredNumberTagI(instr_,
                                      instr_->value(),
                                      UNSIGNED_INT32);
    }
    virtual LInstruction* instr() { return instr_; }
   private:
    LNumberTagU* instr_;
  };

  LOperand* input = instr->value();
  ASSERT(input->IsRegister() && input->Equals(instr->result()));
  Register reg = ToRegister(input);

  DeferredNumberTagU* deferred = new(zone()) DeferredNumberTagU(this, instr);
  __ Branch(deferred->entry(), hi, reg, Operand(Smi::kMaxValue));
  __ SmiTag(reg, reg);
  __ bind(deferred->exit());
}
void LCodeGen::DoDeferredNumberTagI(LInstruction* instr,
                                    LOperand* value,
                                    IntegerSignedness signedness) {
  Label slow, done;
  Register src = ToRegister(value);
  Register dst = ToRegister(instr->result());
  FPURegister dbl_scratch = double_scratch0();

  // Preserve the value of all registers.
  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);

  if (signedness == SIGNED_INT32) {
    // There was an overflow, so bits 30 and 31 of the original integer
    // disagree. Recover the original value, then convert it to a double.
    if (dst.is(src)) {
      __ SmiUntag(src, dst);
      __ Xor(src, src, Operand(0x80000000));
    }
    __ mtc1(src, dbl_scratch);
    __ cvt_d_w(dbl_scratch, dbl_scratch);
  } else {
    __ mtc1(src, dbl_scratch);
    __ Cvt_d_uw(dbl_scratch, dbl_scratch, f22);
  }

  if (FLAG_inline_new) {
    __ LoadRoot(t2, Heap::kHeapNumberMapRootIndex);
    __ AllocateHeapNumber(t1, a3, t0, t2, &slow);
    __ Move(dst, t1);
    __ Branch(&done);
  }

  // Slow case: call the runtime system to do the number allocation. First
  // put a valid pointer value in the result's stack slot, since that
  // register is in the pointer map but currently holds an untagged integer.
  __ bind(&slow);
  __ StoreToSafepointRegisterSlot(zero_reg, dst);
  CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
  __ Move(dst, v0);

  // Done. Store the converted value into the allocated heap number.
  __ bind(&done);
  __ sdc1(dbl_scratch, FieldMemOperand(dst, HeapNumber::kValueOffset));
  __ StoreToSafepointRegisterSlot(dst, dst);
}
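// Recovery arithmetic used above (worked example): smi-tagging computes
// dst = src << 1 and overflows exactly when bits 30 and 31 of src differ.
// On that path dst holds the truncated shift, so an arithmetic shift right
// recovers every bit except bit 31, and the Xor with 0x80000000 restores it:
//
//   src = 0x40000000             // 2^30, not representable as a smi
//   dst = src << 1 = 0x80000000  // overflow detected
//   (int32_t)dst >> 1 == 0xC0000000; 0xC0000000 ^ 0x80000000 == 0x40000000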
void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
  class DeferredNumberTagD: public LDeferredCode {
   public:
    DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() { codegen()->DoDeferredNumberTagD(instr_); }
    virtual LInstruction* instr() { return instr_; }
   private:
    LNumberTagD* instr_;
  };

  DoubleRegister input_reg = ToDoubleRegister(instr->value());
  Register scratch = scratch0();
  Register reg = ToRegister(instr->result());
  Register temp1 = ToRegister(instr->temp());
  Register temp2 = ToRegister(instr->temp2());

  DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr);
  if (FLAG_inline_new) {
    __ LoadRoot(scratch, Heap::kHeapNumberMapRootIndex);
    __ AllocateHeapNumber(reg, temp1, temp2, scratch, deferred->entry());
  } else {
    __ Branch(deferred->entry());
  }
  __ bind(deferred->exit());
  __ sdc1(input_reg, FieldMemOperand(reg, HeapNumber::kValueOffset));
}


void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  Register reg = ToRegister(instr->result());
  __ mov(reg, zero_reg);

  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
  CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
  __ StoreToSafepointRegisterSlot(v0, reg);
}
void LCodeGen::DoSmiTag(LSmiTag* instr) {
  ASSERT(!instr->hydrogen_value()->CheckFlag(HValue::kCanOverflow));
  __ SmiTag(ToRegister(instr->result()), ToRegister(instr->value()));
}


void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
  Register scratch = scratch0();
  Register input = ToRegister(instr->value());
  Register result = ToRegister(instr->result());
  if (instr->needs_check()) {
    STATIC_ASSERT(kHeapObjectTag == 1);
    // If the input is a HeapObject, the value of scratch won't be zero.
    __ And(scratch, input, Operand(kHeapObjectTag));
    __ SmiUntag(result, input);
    DeoptimizeIf(ne, instr->environment(), scratch, Operand(zero_reg));
  } else {
    __ SmiUntag(result, input);
  }
}
void LCodeGen::EmitNumberUntagD(Register input_reg,
                                DoubleRegister result_reg,
                                bool deoptimize_on_undefined,
                                bool deoptimize_on_minus_zero,
                                LEnvironment* env) {
  Register scratch = scratch0();

  Label load_smi, heap_number, done;

  // Smi check.
  __ UntagAndJumpIfSmi(scratch, input_reg, &load_smi);

  // Heap number map check.
  __ lw(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
  __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
  if (deoptimize_on_undefined) {
    DeoptimizeIf(ne, env, scratch, Operand(at));
  } else {
    __ Branch(&heap_number, eq, scratch, Operand(at));

    __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
    DeoptimizeIf(ne, env, input_reg, Operand(at));

    // Convert undefined to NaN.
    __ LoadRoot(at, Heap::kNanValueRootIndex);
    __ ldc1(result_reg, FieldMemOperand(at, HeapNumber::kValueOffset));
    __ Branch(&done);

    __ bind(&heap_number);
  }
  // Heap number to double register conversion.
  __ ldc1(result_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset));
  if (deoptimize_on_minus_zero) {
    __ mfc1(at, result_reg.low());
    __ Branch(&done, ne, at, Operand(zero_reg));
    __ mfc1(scratch, result_reg.high());
    DeoptimizeIf(eq, env, scratch, Operand(HeapNumber::kSignMask));
  }
  __ Branch(&done);

  // Smi to double register conversion.
  __ bind(&load_smi);
  // scratch holds the untagged value of input_reg.
  __ mtc1(scratch, result_reg);
  __ cvt_d_w(result_reg, result_reg);
  __ bind(&done);
}
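// Minus-zero detection above works on the raw IEEE-754 bit pattern: -0.0 is
// the only double whose low mantissa word is zero and whose high word equals
// the sign mask, so two integer moves out of the FPU suffice:
//
//   high word 0x80000000, low word 0x00000000  =>  -0.0, deoptimize
//   any non-zero low word                      =>  cannot be -0.0, done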
void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
  Register input_reg = ToRegister(instr->value());
  Register scratch1 = scratch0();
  Register scratch2 = ToRegister(instr->temp());
  DoubleRegister double_scratch = double_scratch0();
  FPURegister single_scratch = double_scratch.low();

  ASSERT(!scratch1.is(input_reg) && !scratch1.is(scratch2));
  ASSERT(!scratch2.is(input_reg) && !scratch2.is(scratch1));

  Label done;

  // The input is a tagged HeapObject: load its map for the heap number
  // check.
  __ lw(scratch1, FieldMemOperand(input_reg, HeapObject::kMapOffset));
  __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);

  if (instr->truncating()) {
    Register scratch3 = ToRegister(instr->temp2());
    DoubleRegister double_scratch2 = ToDoubleRegister(instr->temp3());
    ASSERT(!scratch3.is(input_reg) &&
           !scratch3.is(scratch1) &&
           !scratch3.is(scratch2));
    // Perform a truncating conversion of a floating point number as used by
    // the JS bitwise operations.
    Label heap_number;
    __ Branch(&heap_number, eq, scratch1, Operand(at));

    // Check for undefined. Undefined is converted to zero for truncating
    // conversions.
    __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
    DeoptimizeIf(ne, instr->environment(), input_reg, Operand(at));
    __ mov(input_reg, zero_reg);
    __ Branch(&done);

    __ bind(&heap_number);
    __ ldc1(double_scratch2,
            FieldMemOperand(input_reg, HeapNumber::kValueOffset));
    __ EmitECMATruncate(input_reg,
                        double_scratch2,
                        single_scratch,
                        scratch1,
                        scratch2,
                        scratch3);
  } else {
    // Deoptimize if we don't have a heap number.
    DeoptimizeIf(ne, instr->environment(), scratch1, Operand(at));

    // Load the double value.
    __ ldc1(double_scratch,
            FieldMemOperand(input_reg, HeapNumber::kValueOffset));

    Register except_flag = scratch2;
    __ EmitFPUTruncate(kRoundToZero,
                       single_scratch,
                       double_scratch,
                       scratch1,
                       except_flag,
                       kCheckForInexactConversion);

    // Deopt if the operation did not succeed.
    DeoptimizeIf(ne, instr->environment(), except_flag, Operand(zero_reg));

    // Load the result.
    __ mfc1(input_reg, single_scratch);

    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
      __ Branch(&done, ne, input_reg, Operand(zero_reg));

      __ mfc1(scratch1, double_scratch.high());
      __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
      DeoptimizeIf(ne, instr->environment(), scratch1, Operand(zero_reg));
    }
  }
  __ bind(&done);
}
void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
  class DeferredTaggedToI: public LDeferredCode {
   public:
    DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() { codegen()->DoDeferredTaggedToI(instr_); }
    virtual LInstruction* instr() { return instr_; }
   private:
    LTaggedToI* instr_;
  };

  LOperand* input = instr->value();
  ASSERT(input->IsRegister());
  ASSERT(input->Equals(instr->result()));

  Register input_reg = ToRegister(input);

  DeferredTaggedToI* deferred = new(zone()) DeferredTaggedToI(this, instr);

  // Let the deferred code handle the HeapObject case.
  __ JumpIfNotSmi(input_reg, deferred->entry());

  // Smi to int32 conversion.
  __ SmiUntag(input_reg);
  __ bind(deferred->exit());
}


void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
  LOperand* input = instr->value();
  ASSERT(input->IsRegister());
  LOperand* result = instr->result();
  ASSERT(result->IsDoubleRegister());

  Register input_reg = ToRegister(input);
  DoubleRegister result_reg = ToDoubleRegister(result);

  EmitNumberUntagD(input_reg, result_reg,
                   instr->hydrogen()->deoptimize_on_undefined(),
                   instr->hydrogen()->deoptimize_on_minus_zero(),
                   instr->environment());
}
void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
  Register result_reg = ToRegister(instr->result());
  Register scratch1 = scratch0();
  Register scratch2 = ToRegister(instr->temp());
  DoubleRegister double_input = ToDoubleRegister(instr->value());
  FPURegister single_scratch = double_scratch0().low();

  if (instr->truncating()) {
    Register scratch3 = ToRegister(instr->temp2());
    __ EmitECMATruncate(result_reg,
                        double_input,
                        single_scratch,
                        scratch1,
                        scratch2,
                        scratch3);
  } else {
    Register except_flag = scratch2;

    __ EmitFPUTruncate(kRoundToMinusInf,
                       single_scratch,
                       double_input,
                       scratch1,
                       except_flag,
                       kCheckForInexactConversion);

    // Deopt if the operation did not succeed (except_flag != 0).
    DeoptimizeIf(ne, instr->environment(), except_flag, Operand(zero_reg));

    // Load the result.
    __ mfc1(result_reg, single_scratch);
  }
}


void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
  LOperand* input = instr->value();
  __ And(at, ToRegister(input), Operand(kSmiTagMask));
  DeoptimizeIf(ne, instr->environment(), at, Operand(zero_reg));
}


void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
  LOperand* input = instr->value();
  __ And(at, ToRegister(input), Operand(kSmiTagMask));
  DeoptimizeIf(eq, instr->environment(), at, Operand(zero_reg));
}
void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
  Register input = ToRegister(instr->value());
  Register scratch = scratch0();

  __ GetObjectType(input, scratch, scratch);

  if (instr->hydrogen()->is_interval_check()) {
    InstanceType first;
    InstanceType last;
    instr->hydrogen()->GetCheckInterval(&first, &last);

    // If there is only one type in the interval, check for equality.
    if (first == last) {
      DeoptimizeIf(ne, instr->environment(), scratch, Operand(first));
    } else {
      DeoptimizeIf(lo, instr->environment(), scratch, Operand(first));
      // Omit the check for the last type.
      if (last != LAST_TYPE) {
        DeoptimizeIf(hi, instr->environment(), scratch, Operand(last));
      }
    }
  } else {
    uint8_t mask;
    uint8_t tag;
    instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);

    if (IsPowerOf2(mask)) {
      ASSERT(tag == 0 || IsPowerOf2(tag));
      __ And(at, scratch, mask);
      DeoptimizeIf(tag == 0 ? ne : eq, instr->environment(),
                   at, Operand(zero_reg));
    } else {
      __ And(scratch, scratch, Operand(mask));
      DeoptimizeIf(ne, instr->environment(), scratch, Operand(tag));
    }
  }
}
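// The mask-and-tag branch above exploits a layout property of the instance
// type byte: when the mask has a single bit set and the tag is either zero
// or that same bit, "(type & mask) != tag" degenerates into a zero/non-zero
// test, needing only one And plus a compare against zero (a sketch):
//
//   tag == 0     =>  deopt when (type & mask) != 0, i.e. condition ne
//   tag == mask  =>  deopt when (type & mask) == 0, i.e. condition eq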
void LCodeGen::DoCheckFunction(LCheckFunction* instr) {
  Register reg = ToRegister(instr->value());
  Handle<JSFunction> target = instr->hydrogen()->target();
  if (isolate()->heap()->InNewSpace(*target)) {
    // The target may move during GC, so compare through a property cell.
    Handle<JSGlobalPropertyCell> cell =
        isolate()->factory()->NewJSGlobalPropertyCell(target);
    __ li(at, Operand(Handle<Object>(cell)));
    __ lw(at, FieldMemOperand(at, JSGlobalPropertyCell::kValueOffset));
    DeoptimizeIf(ne, instr->environment(), reg, Operand(at));
  } else {
    DeoptimizeIf(ne, instr->environment(), reg, Operand(target));
  }
}


void LCodeGen::DoCheckMapCommon(Register reg,
                                Register scratch,
                                Handle<Map> map,
                                CompareMapMode mode,
                                LEnvironment* env) {
  Label success;
  __ CompareMapAndBranch(reg, scratch, map, &success, eq, &success, mode);
  DeoptimizeIf(al, env);
  __ bind(&success);
}


void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
  Register scratch = scratch0();
  LOperand* input = instr->value();
  ASSERT(input->IsRegister());
  Register reg = ToRegister(input);
  Label success;
  SmallMapList* map_set = instr->hydrogen()->map_set();
  // Branch to success on any map but the last; only the last map
  // deoptimizes on a mismatch.
  for (int i = 0; i < map_set->length() - 1; i++) {
    Handle<Map> map = map_set->at(i);
    __ CompareMapAndBranch(
        reg, scratch, map, &success, eq, &success, REQUIRE_EXACT_MAP);
  }
  Handle<Map> map = map_set->last();
  DoCheckMapCommon(reg, scratch, map, REQUIRE_EXACT_MAP,
                   instr->environment());
  __ bind(&success);
}
void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
  DoubleRegister value_reg = ToDoubleRegister(instr->unclamped());
  Register result_reg = ToRegister(instr->result());
  DoubleRegister temp_reg = ToDoubleRegister(instr->temp());
  __ ClampDoubleToUint8(result_reg, value_reg, temp_reg);
}


void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
  Register unclamped_reg = ToRegister(instr->unclamped());
  Register result_reg = ToRegister(instr->result());
  __ ClampUint8(result_reg, unclamped_reg);
}


void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
  Register scratch = scratch0();
  Register input_reg = ToRegister(instr->unclamped());
  Register result_reg = ToRegister(instr->result());
  DoubleRegister temp_reg = ToDoubleRegister(instr->temp());
  Label is_smi, done, heap_number;

  // Both smi and heap number cases are handled.
  __ UntagAndJumpIfSmi(scratch, input_reg, &is_smi);

  // Check for heap number.
  __ lw(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
  __ Branch(&heap_number, eq, scratch,
            Operand(factory()->heap_number_map()));

  // Check for undefined. Undefined is converted to zero for clamping
  // conversions.
  DeoptimizeIf(ne, instr->environment(), input_reg,
               Operand(factory()->undefined_value()));
  __ mov(result_reg, zero_reg);
  __ jmp(&done);

  // Heap number.
  __ bind(&heap_number);
  __ ldc1(double_scratch0(),
          FieldMemOperand(input_reg, HeapNumber::kValueOffset));
  __ ClampDoubleToUint8(result_reg, double_scratch0(), temp_reg);
  __ jmp(&done);

  __ bind(&is_smi);
  __ ClampUint8(result_reg, scratch);

  __ bind(&done);
}
void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
  Register temp1 = ToRegister(instr->temp());
  Register temp2 = ToRegister(instr->temp2());

  Handle<JSObject> holder = instr->holder();
  Handle<JSObject> current_prototype = instr->prototype();

  // Load the prototype object.
  __ LoadHeapObject(temp1, current_prototype);

  // Check prototype maps up to the holder.
  while (!current_prototype.is_identical_to(holder)) {
    DoCheckMapCommon(temp1, temp2,
                     Handle<Map>(current_prototype->map()),
                     ALLOW_ELEMENT_TRANSITION_MAPS, instr->environment());
    current_prototype =
        Handle<JSObject>(JSObject::cast(current_prototype->GetPrototype()));
    // Load the next prototype object.
    __ LoadHeapObject(temp1, current_prototype);
  }

  // Check the holder map.
  DoCheckMapCommon(temp1, temp2,
                   Handle<Map>(current_prototype->map()),
                   ALLOW_ELEMENT_TRANSITION_MAPS, instr->environment());
}
void LCodeGen::DoAllocateObject(LAllocateObject* instr) {
  class DeferredAllocateObject: public LDeferredCode {
   public:
    DeferredAllocateObject(LCodeGen* codegen, LAllocateObject* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() { codegen()->DoDeferredAllocateObject(instr_); }
    virtual LInstruction* instr() { return instr_; }
   private:
    LAllocateObject* instr_;
  };

  DeferredAllocateObject* deferred =
      new(zone()) DeferredAllocateObject(this, instr);

  Register result = ToRegister(instr->result());
  Register scratch = ToRegister(instr->temp());
  Register scratch2 = ToRegister(instr->temp2());
  Handle<JSFunction> constructor = instr->hydrogen()->constructor();
  Handle<Map> initial_map(constructor->initial_map());
  int instance_size = initial_map->instance_size();
  ASSERT(initial_map->pre_allocated_property_fields() +
         initial_map->unused_property_fields() -
         initial_map->inobject_properties() == 0);

  // Allocate memory for the object. The initial map might change when the
  // constructor's prototype changes, but instance size and property counts
  // remain unchanged (if slack tracking finished).
  ASSERT(!constructor->shared()->IsInobjectSlackTrackingInProgress());
  __ AllocateInNewSpace(instance_size,
                        result,
                        scratch,
                        scratch2,
                        deferred->entry(),
                        TAG_OBJECT);

  __ bind(deferred->exit());
  if (FLAG_debug_code) {
    Label is_in_new_space;
    __ JumpIfInNewSpace(result, scratch, &is_in_new_space);
    __ Abort("Allocated object is not in new-space");
    __ bind(&is_in_new_space);
  }

  // Load the initial map.
  Register map = scratch;
  __ LoadHeapObject(map, constructor);
  __ lw(map, FieldMemOperand(map, JSFunction::kPrototypeOrInitialMapOffset));

  // Initialize the map and fields of the newly allocated object.
  // ... (store the map, then the empty fixed array into the properties and
  //      elements fields)
  __ LoadRoot(scratch, Heap::kEmptyFixedArrayRootIndex);
  if (initial_map->inobject_properties() != 0) {
    __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
    for (int i = 0; i < initial_map->inobject_properties(); i++) {
      int property_offset = JSObject::kHeaderSize + i * kPointerSize;
      __ sw(scratch, FieldMemOperand(result, property_offset));
    }
  }
}


void LCodeGen::DoDeferredAllocateObject(LAllocateObject* instr) {
  Register result = ToRegister(instr->result());
  Handle<JSFunction> constructor = instr->hydrogen()->constructor();
  Handle<Map> initial_map(constructor->initial_map());
  int instance_size = initial_map->instance_size();

  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  __ mov(result, zero_reg);

  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
  __ li(a0, Operand(Smi::FromInt(instance_size)));
  __ push(a0);
  CallRuntimeFromDeferred(Runtime::kAllocateInNewSpace, 1, instr);
  __ StoreToSafepointRegisterSlot(v0, result);
}
void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
  Handle<FixedArray> literals(instr->environment()->closure()->literals());
  ElementsKind boilerplate_elements_kind =
      instr->hydrogen()->boilerplate_elements_kind();

  // Deopt if the array literal boilerplate ElementsKind is of a type
  // different from the expected one. The check is not necessary if the
  // boilerplate has already been converted to TERMINAL_FAST_ELEMENTS_KIND.
  if (CanTransitionToMoreGeneralFastElementsKind(
          boilerplate_elements_kind, true)) {
    __ LoadHeapObject(a1, instr->hydrogen()->boilerplate_object());
    // ... (load the boilerplate's elements kind from its map into a2)
    DeoptimizeIf(ne, instr->environment(), a2,
                 Operand(boilerplate_elements_kind));
  }

  // Set up the parameters to the stub/runtime call.
  __ LoadHeapObject(a3, literals);
  __ li(a2, Operand(Smi::FromInt(instr->hydrogen()->literal_index())));
  // The boilerplate already exists and constant elements are never
  // accessed, so pass an empty fixed array.
  __ li(a1, Operand(isolate()->factory()->empty_fixed_array()));
  __ Push(a3, a2, a1);

  // Pick the right runtime function or stub to call.
  int length = instr->hydrogen()->length();
  if (instr->hydrogen()->IsCopyOnWrite()) {
    ASSERT(instr->hydrogen()->depth() == 1);
    FastCloneShallowArrayStub::Mode mode =
        FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS;
    FastCloneShallowArrayStub stub(mode, length);
    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
  } else if (instr->hydrogen()->depth() > 1) {
    CallRuntime(Runtime::kCreateArrayLiteral, 3, instr);
  } else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) {
    CallRuntime(Runtime::kCreateArrayLiteralShallow, 3, instr);
  } else {
    FastCloneShallowArrayStub::Mode mode =
        boilerplate_elements_kind == FAST_DOUBLE_ELEMENTS
            ? FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS
            : FastCloneShallowArrayStub::CLONE_ELEMENTS;
    FastCloneShallowArrayStub stub(mode, length);
    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
  }
}
void LCodeGen::EmitDeepCopy(Handle<JSObject> object,
                            Register result,
                            Register source,
                            int* offset) {
  ASSERT(!source.is(a2));
  ASSERT(!result.is(a2));

  // Only elements backing stores for non-COW arrays need to be copied.
  Handle<FixedArrayBase> elements(object->elements());
  bool has_elements = elements->length() > 0 &&
      elements->map() != isolate()->heap()->fixed_cow_array_map();

  // Increase the offset so that subsequent objects end up right after this
  // object and its backing store.
  int object_offset = *offset;
  int object_size = object->map()->instance_size();
  int elements_offset = *offset + object_size;
  int elements_size = has_elements ? elements->Size() : 0;
  *offset += object_size + elements_size;

  // Copy the object header.
  ASSERT(object->properties()->length() == 0);
  int inobject_properties = object->map()->inobject_properties();
  int header_size = object_size - inobject_properties * kPointerSize;
  for (int i = 0; i < header_size; i += kPointerSize) {
    if (has_elements && i == JSObject::kElementsOffset) {
      __ Addu(a2, result, Operand(elements_offset));
    } else {
      __ lw(a2, FieldMemOperand(source, i));
    }
    __ sw(a2, FieldMemOperand(result, object_offset + i));
  }

  // Copy the in-object properties.
  for (int i = 0; i < inobject_properties; i++) {
    int total_offset = object_offset + object->GetInObjectPropertyOffset(i);
    Handle<Object> value = Handle<Object>(object->InObjectPropertyAt(i));
    if (value->IsJSObject()) {
      Handle<JSObject> value_object = Handle<JSObject>::cast(value);
      __ Addu(a2, result, Operand(*offset));
      __ sw(a2, FieldMemOperand(result, total_offset));
      __ LoadHeapObject(source, value_object);
      EmitDeepCopy(value_object, result, source, offset);
    } else if (value->IsHeapObject()) {
      __ LoadHeapObject(a2, Handle<HeapObject>::cast(value));
      __ sw(a2, FieldMemOperand(result, total_offset));
    } else {
      __ li(a2, Operand(value));
      __ sw(a2, FieldMemOperand(result, total_offset));
    }
  }

  if (has_elements) {
    // Copy the elements backing store header.
    __ LoadHeapObject(source, elements);
    for (int i = 0; i < FixedArray::kHeaderSize; i += kPointerSize) {
      __ lw(a2, FieldMemOperand(source, i));
      __ sw(a2, FieldMemOperand(result, elements_offset + i));
    }

    // Copy the elements backing store content.
    int elements_length = has_elements ? elements->length() : 0;
    if (elements->IsFixedDoubleArray()) {
      Handle<FixedDoubleArray> double_array =
          Handle<FixedDoubleArray>::cast(elements);
      for (int i = 0; i < elements_length; i++) {
        int64_t value = double_array->get_representation(i);
        // The doubles are emitted as two 32-bit words (little endian).
        int32_t value_low = static_cast<int32_t>(value & 0xFFFFFFFF);
        int32_t value_high = static_cast<int32_t>(value >> 32);
        int total_offset =
            elements_offset + FixedDoubleArray::OffsetOfElementAt(i);
        __ li(a2, Operand(value_low));
        __ sw(a2, FieldMemOperand(result, total_offset));
        __ li(a2, Operand(value_high));
        __ sw(a2, FieldMemOperand(result, total_offset + 4));
      }
    } else if (elements->IsFixedArray()) {
      Handle<FixedArray> fast_elements = Handle<FixedArray>::cast(elements);
      for (int i = 0; i < elements_length; i++) {
        int total_offset =
            elements_offset + FixedArray::OffsetOfElementAt(i);
        Handle<Object> value(fast_elements->get(i));
        if (value->IsJSObject()) {
          Handle<JSObject> value_object = Handle<JSObject>::cast(value);
          __ Addu(a2, result, Operand(*offset));
          __ sw(a2, FieldMemOperand(result, total_offset));
          __ LoadHeapObject(source, value_object);
          EmitDeepCopy(value_object, result, source, offset);
        } else if (value->IsHeapObject()) {
          __ LoadHeapObject(a2, Handle<HeapObject>::cast(value));
          __ sw(a2, FieldMemOperand(result, total_offset));
        } else {
          __ li(a2, Operand(value));
          __ sw(a2, FieldMemOperand(result, total_offset));
        }
      }
    } else {
      UNREACHABLE();
    }
  }
}
void LCodeGen::DoFastLiteral(LFastLiteral* instr) {
  int size = instr->hydrogen()->total_size();
  ElementsKind boilerplate_elements_kind =
      instr->hydrogen()->boilerplate()->GetElementsKind();

  // Deopt if the literal boilerplate ElementsKind is of a type different
  // from the expected one. The check is not necessary if the boilerplate
  // has already been converted to TERMINAL_FAST_ELEMENTS_KIND.
  if (CanTransitionToMoreGeneralFastElementsKind(
          boilerplate_elements_kind, true)) {
    __ LoadHeapObject(a1, instr->hydrogen()->boilerplate());
    // ... (load the boilerplate's elements kind from its map into a2)
    DeoptimizeIf(ne, instr->environment(), a2,
                 Operand(boilerplate_elements_kind));
  }

  // Allocate all objects that are part of the literal in one big
  // allocation. This avoids multiple limit checks.
  Label allocated, runtime_allocate;
  __ AllocateInNewSpace(size, v0, a2, a3, &runtime_allocate, TAG_OBJECT);
  __ jmp(&allocated);

  __ bind(&runtime_allocate);
  __ li(a0, Operand(Smi::FromInt(size)));
  __ push(a0);
  CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);

  __ bind(&allocated);
  int offset = 0;
  __ LoadHeapObject(a1, instr->hydrogen()->boilerplate());
  EmitDeepCopy(instr->hydrogen()->boilerplate(), v0, a1, &offset);
  ASSERT_EQ(size, offset);
}


void LCodeGen::DoObjectLiteral(LObjectLiteral* instr) {
  Handle<FixedArray> literals(instr->environment()->closure()->literals());
  Handle<FixedArray> constant_properties =
      instr->hydrogen()->constant_properties();

  // Set up the parameters to the stub/runtime call.
  __ LoadHeapObject(t0, literals);
  __ li(a3, Operand(Smi::FromInt(instr->hydrogen()->literal_index())));
  __ li(a2, Operand(constant_properties));
  int flags = instr->hydrogen()->fast_elements()
      ? ObjectLiteral::kFastElements
      : ObjectLiteral::kNoFlags;
  __ li(a1, Operand(Smi::FromInt(flags)));
  __ Push(t0, a3, a2, a1);

  // Pick the right runtime function or stub to call.
  int properties_count = constant_properties->length() / 2;
  if (instr->hydrogen()->depth() > 1) {
    CallRuntime(Runtime::kCreateObjectLiteral, 4, instr);
  } else if (properties_count >
             FastCloneShallowObjectStub::kMaximumClonedProperties) {
    CallRuntime(Runtime::kCreateObjectLiteralShallow, 4, instr);
  } else {
    FastCloneShallowObjectStub stub(properties_count);
    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
  }
}
void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
  ASSERT(ToRegister(instr->value()).is(a0));
  ASSERT(ToRegister(instr->result()).is(v0));
  __ push(a0);
  CallRuntime(Runtime::kToFastProperties, 1, instr);
}


void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
  Label materialized;
  // t3 = literals array; a1 = regexp literal; a0 = regexp literal clone.
  int literal_offset =
      FixedArray::OffsetOfElementAt(instr->hydrogen()->literal_index());
  __ LoadHeapObject(t3, instr->hydrogen()->literals());
  __ lw(a1, FieldMemOperand(t3, literal_offset));
  __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
  __ Branch(&materialized, ne, a1, Operand(at));

  // Create the regexp literal using the runtime function; the result is
  // returned in v0.
  __ li(t2, Operand(Smi::FromInt(instr->hydrogen()->literal_index())));
  __ li(t1, Operand(instr->hydrogen()->pattern()));
  __ li(t0, Operand(instr->hydrogen()->flags()));
  __ Push(t3, t2, t1, t0);
  CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr);
  __ mov(a1, v0);

  __ bind(&materialized);
  int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
  Label allocated, runtime_allocate;
  __ AllocateInNewSpace(size, v0, a2, a3, &runtime_allocate, TAG_OBJECT);
  __ jmp(&allocated);

  __ bind(&runtime_allocate);
  __ li(a0, Operand(Smi::FromInt(size)));
  __ Push(a1, a0);
  CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
  __ pop(a1);

  __ bind(&allocated);
  // Copy the content into the newly allocated memory, one pair of words at
  // a time, with a single trailing word when the size is odd.
  for (int i = 0; i < size - kPointerSize; i += 2 * kPointerSize) {
    __ lw(a3, FieldMemOperand(a1, i));
    __ lw(a2, FieldMemOperand(a1, i + kPointerSize));
    __ sw(a3, FieldMemOperand(v0, i));
    __ sw(a2, FieldMemOperand(v0, i + kPointerSize));
  }
  if ((size % (2 * kPointerSize)) != 0) {
    __ lw(a3, FieldMemOperand(a1, size - kPointerSize));
    __ sw(a3, FieldMemOperand(v0, size - kPointerSize));
  }
}
void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
  // Use the fast-case closure allocation code that allocates in new space
  // for nested functions that don't need literals cloning.
  Handle<SharedFunctionInfo> shared_info = instr->shared_info();
  bool pretenure = instr->hydrogen()->pretenure();
  if (!pretenure && shared_info->num_literals() == 0) {
    FastNewClosureStub stub(shared_info->language_mode());
    __ li(a1, Operand(shared_info));
    __ push(a1);
    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
  } else {
    __ li(a2, Operand(shared_info));
    __ li(a1, Operand(pretenure
                          ? factory()->true_value()
                          : factory()->false_value()));
    __ Push(cp, a2, a1);
    CallRuntime(Runtime::kNewClosure, 3, instr);
  }
}


void LCodeGen::DoTypeof(LTypeof* instr) {
  ASSERT(ToRegister(instr->result()).is(v0));
  Register input = ToRegister(instr->value());
  __ push(input);
  CallRuntime(Runtime::kTypeof, 1, instr);
}
void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
  Register input = ToRegister(instr->value());
  int true_block = chunk_->LookupDestination(instr->true_block_id());
  int false_block = chunk_->LookupDestination(instr->false_block_id());
  Label* true_label = chunk_->GetAssemblyLabel(true_block);
  Label* false_label = chunk_->GetAssemblyLabel(false_block);

  Register cmp1 = no_reg;
  Operand cmp2 = Operand(no_reg);

  Condition final_branch_condition = EmitTypeofIs(true_label,
                                                  false_label,
                                                  input,
                                                  instr->type_literal(),
                                                  cmp1,
                                                  cmp2);

  ASSERT(cmp1.is_valid());
  ASSERT(!cmp2.is_reg() || cmp2.rm().is_valid());

  if (final_branch_condition != kNoCondition) {
    EmitBranch(true_block, false_block, final_branch_condition, cmp1, cmp2);
  }
}


Condition LCodeGen::EmitTypeofIs(Label* true_label,
                                 Label* false_label,
                                 Register input,
                                 Handle<String> type_name,
                                 Register& cmp1,
                                 Operand& cmp2) {
  // The comparison operands are returned through cmp1/cmp2 so that the
  // caller can emit the final branch.
  Condition final_branch_condition = kNoCondition;
  Register scratch = scratch0();
  if (type_name->Equals(heap()->number_symbol())) {
    __ JumpIfSmi(input, true_label);
    __ lw(input, FieldMemOperand(input, HeapObject::kMapOffset));
    __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
    cmp1 = input;
    cmp2 = Operand(at);
    final_branch_condition = eq;

  } else if (type_name->Equals(heap()->string_symbol())) {
    __ JumpIfSmi(input, false_label);
    __ GetObjectType(input, input, scratch);
    __ Branch(false_label, ge, scratch, Operand(FIRST_NONSTRING_TYPE));
    // Undetectable objects are never strings.
    __ lbu(at, FieldMemOperand(input, Map::kBitFieldOffset));
    __ And(at, at, Operand(1 << Map::kIsUndetectable));
    cmp1 = at;
    cmp2 = Operand(zero_reg);
    final_branch_condition = eq;

  } else if (type_name->Equals(heap()->boolean_symbol())) {
    __ LoadRoot(at, Heap::kTrueValueRootIndex);
    __ Branch(true_label, eq, input, Operand(at));
    __ LoadRoot(at, Heap::kFalseValueRootIndex);
    cmp1 = at;
    cmp2 = Operand(input);
    final_branch_condition = eq;

  } else if (FLAG_harmony_typeof &&
             type_name->Equals(heap()->null_symbol())) {
    __ LoadRoot(at, Heap::kNullValueRootIndex);
    cmp1 = at;
    cmp2 = Operand(input);
    final_branch_condition = eq;

  } else if (type_name->Equals(heap()->undefined_symbol())) {
    __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
    __ Branch(true_label, eq, input, Operand(at));
    __ JumpIfSmi(input, false_label);
    // Undetectable objects also answer "undefined".
    __ lw(input, FieldMemOperand(input, HeapObject::kMapOffset));
    __ lbu(at, FieldMemOperand(input, Map::kBitFieldOffset));
    __ And(at, at, Operand(1 << Map::kIsUndetectable));
    cmp1 = at;
    cmp2 = Operand(zero_reg);
    final_branch_condition = ne;

  } else if (type_name->Equals(heap()->function_symbol())) {
    __ JumpIfSmi(input, false_label);
    __ GetObjectType(input, scratch, input);
    __ Branch(true_label, eq, input, Operand(JS_FUNCTION_TYPE));
    cmp1 = input;
    cmp2 = Operand(JS_FUNCTION_PROXY_TYPE);
    final_branch_condition = eq;

  } else if (type_name->Equals(heap()->object_symbol())) {
    __ JumpIfSmi(input, false_label);
    if (!FLAG_harmony_typeof) {
      __ LoadRoot(at, Heap::kNullValueRootIndex);
      __ Branch(true_label, eq, input, Operand(at));
    }
    // ... (check that the instance type is in the JS object range and that
    //      the object is not callable)
    // Undetectable objects are never "object".
    __ lbu(at, FieldMemOperand(input, Map::kBitFieldOffset));
    __ And(at, at, Operand(1 << Map::kIsUndetectable));
    cmp1 = at;
    cmp2 = Operand(zero_reg);
    final_branch_condition = eq;

  } else {
    // Unknown type literal: always false. Set cmp1/cmp2 to valid operands
    // to satisfy the caller's assertions.
    cmp1 = at;
    cmp2 = Operand(zero_reg);
    __ Branch(false_label);
  }

  return final_branch_condition;
}
void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
  Register temp1 = ToRegister(instr->temp());
  int true_block = chunk_->LookupDestination(instr->true_block_id());
  int false_block = chunk_->LookupDestination(instr->false_block_id());

  EmitIsConstructCall(temp1, scratch0());

  EmitBranch(true_block, false_block, eq, temp1,
             Operand(Smi::FromInt(StackFrame::CONSTRUCT)));
}


void LCodeGen::EmitIsConstructCall(Register temp1, Register temp2) {
  ASSERT(!temp1.is(temp2));
  // Get the frame pointer for the calling frame.
  __ lw(temp1, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));

  // Skip the arguments adaptor frame if it exists.
  Label check_frame_marker;
  __ lw(temp2, MemOperand(temp1, StandardFrameConstants::kContextOffset));
  __ Branch(&check_frame_marker, ne, temp2,
            Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
  __ lw(temp1, MemOperand(temp1, StandardFrameConstants::kCallerFPOffset));

  // Check the marker in the calling frame.
  __ bind(&check_frame_marker);
  __ lw(temp1, MemOperand(temp1, StandardFrameConstants::kMarkerOffset));
}
void LCodeGen::EnsureSpaceForLazyDeopt() {
  // Ensure that we have enough space after the previous lazy-bailout
  // instruction for patching the code here.
  int current_pc = masm()->pc_offset();
  int patch_size = Deoptimizer::patch_size();
  if (current_pc < last_lazy_deopt_pc_ + patch_size) {
    int padding_size = last_lazy_deopt_pc_ + patch_size - current_pc;
    ASSERT_EQ(0, padding_size % Assembler::kInstrSize);
    while (padding_size > 0) {
      __ nop();
      padding_size -= Assembler::kInstrSize;
    }
  }
  last_lazy_deopt_pc_ = masm()->pc_offset();
}
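// Worked example of the padding arithmetic (numbers are illustrative only):
// with a patch size of 4 instructions (16 bytes) and only 8 bytes emitted
// since the last lazy-deopt point, current_pc - last_lazy_deopt_pc_ == 8,
// so padding_size == 8 and two 4-byte nops are inserted before the next
// patchable site.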
void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
  EnsureSpaceForLazyDeopt();
  ASSERT(instr->HasEnvironment());
  LEnvironment* env = instr->environment();
  RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
}


void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
  DeoptimizeIf(al, instr->environment(), zero_reg, Operand(zero_reg));
}


void LCodeGen::DoDeleteProperty(LDeleteProperty* instr) {
  Register object = ToRegister(instr->object());
  Register key = ToRegister(instr->key());
  Register strict = scratch0();
  __ li(strict, Operand(Smi::FromInt(strict_mode_flag())));
  __ Push(object, key, strict);
  ASSERT(instr->HasPointerMap());
  LPointerMap* pointers = instr->pointer_map();
  RecordPosition(pointers->position());
  SafepointGenerator safepoint_generator(
      this, pointers, Safepoint::kLazyDeopt);
  __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION, safepoint_generator);
}
void LCodeGen::DoIn(LIn* instr) {
  Register obj = ToRegister(instr->object());
  Register key = ToRegister(instr->key());
  __ Push(key, obj);
  ASSERT(instr->HasPointerMap());
  LPointerMap* pointers = instr->pointer_map();
  RecordPosition(pointers->position());
  SafepointGenerator safepoint_generator(
      this, pointers, Safepoint::kLazyDeopt);
  __ InvokeBuiltin(Builtins::IN, CALL_FUNCTION, safepoint_generator);
}


void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
  __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
  RecordSafepointWithLazyDeopt(
      instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
  ASSERT(instr->HasEnvironment());
  LEnvironment* env = instr->environment();
  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
}
void LCodeGen::DoStackCheck(LStackCheck* instr) {
  class DeferredStackCheck: public LDeferredCode {
   public:
    DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() { codegen()->DoDeferredStackCheck(instr_); }
    virtual LInstruction* instr() { return instr_; }
   private:
    LStackCheck* instr_;
  };

  ASSERT(instr->HasEnvironment());
  LEnvironment* env = instr->environment();
  // There is no LLazyBailout instruction for stack checks, so we have to
  // prepare for lazy deoptimization explicitly here.
  if (instr->hydrogen()->is_function_entry()) {
    // Perform stack overflow check.
    Label done;
    __ LoadRoot(at, Heap::kStackLimitRootIndex);
    __ Branch(&done, hs, sp, Operand(at));
    StackCheckStub stub;
    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
    EnsureSpaceForLazyDeopt();
    __ bind(&done);
    RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
    safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
  } else {
    ASSERT(instr->hydrogen()->is_backwards_branch());
    // Perform the stack overflow check before jumping backwards.
    DeferredStackCheck* deferred_stack_check =
        new(zone()) DeferredStackCheck(this, instr);
    __ LoadRoot(at, Heap::kStackLimitRootIndex);
    __ Branch(deferred_stack_check->entry(), lo, sp, Operand(at));
    EnsureSpaceForLazyDeopt();
    __ bind(instr->done_label());
    deferred_stack_check->SetExit(instr->done_label());
    RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
    // Don't record a deoptimization index for the safepoint here; this will
    // be done explicitly when emitting the call and the safepoint in the
    // deferred code.
  }
}
void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
  // This is a pseudo-instruction that ensures that the environment here is
  // properly registered for deoptimization and records the assembler offset.
  LEnvironment* environment = instr->environment();
  environment->SetSpilledRegisters(instr->SpilledRegisterArray(),
                                   instr->SpilledDoubleRegisterArray());

  // If the environment were already registered, we would have no way of
  // backpatching it with the spill slot operands.
  ASSERT(!environment->HasBeenRegistered());
  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
  ASSERT(osr_pc_offset_ == -1);
  osr_pc_offset_ = masm()->pc_offset();
}
void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
  Register result = ToRegister(instr->result());
  Register object = ToRegister(instr->object());
  __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
  DeoptimizeIf(eq, instr->environment(), object, Operand(at));

  Register null_value = t1;
  __ LoadRoot(null_value, Heap::kNullValueRootIndex);
  DeoptimizeIf(eq, instr->environment(), object, Operand(null_value));

  __ And(at, object, Operand(kSmiTagMask));
  DeoptimizeIf(eq, instr->environment(), at, Operand(zero_reg));

  __ GetObjectType(object, a1, a1);
  // ... (deoptimize unless the instance type is above the JS proxy range)

  Label use_cache, call_runtime;
  __ CheckEnumCache(null_value, &call_runtime);

  __ lw(result, FieldMemOperand(object, HeapObject::kMapOffset));
  __ Branch(&use_cache);

  // Get the set of properties to enumerate.
  __ bind(&call_runtime);
  __ push(object);
  CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr);

  // The runtime result must be a map (checked against the meta map).
  __ lw(a1, FieldMemOperand(v0, HeapObject::kMapOffset));
  __ LoadRoot(at, Heap::kMetaMapRootIndex);
  DeoptimizeIf(ne, instr->environment(), a1, Operand(at));
  __ bind(&use_cache);
}
void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
  Register map = ToRegister(instr->map());
  Register result = ToRegister(instr->result());
  Label load_cache, done;
  __ EnumLength(result, map);
  __ Branch(&load_cache, ne, result, Operand(Smi::FromInt(0)));
  __ li(result, Operand(isolate()->factory()->empty_fixed_array()));
  __ jmp(&done);

  __ bind(&load_cache);
  __ LoadInstanceDescriptors(map, result);
  __ lw(result, FieldMemOperand(result, DescriptorArray::kEnumCacheOffset));
  __ lw(result, FieldMemOperand(result, FixedArray::SizeFor(instr->idx())));
  DeoptimizeIf(eq, instr->environment(), result, Operand(zero_reg));

  __ bind(&done);
}


void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
  Register object = ToRegister(instr->value());
  Register map = ToRegister(instr->map());
  __ lw(scratch0(), FieldMemOperand(object, HeapObject::kMapOffset));
  DeoptimizeIf(ne, instr->environment(), map, Operand(scratch0()));
}
void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
  Register object = ToRegister(instr->object());
  Register index = ToRegister(instr->index());
  Register result = ToRegister(instr->result());
  Register scratch = scratch0();

  Label out_of_object, done;
  __ Branch(USE_DELAY_SLOT, &out_of_object, lt, index, Operand(zero_reg));
  __ sll(scratch, index, kPointerSizeLog2 - kSmiTagSize);  // In delay slot.

  // In-object field: positive index counts from the object header.
  __ Addu(scratch, object, scratch);
  __ lw(result, FieldMemOperand(scratch, JSObject::kHeaderSize));

  __ Branch(&done);

  __ bind(&out_of_object);
  __ lw(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
  // Index is equal to the negated out-of-object property index plus 1.
  __ Subu(scratch, result, scratch);
  __ lw(result, FieldMemOperand(scratch,
                                FixedArray::kHeaderSize - kPointerSize));
  __ bind(&done);
}
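// Index encoding assumed by the code above: the (smi) field index is
// non-negative for in-object fields and negative for fields stored in the
// out-of-object properties array, where the magnitude selects the slot:
//
//   index  2  ->  in-object field at kHeaderSize + 2 * kPointerSize
//   index -1  ->  first element of the properties backing store
//                 (negating the scaled index and adjusting by one word)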
static const int kCallerFPOffset
static const int kLengthOffset
static const int kBitFieldOffset
static LGap * cast(LInstruction *instr)
const intptr_t kSmiTagMask
static const int kCodeEntryOffset
static const int kMaxAsciiCharCode
static const int kPrototypeOrInitialMapOffset
static int SlotOffset(int index)
virtual void AfterCall() const
static const int kEnumCacheOffset
static Smi * FromInt(int value)
bool IsFastObjectElementsKind(ElementsKind kind)
static const int kElementsKindBitCount
static HeapObject * cast(Object *obj)
static Handle< T > cast(Handle< S > that)
static const int kGlobalReceiverOffset
static const int kExponentBias
static const int kExternalPointerOffset
virtual ~SafepointGenerator()
static const int kCallerSPOffset
#define ASSERT(condition)
bool CanTransitionToMoreGeneralFastElementsKind(ElementsKind elements_kind, bool allow_only_packed)
const int kPointerSizeLog2
static const int kInObjectFieldCount
static const int kMaximumSlots
MemOperand GlobalObjectOperand()
static const int kInstanceClassNameOffset
int WhichPowerOf2(uint32_t x)
bool is_uint32(int64_t x)
MemOperand ContextOperand(Register context, int index)
static const int kContextOffset
Handle< String > SubString(Handle< String > str, int start, int end, PretenureFlag pretenure)
static const int kHashFieldOffset
static DwVfpRegister FromAllocationIndex(int index)
Condition ReverseCondition(Condition cond)
const uint32_t kSlotsZapValue
DwVfpRegister DoubleRegister
STATIC_ASSERT((FixedDoubleArray::kHeaderSize &kDoubleAlignmentMask)==0)
static const int kLengthOffset
static const int kExponentShift
FPURegister FloatRegister
static const int kValueOffset
const uint32_t kHoleNanUpper32
static void MaybeCallEntryHook(MacroAssembler *masm)
static LConstantOperand * cast(LOperand *op)
const uint32_t kHoleNanLower32
static Register FromAllocationIndex(int index)
static const int kCacheStampOffset
static const int kPropertiesOffset
activate correct semantics for inheriting readonliness enable harmony semantics for typeof enable harmony enable harmony proxies enable all harmony harmony_scoping harmony_proxies harmony_scoping tracks arrays with only smi values automatically unbox arrays of doubles use crankshaft use hydrogen range analysis use hydrogen global value numbering use function inlining maximum number of AST nodes considered for a single inlining loop invariant code motion print statistics for hydrogen trace generated IR for specified phases trace register allocator trace range analysis trace representation types environment for every instruction put a break point before deoptimizing polymorphic inlining perform array bounds checks elimination use dead code elimination trace on stack replacement optimize closures cache optimized code for closures functions with arguments object loop weight for representation inference allow uint32 values on optimize frames if they are used only in safe operations track parallel recompilation enable all profiler experiments number of stack frames inspected by the profiler call recompile stub directly when self optimizing trigger profiler ticks based on counting instead of timing weight back edges by jump distance for interrupt triggering percentage of ICs that must have type info to allow optimization watch_ic_patching retry_self_opt interrupt_at_exit extra verbose compilation tracing generate extra emit comments in code disassembly enable use of SSE3 instructions if available enable use of CMOV instruction if available enable use of SAHF instruction if enable use of VFP3 instructions if available this implies enabling ARMv7 and VFP2 enable use of VFP2 instructions if available enable use of SDIV and UDIV instructions if enable loading bit constant by means of movw movt instruction enable unaligned accesses for enable use of MIPS FPU instructions if expose natives in global object expose gc extension number of stack frames to capture disable builtin natives files print a stack trace if an assertion failure occurs use random jit cookie to mask large constants trace lazy optimization use adaptive optimizations prepare for turning on always opt minimum length for automatic enable preparsing maximum number of optimization attempts before giving up cache prototype transitions automatically set the debug break flag when debugger commands are in the queue always cause a debug break before aborting maximum length of function source code printed in a stack trace max size of the new max size of the old max size of executable always perform global GCs print one trace line following each garbage collection do not print trace line after scavenger collection print more details following each garbage collection print amount of external allocated memory after each time it is adjusted flush code that we expect not to use again before full gc do incremental marking steps track object counts and memory usage use caching Perform compaction on every full GC Never perform compaction on full GC testing only Compact code space on full incremental collections Default seed for initializing random generator(0, the default, means to use system random).") DEFINE_bool(use_verbose_printer
bool IsFastSmiElementsKind(ElementsKind kind)
static int OffsetOfElementAt(int index)
static void Generate(MacroAssembler *masm, Register string, Register index, Register result, Label *call_runtime)
static const int kElementsOffset
static const int kContainsCachedArrayIndexMask
static Vector< T > New(int length)
friend class BlockTrampolinePoolScope
int ElementsKindToShiftSize(ElementsKind elements_kind)
Vector< const char > CStrVector(const char *data)
static int OffsetOfElementAt(int index)
static const int kLengthOffset
static int SizeFor(int length)
static const int kHeaderSize
static const int kMapOffset
static const int kValueOffset
static const int kLengthOffset
static Address GetDeoptimizationEntry(int id, BailoutType type)
MemOperand FieldMemOperand(Register object, int offset)
static const int kHasNonInstancePrototype
static const int kContextOffset
static const int kFunctionOffset
ElementsKind GetInitialFastElementsKind()
static const uint32_t kSignMask
Condition NegateCondition(Condition cond)
#define ASSERT_EQ(v1, v2)
static const int kElementsKindShift
SwVfpRegister low() const
static const int kConstructorOffset
static double canonical_not_the_hole_nan_as_double()
static const int kIsUndetectable
static const int kHeaderSize
static const int kMaximumClonedProperties
static const int kInstrSize
activate correct semantics for inheriting readonliness enable harmony semantics for typeof enable harmony enable harmony proxies enable all harmony harmony_scoping harmony_proxies harmony_scoping tracks arrays with only smi values automatically unbox arrays of doubles use crankshaft use hydrogen range analysis use hydrogen global value numbering use function inlining maximum number of AST nodes considered for a single inlining loop invariant code motion print statistics for hydrogen trace generated IR for specified phases trace register allocator trace range analysis trace representation types environment for every instruction put a break point before deoptimizing polymorphic inlining perform array bounds checks elimination use dead code elimination trace on stack replacement optimize closures cache optimized code for closures functions with arguments object loop weight for representation inference allow uint32 values on optimize frames if they are used only in safe operations track parallel recompilation enable all profiler experiments number of stack frames inspected by the profiler call recompile stub directly when self optimizing trigger profiler ticks based on counting instead of timing weight back edges by jump distance for interrupt triggering percentage of ICs that must have type info to allow optimization watch_ic_patching retry_self_opt interrupt_at_exit extra verbose compilation tracing generate extra emit comments in code disassembly enable use of SSE3 instructions if available enable use of CMOV instruction if available enable use of SAHF instruction if enable use of VFP3 instructions if available this implies enabling ARMv7 and VFP2 enable use of VFP2 instructions if available enable use of SDIV and UDIV instructions if enable loading bit constant by means of movw movt instruction enable unaligned accesses for enable use of MIPS FPU instructions if NULL
static const int kPrototypeOffset
#define RUNTIME_ENTRY(name, nargs, ressize)
activate correct semantics for inheriting readonliness enable harmony semantics for typeof enable harmony enable harmony proxies enable all harmony harmony_scoping harmony_proxies harmony_scoping tracks arrays with only smi values automatically unbox arrays of doubles use crankshaft use hydrogen range analysis use hydrogen global value numbering use function inlining maximum number of AST nodes considered for a single inlining loop invariant code motion print statistics for hydrogen trace generated IR for specified phases trace register allocator trace range analysis trace representation types environment for every instruction put a break point before deoptimizing polymorphic inlining perform array bounds checks elimination use dead code elimination trace on stack replacement optimize closures cache optimized code for closures functions with arguments object loop weight for representation inference allow uint32 values on optimize frames if they are used only in safe operations track parallel recompilation enable all profiler experiments number of stack frames inspected by the profiler call recompile stub directly when self optimizing trigger profiler ticks based on counting instead of timing weight back edges by jump distance for interrupt triggering percentage of ICs that must have type info to allow optimization watch_ic_patching retry_self_opt interrupt_at_exit extra verbose compilation tracing generate extra code(assertions) for debugging") DEFINE_bool(code_comments
static const int kValueOffset
static const int kNativeContextOffset
static const int kMarkerOffset
static const int kExponentBits
static const int kCompilerHintsOffset
static const int kSharedFunctionInfoOffset
Register ToRegister(int num)
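// A plausible body for ToRegister(int), mapping an allocator register
// code to a Register through the standard MIPS numbering (zero_reg = 0
// through ra = 31); the authoritative table lives in the assembler:
Register ToRegister(int num) {
  const Register kRegisters[] = {
    zero_reg, at, v0, v1, a0, a1, a2, a3,
    t0, t1, t2, t3, t4, t5, t6, t7,
    s0, s1, s2, s3, s4, s5, s6, s7,
    t8, t9, k0, k1, gp, sp, fp, ra
  };
  ASSERT(num >= 0 && num < static_cast<int>(ARRAY_SIZE(kRegisters)));
  return kRegisters[num];
}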
// [Further flag help strings from flag-definitions.h: the runtime, GC,
// and testing flag block, ending with the usage/help flag.]
static const int kMaxValue
static const int kBitField2Offset
static HValue* cast(HValue* value)
static Handle<Code> GetUninitialized(Token::Value op)
static const int kMaximumClonedLength
static const int kExponentOffset
static const int kValueOffset
bool EvalComparison(Token::Value op, double op1, double op2)
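// A minimal sketch of the constant-folding helper declared above,
// assuming V8's Token naming for the comparison operators:
bool EvalComparison(Token::Value op, double op1, double op2) {
  switch (op) {
    case Token::EQ:
    case Token::EQ_STRICT:
      return op1 == op2;
    case Token::LT:  return op1 < op2;
    case Token::GT:  return op1 > op2;
    case Token::LTE: return op1 <= op2;
    case Token::GTE: return op1 >= op2;
    default:
      UNREACHABLE();
      return false;
  }
}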
static JSObject* cast(Object* obj)
bool IsFastDoubleElementsKind(ElementsKind kind)
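// Likely shape of the predicate above, assuming the FAST_DOUBLE_ELEMENTS
// and FAST_HOLEY_DOUBLE_ELEMENTS enumerators of V8's ElementsKind:
bool IsFastDoubleElementsKind(ElementsKind kind) {
  return kind == FAST_DOUBLE_ELEMENTS ||
         kind == FAST_HOLEY_DOUBLE_ELEMENTS;
}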
SafepointGenerator(LCodeGen* codegen, LPointerMap* pointers, Safepoint::DeoptMode mode)
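// Typical use of the SafepointGenerator constructor above: wrap a call so
// that a safepoint is recorded when it returns. 'instr' and the chosen
// registers are illustrative:
ParameterCount count(instr->arity());
SafepointGenerator generator(this, instr->pointer_map(), Safepoint::kLazyDeopt);
__ InvokeFunction(a1, count, CALL_FUNCTION, generator, CALL_AS_METHOD);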
static const int kInstanceTypeOffset
virtual void BeforeCall(int call_size) const
static const int kMantissaOffset
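// How offsets such as kExponentOffset and kMantissaOffset above are
// typically used on MIPS: loading the two 32-bit halves of a HeapNumber's
// double value ('input' and 'scratch' are illustrative registers):
__ lw(scratch, FieldMemOperand(input, HeapNumber::kExponentOffset));
__ lw(at, FieldMemOperand(input, HeapNumber::kMantissaOffset));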