class SafepointGenerator V8_FINAL : public CallWrapper {
                     LPointerMap* pointers,
                     Safepoint::DeoptMode mode)
    codegen_->RecordSafepoint(pointers_, deopt_mode_);
  LPointerMap* pointers_;
  Safepoint::DeoptMode deopt_mode_;
  LPhase phase("Z_Code generation", chunk());
  return GeneratePrologue() &&
         GenerateDeferredCode() &&
         GenerateDeoptJumpTable() &&
         GenerateSafepointTable();

  code->set_stack_slots(GetStackSlotCount());
  code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
  if (code->is_optimized_code()) RegisterWeakObjectsInOptimizedCode(code);
  PopulateDeoptimizationData(code);
  info()->CommitDependencies(code);

  info()->set_bailout_reason(reason);
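
// SaveCallerDoubles()/RestoreCallerDoubles() below walk the bit vector of
// allocated double registers and spill/reload each one; they are only emitted
// when info()->saves_caller_doubles() is set (see GeneratePrologue and
// DoReturn).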
void LCodeGen::SaveCallerDoubles() {
  Comment(";;; Save clobbered callee double registers");
  BitVector* doubles = chunk()->allocated_double_registers();
  BitVector::Iterator save_iterator(doubles);
  while (!save_iterator.Done()) {
    save_iterator.Advance();


void LCodeGen::RestoreCallerDoubles() {
  Comment(";;; Restore clobbered callee double registers");
  BitVector* doubles = chunk()->allocated_double_registers();
  BitVector::Iterator save_iterator(doubles);
  while (!save_iterator.Done()) {
    save_iterator.Advance();
bool LCodeGen::GeneratePrologue() {
  if (info()->IsOptimizing()) {
    if (strlen(FLAG_stop_at) > 0 &&
        info_->function()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
    if (info_->this_has_uses() &&
        info_->strict_mode() == SLOPPY &&
        !info_->is_native()) {
      int receiver_offset = info_->scope()->num_parameters() * kPointerSize;
      __ CompareRoot(r2, Heap::kUndefinedValueRootIndex);

  info()->set_prologue_offset(masm_->pc_offset());
    frame_is_built_ = true;
    info_->AddNoFrameRange(0, masm_->pc_offset());

  int slots = GetStackSlotCount();
    if (FLAG_debug_code) {

  if (info()->saves_caller_doubles()) {

  if (heap_slots > 0) {
    Comment(";;; Allocate local context");
    if (heap_slots <= FastNewContextStub::kMaximumSlots) {
      FastNewContextStub stub(heap_slots);
      __ CallRuntime(Runtime::kHiddenNewFunctionContext, 1);
    RecordSafepoint(Safepoint::kNoLazyDeopt);
    for (int i = 0; i < num_parameters; i++) {
      if (var->IsContextSlot()) {
        __ RecordWriteContextSlot(
    Comment(";;; End allocate local context");

  if (FLAG_trace && info()->IsOptimizing()) {
    __ CallRuntime(Runtime::kTraceEnter, 0);
  return !is_aborted();
void LCodeGen::GenerateOsrPrologue() {
  if (osr_pc_offset_ >= 0) return;
  osr_pc_offset_ = masm()->pc_offset();
  int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots();


void LCodeGen::GenerateBodyInstructionPre(LInstruction* instr) {
  if (instr->IsCall()) {
  if (!instr->IsLazyBailout() && !instr->IsGap()) {
    safepoints_.BumpLastLazySafepointIndex();
bool LCodeGen::GenerateDeferredCode() {
  if (deferred_.length() > 0) {
    for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
          instructions_->at(code->instruction_index())->hydrogen_value();
      RecordAndWritePosition(
          chunk()->graph()->SourcePositionToScriptPosition(value->position()));
      Comment(";;; <@%d,#%d> "
              "-------------------- Deferred %s --------------------",
              code->instruction_index(),
              code->instr()->hydrogen_value()->id(),
              code->instr()->Mnemonic());
      __ bind(code->entry());
        Comment(";;; Build frame");
        frame_is_built_ = true;
      Comment(";;; Deferred code");
        Comment(";;; Destroy frame");
        frame_is_built_ = false;
      __ jmp(code->exit());

  masm()->CheckConstPool(true, false);
  return !is_aborted();
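
// The deopt jump table emitted below has one entry per distinct deoptimization
// target recorded by DeoptimizeIf(); entries flagged with needs_frame build a
// stub frame before jumping to the deoptimizer entry point.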
bool LCodeGen::GenerateDeoptJumpTable() {
                   deopt_jump_table_.length() * 7)) {
    Abort(kGeneratedCodeIsTooLarge);

  if (deopt_jump_table_.length() > 0) {
    Comment(";;; -------------------- Jump table --------------------");
    __ bind(&table_start);
    for (int i = 0; i < deopt_jump_table_.length(); i++) {
      __ bind(&deopt_jump_table_[i].label);
      Address entry = deopt_jump_table_[i].address;
        Comment(";;; jump table entry %d.", i);
        Comment(";;; jump table entry %d: deoptimization bailout %d.", i, id);
      if (deopt_jump_table_[i].needs_frame) {
        __ mov(ip, Operand(ExternalReference::ForDeoptEntry(entry)));
        if (needs_frame.is_bound()) {
          __ bind(&needs_frame);
        if (info()->saves_caller_doubles()) {
          RestoreCallerDoubles();
        __ mov(pc, Operand(ExternalReference::ForDeoptEntry(entry)));
      masm()->CheckConstPool(false, false);
    masm()->CheckConstPool(true, false);

  if (!is_aborted()) status_ = DONE;
  return !is_aborted();


bool LCodeGen::GenerateSafepointTable() {
  safepoints_.Emit(masm(), GetStackSlotCount());
  return !is_aborted();
  if (op->IsRegister()) {
  } else if (op->IsConstantOperand()) {
    LConstantOperand* const_op = LConstantOperand::cast(op);
    HConstant* constant = chunk_->LookupConstant(const_op);
      __ mov(scratch, Operand(static_cast<int32_t>(literal->Number())));
      Abort(kEmitLoadRegisterUnsupportedDoubleImmediate);
      __ Move(scratch, literal);
  } else if (op->IsStackSlot()) {
  ASSERT(op->IsDoubleRegister());

  if (op->IsDoubleRegister()) {
  } else if (op->IsConstantOperand()) {
    LConstantOperand* const_op = LConstantOperand::cast(op);
    HConstant* constant = chunk_->LookupConstant(const_op);
      __ mov(ip, Operand(static_cast<int32_t>(literal->Number())));
      __ vmov(flt_scratch, ip);
      __ vcvt_f64_s32(dbl_scratch, flt_scratch);
      Abort(kUnsupportedDoubleImmediate);
      Abort(kUnsupportedTaggedImmediate);
  } else if (op->IsStackSlot()) {
    __ vldr(dbl_scratch, mem_op.rn(), mem_op.offset());

  HConstant* constant = chunk_->LookupConstant(op);
  ASSERT(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged());
  return constant->handle(isolate());

  return chunk_->LookupLiteralRepresentation(op).IsSmiOrInteger32();

  return chunk_->LookupLiteralRepresentation(op).IsSmi();

  HConstant* constant = chunk_->LookupConstant(op);
  int32_t value = constant->Integer32Value();

  HConstant* constant = chunk_->LookupConstant(op);

  HConstant* constant = chunk_->LookupConstant(op);
  ASSERT(constant->HasDoubleValue());
  return constant->DoubleValue();

  if (op->IsConstantOperand()) {
    LConstantOperand* const_op = LConstantOperand::cast(op);
    HConstant* constant = chunk()->LookupConstant(const_op);
      ASSERT(constant->HasSmiValue());
      ASSERT(constant->HasInteger32Value());
      return Operand(constant->Integer32Value());
      Abort(kToOperandUnsupportedDoubleImmediate);
    return Operand(constant->handle(isolate()));
  } else if (op->IsRegister()) {
  } else if (op->IsDoubleRegister()) {
    Abort(kToOperandIsDoubleRegisterUnimplemented);
    return Operand::Zero();
  return Operand::Zero();


static int ArgumentsOffsetWithoutFrame(int index) {

  ASSERT(!op->IsRegister());
  ASSERT(!op->IsDoubleRegister());
  ASSERT(op->IsStackSlot() || op->IsDoubleStackSlot());

  ASSERT(op->IsDoubleStackSlot());
                                Translation* translation) {
  if (environment == NULL) return;

  int translation_size = environment->translation_size();
  int height = translation_size - environment->parameter_count();

  bool has_closure_id = !info()->closure().is_null() &&
      !info()->closure().is_identical_to(environment->closure());
  int closure_id = has_closure_id
      ? DefineDeoptimizationLiteral(environment->closure())
      : Translation::kSelfLiteralId;

  switch (environment->frame_type()) {
      translation->BeginJSFrame(environment->ast_id(), closure_id, height);
      translation->BeginConstructStubFrame(closure_id, translation_size);
      ASSERT(translation_size == 1);
      translation->BeginGetterStubFrame(closure_id);
      ASSERT(translation_size == 2);
      translation->BeginSetterStubFrame(closure_id);
      translation->BeginCompiledStubFrame();
      translation->BeginArgumentsAdaptorFrame(closure_id, translation_size);

  int object_index = 0;
  int dematerialized_index = 0;
  for (int i = 0; i < translation_size; ++i) {
    LOperand* value = environment->values()->at(i);
    AddToTranslation(environment,
                     environment->HasTaggedValueAt(i),
                     environment->HasUint32ValueAt(i),
                     &dematerialized_index);
void LCodeGen::AddToTranslation(LEnvironment* environment,
                                Translation* translation,
                                int* object_index_pointer,
                                int* dematerialized_index_pointer) {
  if (op == LEnvironment::materialization_marker()) {
    int object_index = (*object_index_pointer)++;
    if (environment->ObjectIsDuplicateAt(object_index)) {
      int dupe_of = environment->ObjectDuplicateOfAt(object_index);
      translation->DuplicateObject(dupe_of);
    int object_length = environment->ObjectLengthAt(object_index);
    if (environment->ObjectIsArgumentsAt(object_index)) {
      translation->BeginArgumentsObject(object_length);
      translation->BeginCapturedObject(object_length);
    int dematerialized_index = *dematerialized_index_pointer;
    int env_offset = environment->translation_size() + dematerialized_index;
    *dematerialized_index_pointer += object_length;
    for (int i = 0; i < object_length; ++i) {
      LOperand* value = environment->values()->at(env_offset + i);
      AddToTranslation(environment,
                       environment->HasTaggedValueAt(env_offset + i),
                       environment->HasUint32ValueAt(env_offset + i),
                       object_index_pointer,
                       dematerialized_index_pointer);

  if (op->IsStackSlot()) {
      translation->StoreStackSlot(op->index());
    } else if (is_uint32) {
      translation->StoreUint32StackSlot(op->index());
      translation->StoreInt32StackSlot(op->index());
  } else if (op->IsDoubleStackSlot()) {
    translation->StoreDoubleStackSlot(op->index());
  } else if (op->IsRegister()) {
      translation->StoreRegister(reg);
    } else if (is_uint32) {
      translation->StoreUint32Register(reg);
      translation->StoreInt32Register(reg);
  } else if (op->IsDoubleRegister()) {
    translation->StoreDoubleRegister(reg);
  } else if (op->IsConstantOperand()) {
    HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op));
    int src_index = DefineDeoptimizationLiteral(constant->handle(isolate()));
    translation->StoreLiteral(src_index);
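
// The translation stream written above is what the deoptimizer later replays
// to rebuild unoptimized frames: every environment value is recorded as a
// stack slot, register or literal in its tagged, int32, uint32 or double
// flavour, and materialized objects are captured recursively.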
void LCodeGen::CallCode(Handle<Code> code,
                        RelocInfo::Mode mode,
  CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT, storage_mode);


void LCodeGen::CallCodeGeneric(Handle<Code> code,
                               RelocInfo::Mode mode,
                               SafepointMode safepoint_mode,
  RecordSafepointWithLazyDeopt(instr, safepoint_mode);
  if (code->kind() == Code::BINARY_OP_IC ||
      code->kind() == Code::COMPARE_IC) {


void LCodeGen::CallRuntime(const Runtime::Function* function,
  __ CallRuntime(function, num_arguments, save_doubles);
  RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);


void LCodeGen::LoadContextFromDeferred(LOperand* context) {
  if (context->IsRegister()) {
  } else if (context->IsStackSlot()) {
  } else if (context->IsConstantOperand()) {
    HConstant* constant =
        chunk_->LookupConstant(LConstantOperand::cast(context));
    __ Move(cp, Handle<Object>::cast(constant->handle(isolate())));

  LoadContextFromDeferred(context);
  __ CallRuntimeSaveDoubles(id);
  RecordSafepointWithRegisters(
      instr->pointer_map(), argc, Safepoint::kNoLazyDeopt);


void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
                                                    Safepoint::DeoptMode mode) {
  if (!environment->HasBeenRegistered()) {
    int jsframe_count = 0;
    Translation translation(&translations_, frame_count, jsframe_count, zone());
    int deoptimization_index = deoptimizations_.length();
    int pc_offset = masm()->pc_offset();
    environment->Register(deoptimization_index,
                          (mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
    deoptimizations_.Add(environment, zone());
void LCodeGen::DeoptimizeIf(Condition condition,
                            LEnvironment* environment,
  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
  ASSERT(environment->HasBeenRegistered());
  int id = environment->deoptimization_index();
    Abort(kBailoutWasNotPrepared);

  if (FLAG_deopt_every_n_times != 0 && !info()->IsStub()) {
    Register scratch = scratch0();
    ExternalReference count = ExternalReference::stress_deopt_count(isolate());
    if (condition != al) {
      __ mov(scratch, Operand(1), LeaveCC, condition);
    __ mov(scratch, Operand(count));
    __ movw(r1, FLAG_deopt_every_n_times, eq);
    if (condition != al) {

  if (condition != al) {
    __ cmp(scratch, Operand::Zero());

  if (info()->ShouldTrapOnDeopt()) {
    __ stop("trap_on_deopt", condition);

  if (condition == al && frame_is_built_ &&
      !info()->saves_caller_doubles()) {
    if (deopt_jump_table_.is_empty() ||
        (deopt_jump_table_.last().address != entry) ||
        (deopt_jump_table_.last().bailout_type != bailout_type) ||
        (deopt_jump_table_.last().needs_frame != !frame_is_built_)) {
      Deoptimizer::JumpTableEntry table_entry(entry,
      deopt_jump_table_.Add(table_entry, zone());
    __ b(condition, &deopt_jump_table_.last().label);


void LCodeGen::DeoptimizeIf(Condition condition,
                            LEnvironment* environment) {
  DeoptimizeIf(condition, environment, bailout_type);
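
// The two DeoptimizeIf overloads above register the environment, then either
// branch straight to the deoptimizer entry (when no frame has to be built) or
// add an entry to deopt_jump_table_ and branch to its label; the
// FLAG_deopt_every_n_times block is a stress mode that forces periodic deopts.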
void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
  int length = deoptimizations_.length();
  if (length == 0) return;
  Handle<DeoptimizationInputData> data =
      factory()->NewDeoptimizationInputData(length, TENURED);

  Handle<ByteArray> translations =
      translations_.CreateByteArray(isolate()->factory());
  data->SetTranslationByteArray(*translations);
  data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));
  data->SetOptimizationId(Smi::FromInt(info_->optimization_id()));
  if (info_->IsOptimizing()) {
    data->SetSharedFunctionInfo(*info_->shared_info());

      factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
    for (int i = 0; i < deoptimization_literals_.length(); i++) {
      literals->set(i, *deoptimization_literals_[i]);
    data->SetLiteralArray(*literals);

  data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id().ToInt()));

  for (int i = 0; i < length; i++) {
    data->SetAstId(i, env->ast_id());
    data->SetTranslationIndex(i, Smi::FromInt(env->translation_index()));
    data->SetArgumentsStackHeight(i,
  code->set_deoptimization_data(*data);
int LCodeGen::DefineDeoptimizationLiteral(Handle<Object> literal) {
  int result = deoptimization_literals_.length();
  for (int i = 0; i < deoptimization_literals_.length(); ++i) {
    if (deoptimization_literals_[i].is_identical_to(literal)) return i;
  deoptimization_literals_.Add(literal, zone());


void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
  ASSERT(deoptimization_literals_.length() == 0);
  const ZoneList<Handle<JSFunction> >* inlined_closures =
      chunk()->inlined_closures();
  for (int i = 0, length = inlined_closures->length();
    DefineDeoptimizationLiteral(inlined_closures->at(i));
  inlined_function_count_ = deoptimization_literals_.length();
void LCodeGen::RecordSafepointWithLazyDeopt(
    LInstruction* instr, SafepointMode safepoint_mode) {
  if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
    RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt);
    ASSERT(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
    RecordSafepointWithRegisters(
        instr->pointer_map(), 0, Safepoint::kLazyDeopt);


void LCodeGen::RecordSafepoint(
    LPointerMap* pointers,
    Safepoint::Kind kind,
    Safepoint::DeoptMode deopt_mode) {
  ASSERT(expected_safepoint_kind_ == kind);

  const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();
  Safepoint safepoint = safepoints_.DefineSafepoint(masm(),
      kind, arguments, deopt_mode);
  for (int i = 0; i < operands->length(); i++) {
    LOperand* pointer = operands->at(i);
    if (pointer->IsStackSlot()) {
      safepoint.DefinePointerSlot(pointer->index(), zone());
    } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
      safepoint.DefinePointerRegister(ToRegister(pointer), zone());
  if (FLAG_enable_ool_constant_pool && (kind & Safepoint::kWithRegisters)) {
    safepoint.DefinePointerRegister(pp, zone());


void LCodeGen::RecordSafepoint(LPointerMap* pointers,
                               Safepoint::DeoptMode deopt_mode) {
  RecordSafepoint(pointers, Safepoint::kSimple, 0, deopt_mode);


void LCodeGen::RecordSafepoint(Safepoint::DeoptMode deopt_mode) {
  LPointerMap empty_pointers(zone());
  RecordSafepoint(&empty_pointers, deopt_mode);


void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
                                            Safepoint::DeoptMode deopt_mode) {
      pointers, Safepoint::kWithRegisters, arguments, deopt_mode);


void LCodeGen::RecordSafepointWithRegistersAndDoubles(
    LPointerMap* pointers,
    Safepoint::DeoptMode deopt_mode) {
      pointers, Safepoint::kWithRegistersAndDoubles, arguments, deopt_mode);


void LCodeGen::RecordAndWritePosition(int position) {
  if (position == RelocInfo::kNoPosition) return;
  masm()->positions_recorder()->RecordPosition(position);
  masm()->positions_recorder()->WriteRecordedPositions();
static const char* LabelType(LLabel* label) {
  if (label->is_loop_header()) return " (loop header)";
  if (label->is_osr_entry()) return " (OSR entry)";


void LCodeGen::DoLabel(LLabel* label) {
  Comment(";;; <@%d,#%d> -------------------- B%d%s --------------------",
          current_instruction_,
          label->hydrogen_value()->id(),
  __ bind(label->label());
  current_block_ = label->block_id();

  resolver_.Resolve(move);


void LCodeGen::DoInstructionGap(LInstructionGap* instr) {


void LCodeGen::DoParameter(LParameter* instr) {


void LCodeGen::DoCallStub(LCallStub* instr) {
  switch (instr->hydrogen()->major_key()) {
    case CodeStub::RegExpExec: {
      RegExpExecStub stub;
      CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
    case CodeStub::SubString: {
      CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
    case CodeStub::StringCompare: {
      StringCompareStub stub;
      CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);


void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
  GenerateOsrPrologue();
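
// DoModByPowerOf2I below computes dividend % (+/-2^k) by masking with
// mask = |divisor| - 1; a negative dividend is negated before and after the
// mask so the remainder keeps the dividend's sign, and a zero result from a
// negative dividend deoptimizes when -0 would be observable.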
void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();

  HMod* hmod = instr->hydrogen();
  int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
  Label dividend_is_not_negative, done;
    __ cmp(dividend, Operand::Zero());
    __ b(pl, &dividend_is_not_negative);
    __ rsb(dividend, dividend, Operand::Zero());
    __ and_(dividend, dividend, Operand(mask));
    __ rsb(dividend, dividend, Operand::Zero(), SetCC);
      DeoptimizeIf(eq, instr->environment());

  __ bind(&dividend_is_not_negative);
  __ and_(dividend, dividend, Operand(mask));
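
// DoModByConstI below uses TruncatingDiv (multiplication by a precomputed
// magic constant) to form dividend / |divisor|, multiplies back and subtracts
// to obtain the remainder; a zero remainder with a negative dividend
// deoptimizes when -0 matters.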
void LCodeGen::DoModByConstI(LModByConstI* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  Register result = ToRegister(instr->result());
  ASSERT(!dividend.is(result));
    DeoptimizeIf(al, instr->environment());

  __ TruncatingDiv(result, dividend, Abs(divisor));
  __ mov(ip, Operand(Abs(divisor)));
  __ smull(result, ip, result, ip);
  __ sub(result, dividend, result, SetCC);

  HMod* hmod = instr->hydrogen();
    Label remainder_not_zero;
    __ b(ne, &remainder_not_zero);
    __ cmp(dividend, Operand::Zero());
    DeoptimizeIf(lt, instr->environment());
    __ bind(&remainder_not_zero);
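
// DoModI below has two paths: with SUDIV the remainder is sdiv followed by
// mls (result = left - (left / right) * right); without it the operands are
// converted to doubles, divided with VFP, and the truncated quotient is
// multiplied back to recover the remainder.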
void LCodeGen::DoModI(LModI* instr) {
  HMod* hmod = instr->hydrogen();
    CpuFeatureScope scope(masm(), SUDIV);

    Register left_reg = ToRegister(instr->left());
    Register right_reg = ToRegister(instr->right());
    Register result_reg = ToRegister(instr->result());

      __ cmp(right_reg, Operand::Zero());
      DeoptimizeIf(eq, instr->environment());

    Label no_overflow_possible;
      __ b(ne, &no_overflow_possible);
      __ cmp(right_reg, Operand(-1));
        DeoptimizeIf(eq, instr->environment());
        __ b(ne, &no_overflow_possible);
        __ mov(result_reg, Operand::Zero());
      __ bind(&no_overflow_possible);

    __ sdiv(result_reg, left_reg, right_reg);
    __ mls(result_reg, result_reg, right_reg, left_reg);

      __ cmp(result_reg, Operand::Zero());
      __ cmp(left_reg, Operand::Zero());
      DeoptimizeIf(lt, instr->environment());

    Register left_reg = ToRegister(instr->left());
    Register right_reg = ToRegister(instr->right());
    Register result_reg = ToRegister(instr->result());
    Register scratch = scratch0();
    ASSERT(!scratch.is(left_reg));
    ASSERT(!scratch.is(right_reg));
    ASSERT(!scratch.is(result_reg));
    ASSERT(!divisor.is(dividend));
    LowDwVfpRegister quotient = double_scratch0();
    ASSERT(!quotient.is(dividend));
    ASSERT(!quotient.is(divisor));

      __ cmp(right_reg, Operand::Zero());
      DeoptimizeIf(eq, instr->environment());

    __ Move(result_reg, left_reg);
    __ vmov(double_scratch0().low(), left_reg);
    __ vcvt_f64_s32(dividend, double_scratch0().low());
    __ vmov(double_scratch0().low(), right_reg);
    __ vcvt_f64_s32(divisor, double_scratch0().low());

    __ vabs(divisor, divisor);
    __ vdiv(quotient, dividend, divisor);
    __ vcvt_s32_f64(quotient.low(), quotient);
    __ vcvt_f64_s32(quotient, quotient.low());

    __ vmul(double_scratch0(), divisor, quotient);
    __ vcvt_s32_f64(double_scratch0().low(), double_scratch0());
    __ vmov(scratch, double_scratch0().low());
    __ sub(result_reg, left_reg, scratch, SetCC);

      __ cmp(left_reg, Operand::Zero());
      DeoptimizeIf(mi, instr->environment());
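
// DoDivByPowerOf2I below divides by +/-2^k with an arithmetic shift: a
// rounding bias derived from the sign bit is added to negative dividends
// first so the shift truncates toward zero, and a negative divisor negates
// the result with rsb.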
void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  Register result = ToRegister(instr->result());
  ASSERT(!result.is(dividend));

  HDiv* hdiv = instr->hydrogen();
    __ cmp(dividend, Operand::Zero());
    DeoptimizeIf(eq, instr->environment());
    DeoptimizeIf(eq, instr->environment());
      divisor != 1 && divisor != -1) {
    int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
    __ tst(dividend, Operand(mask));
    DeoptimizeIf(ne, instr->environment());

  if (divisor == -1) {
    __ rsb(result, dividend, Operand(0));
    __ mov(result, dividend);
  } else if (shift == 1) {
    __ add(result, dividend, Operand(dividend, LSR, 31));
    __ mov(result, Operand(dividend, ASR, 31));
    __ add(result, dividend, Operand(result, LSR, 32 - shift));
  if (shift > 0) __ mov(result, Operand(result, ASR, shift));
  if (divisor < 0) __ rsb(result, result, Operand(0));
void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  Register result = ToRegister(instr->result());
  ASSERT(!dividend.is(result));
    DeoptimizeIf(al, instr->environment());

  HDiv* hdiv = instr->hydrogen();
    __ cmp(dividend, Operand::Zero());
    DeoptimizeIf(eq, instr->environment());

  __ TruncatingDiv(result, dividend, Abs(divisor));
  if (divisor < 0) __ rsb(result, result, Operand::Zero());

    __ mov(ip, Operand(divisor));
    __ smull(scratch0(), ip, result, ip);
    __ sub(scratch0(), scratch0(), dividend, SetCC);
    DeoptimizeIf(ne, instr->environment());
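
// DoDivI below handles the general register/register case: it deoptimizes on
// a zero divisor, on -0 and on kMinInt / -1 when required, then divides either
// with sdiv (SUDIV) or via VFP double division, and for non-truncating
// divisions deoptimizes if mls leaves a non-zero remainder.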
1366 void LCodeGen::DoDivI(LDivI* instr) {
1367 HBinaryOperation* hdiv = instr->hydrogen();
1370 Register result =
ToRegister(instr->result());
1374 __ cmp(right, Operand::Zero());
1375 DeoptimizeIf(
eq, instr->environment());
1383 __ cmp(right, Operand::Zero());
1385 __ b(
pl, &positive);
1386 __ cmp(left, Operand::Zero());
1387 DeoptimizeIf(
eq, instr->environment());
1398 __ cmp(right, Operand(-1),
eq);
1399 DeoptimizeIf(
eq, instr->environment());
1403 CpuFeatureScope scope(masm(),
SUDIV);
1404 __ sdiv(result, left, right);
1408 __ vmov(double_scratch0().low(), left);
1409 __ vcvt_f64_s32(vleft, double_scratch0().low());
1410 __ vmov(double_scratch0().low(), right);
1411 __ vcvt_f64_s32(vright, double_scratch0().low());
1412 __ vdiv(vleft, vleft, vright);
1413 __ vcvt_s32_f64(double_scratch0().low(), vleft);
1414 __ vmov(result, double_scratch0().low());
1417 if (hdiv->IsMathFloorOfDiv()) {
1419 Register remainder = scratch0();
1420 __ mls(remainder, result, right, left);
1421 __ cmp(remainder, Operand::Zero());
1423 __ eor(remainder, remainder, Operand(right));
1424 __ add(result, result, Operand(remainder,
ASR, 31));
1428 Register remainder = scratch0();
1429 __ mls(remainder, result, right, left);
1430 __ cmp(remainder, Operand::Zero());
1431 DeoptimizeIf(
ne, instr->environment());
1436 void LCodeGen::DoMultiplyAddD(LMultiplyAddD* instr) {
1444 __ vmla(addend, multiplier, multiplicand);
1448 void LCodeGen::DoMultiplySubD(LMultiplySubD* instr) {
1456 __ vmls(minuend, multiplier, multiplicand);
1460 void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
1461 Register dividend =
ToRegister(instr->dividend());
1462 Register result =
ToRegister(instr->result());
1463 int32_t divisor = instr->divisor();
1467 if (divisor == 1)
return;
1470 __ mov(result, Operand(dividend,
ASR, shift));
1475 __ rsb(result, dividend, Operand::Zero(),
SetCC);
1477 DeoptimizeIf(
eq, instr->environment());
1482 if (divisor == -1) {
1483 DeoptimizeIf(
vs, instr->environment());
1484 __ mov(result, Operand(dividend,
ASR, shift));
1490 __ mov(result, Operand(dividend,
ASR, shift));
1495 void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
1496 Register dividend =
ToRegister(instr->dividend());
1497 int32_t divisor = instr->divisor();
1498 Register result =
ToRegister(instr->result());
1499 ASSERT(!dividend.is(result));
1502 DeoptimizeIf(
al, instr->environment());
1507 HMathFloorOfDiv* hdiv = instr->hydrogen();
1509 __ cmp(dividend, Operand::Zero());
1510 DeoptimizeIf(
eq, instr->environment());
1517 __ TruncatingDiv(result, dividend,
Abs(divisor));
1518 if (divisor < 0)
__ rsb(result, result, Operand::Zero());
1525 ASSERT(!temp.is(dividend) && !temp.is(result));
1526 Label needs_adjustment, done;
1527 __ cmp(dividend, Operand::Zero());
1528 __ b(divisor > 0 ?
lt :
gt, &needs_adjustment);
1529 __ TruncatingDiv(result, dividend,
Abs(divisor));
1530 if (divisor < 0)
__ rsb(result, result, Operand::Zero());
1532 __ bind(&needs_adjustment);
1533 __ add(temp, dividend, Operand(divisor > 0 ? 1 : -1));
1534 __ TruncatingDiv(result, temp,
Abs(divisor));
1535 if (divisor < 0)
__ rsb(result, result, Operand::Zero());
1536 __ sub(result, result, Operand(1));
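
// DoMulI below strength-reduces multiplication by a constant: 0, 1 and -1 are
// special-cased, powers of two become shifts, constants of the form 2^k +/- 1
// become add/rsb with a shifted operand, and everything else falls back to
// mul; overflow and -0 checks deoptimize when the hydrogen instruction
// demands them.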
1541 void LCodeGen::DoMulI(LMulI* instr) {
1542 Register result =
ToRegister(instr->result());
1545 LOperand* right_op = instr->right();
1547 bool bailout_on_minus_zero =
1551 if (right_op->IsConstantOperand()) {
1554 if (bailout_on_minus_zero && (constant < 0)) {
1557 __ cmp(left, Operand::Zero());
1558 DeoptimizeIf(
eq, instr->environment());
1564 __ rsb(result, left, Operand::Zero(),
SetCC);
1565 DeoptimizeIf(
vs, instr->environment());
1567 __ rsb(result, left, Operand::Zero());
1571 if (bailout_on_minus_zero) {
1574 __ cmp(left, Operand::Zero());
1575 DeoptimizeIf(
mi, instr->environment());
1577 __ mov(result, Operand::Zero());
1580 __ Move(result, left);
1586 int32_t mask = constant >> 31;
1587 uint32_t constant_abs = (constant + mask) ^ mask;
1591 __ mov(result, Operand(left,
LSL, shift));
1593 if (constant < 0)
__ rsb(result, result, Operand::Zero());
1596 __ add(result, left, Operand(left,
LSL, shift));
1598 if (constant < 0)
__ rsb(result, result, Operand::Zero());
1601 __ rsb(result, left, Operand(left,
LSL, shift));
1603 if (constant < 0)
__ rsb(result, result, Operand::Zero());
1606 __ mov(
ip, Operand(constant));
1607 __ mul(result, left,
ip);
1612 ASSERT(right_op->IsRegister());
1616 Register scratch = scratch0();
1618 if (instr->hydrogen()->representation().IsSmi()) {
1619 __ SmiUntag(result, left);
1620 __ smull(result, scratch, result, right);
1622 __ smull(result, scratch, left, right);
1624 __ cmp(scratch, Operand(result,
ASR, 31));
1625 DeoptimizeIf(
ne, instr->environment());
1627 if (instr->hydrogen()->representation().IsSmi()) {
1628 __ SmiUntag(result, left);
1629 __ mul(result, result, right);
1631 __ mul(result, left, right);
1635 if (bailout_on_minus_zero) {
1637 __ teq(left, Operand(right));
1640 __ cmp(result, Operand::Zero());
1641 DeoptimizeIf(
eq, instr->environment());
1648 void LCodeGen::DoBitI(LBitI* instr) {
1649 LOperand* left_op = instr->left();
1650 LOperand* right_op = instr->right();
1651 ASSERT(left_op->IsRegister());
1653 Register result =
ToRegister(instr->result());
1656 if (right_op->IsStackSlot()) {
1659 ASSERT(right_op->IsRegister() || right_op->IsConstantOperand());
1663 switch (instr->op()) {
1664 case Token::BIT_AND:
1665 __ and_(result, left, right);
1668 __ orr(result, left, right);
1670 case Token::BIT_XOR:
1671 if (right_op->IsConstantOperand() && right.immediate() ==
int32_t(~0)) {
1672 __ mvn(result, Operand(left));
1674 __ eor(result, left, right);
1684 void LCodeGen::DoShiftI(LShiftI* instr) {
1687 LOperand* right_op = instr->right();
1689 Register result =
ToRegister(instr->result());
1690 Register scratch = scratch0();
1691 if (right_op->IsRegister()) {
1694 switch (instr->op()) {
1696 __ mov(result, Operand(left,
ROR, scratch));
1699 __ mov(result, Operand(left,
ASR, scratch));
1702 if (instr->can_deopt()) {
1703 __ mov(result, Operand(left,
LSR, scratch),
SetCC);
1704 DeoptimizeIf(
mi, instr->environment());
1706 __ mov(result, Operand(left,
LSR, scratch));
1710 __ mov(result, Operand(left,
LSL, scratch));
1718 int value =
ToInteger32(LConstantOperand::cast(right_op));
1719 uint8_t shift_count =
static_cast<uint8_t
>(value & 0x1F);
1720 switch (instr->op()) {
1722 if (shift_count != 0) {
1723 __ mov(result, Operand(left,
ROR, shift_count));
1725 __ Move(result, left);
1729 if (shift_count != 0) {
1730 __ mov(result, Operand(left,
ASR, shift_count));
1732 __ Move(result, left);
1736 if (shift_count != 0) {
1737 __ mov(result, Operand(left,
LSR, shift_count));
1739 if (instr->can_deopt()) {
1740 __ tst(left, Operand(0x80000000));
1741 DeoptimizeIf(
ne, instr->environment());
1743 __ Move(result, left);
1747 if (shift_count != 0) {
1748 if (instr->hydrogen_value()->representation().IsSmi() &&
1749 instr->can_deopt()) {
1750 if (shift_count != 1) {
1751 __ mov(result, Operand(left,
LSL, shift_count - 1));
1752 __ SmiTag(result, result,
SetCC);
1754 __ SmiTag(result, left,
SetCC);
1756 DeoptimizeIf(
vs, instr->environment());
1758 __ mov(result, Operand(left,
LSL, shift_count));
1761 __ Move(result, left);
1772 void LCodeGen::DoSubI(LSubI* instr) {
1773 LOperand* left = instr->left();
1774 LOperand* right = instr->right();
1775 LOperand* result = instr->result();
1779 if (right->IsStackSlot()) {
1783 ASSERT(right->IsRegister() || right->IsConstantOperand());
1788 DeoptimizeIf(
vs, instr->environment());
1793 void LCodeGen::DoRSubI(LRSubI* instr) {
1794 LOperand* left = instr->left();
1795 LOperand* right = instr->right();
1796 LOperand* result = instr->result();
1800 if (right->IsStackSlot()) {
1804 ASSERT(right->IsRegister() || right->IsConstantOperand());
1809 DeoptimizeIf(
vs, instr->environment());
1814 void LCodeGen::DoConstantI(LConstantI* instr) {
1815 __ mov(
ToRegister(instr->result()), Operand(instr->value()));
1819 void LCodeGen::DoConstantS(LConstantS* instr) {
1820 __ mov(
ToRegister(instr->result()), Operand(instr->value()));
1824 void LCodeGen::DoConstantD(LConstantD* instr) {
1825 ASSERT(instr->result()->IsDoubleRegister());
1827 double v = instr->value();
1828 __ Vmov(result, v, scratch0());
1832 void LCodeGen::DoConstantE(LConstantE* instr) {
1833 __ mov(
ToRegister(instr->result()), Operand(instr->value()));
1837 void LCodeGen::DoConstantT(LConstantT* instr) {
1838 Handle<Object> value = instr->value(isolate());
1844 void LCodeGen::DoMapEnumLength(LMapEnumLength* instr) {
1845 Register result =
ToRegister(instr->result());
1847 __ EnumLength(result, map);
1851 void LCodeGen::DoDateField(LDateField* instr) {
1853 Register result =
ToRegister(instr->result());
1854 Register scratch =
ToRegister(instr->temp());
1855 Smi* index = instr->index();
1856 Label runtime, done;
1857 ASSERT(
object.is(result));
1859 ASSERT(!scratch.is(scratch0()));
1860 ASSERT(!scratch.is(
object));
1863 DeoptimizeIf(
eq, instr->environment());
1865 DeoptimizeIf(
ne, instr->environment());
1867 if (index->value() == 0) {
1871 ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
1872 __ mov(scratch, Operand(stamp));
1875 __ cmp(scratch, scratch0());
1882 __ PrepareCallCFunction(2, scratch);
1883 __ mov(
r1, Operand(index));
1884 __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
1890 MemOperand LCodeGen::BuildSeqStringOperand(Register
string,
1893 if (index->IsConstantOperand()) {
1894 int offset =
ToInteger32(LConstantOperand::cast(index));
1901 Register scratch = scratch0();
1902 ASSERT(!scratch.is(
string));
1914 void LCodeGen::DoSeqStringGetChar(LSeqStringGetChar* instr) {
1916 Register
string =
ToRegister(instr->string());
1917 Register result =
ToRegister(instr->result());
1919 if (FLAG_debug_code) {
1920 Register scratch = scratch0();
1924 __ and_(scratch, scratch,
1929 ? one_byte_seq_type : two_byte_seq_type));
1930 __ Check(
eq, kUnexpectedStringType);
1933 MemOperand operand = BuildSeqStringOperand(
string, instr->index(), encoding);
1935 __ ldrb(result, operand);
1937 __ ldrh(result, operand);
1942 void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
1944 Register
string =
ToRegister(instr->string());
1947 if (FLAG_debug_code) {
1953 ? one_byte_seq_type : two_byte_seq_type;
1954 __ EmitSeqStringSetCharCheck(
string, index, value, encoding_mask);
1957 MemOperand operand = BuildSeqStringOperand(
string, instr->index(), encoding);
1959 __ strb(value, operand);
1961 __ strh(value, operand);
1966 void LCodeGen::DoAddI(LAddI* instr) {
1967 LOperand* left = instr->left();
1968 LOperand* right = instr->right();
1969 LOperand* result = instr->result();
1973 if (right->IsStackSlot()) {
1977 ASSERT(right->IsRegister() || right->IsConstantOperand());
1982 DeoptimizeIf(
vs, instr->environment());
1987 void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
1988 LOperand* left = instr->left();
1989 LOperand* right = instr->right();
1990 HMathMinMax::Operation operation = instr->hydrogen()->operation();
1991 if (instr->hydrogen()->representation().IsSmiOrInteger32()) {
1992 Condition condition = (operation == HMathMinMax::kMathMin) ?
le :
ge;
1994 Operand right_op = (right->IsRegister() || right->IsConstantOperand())
1997 Register result_reg =
ToRegister(instr->result());
1998 __ cmp(left_reg, right_op);
1999 __ Move(result_reg, left_reg, condition);
2002 ASSERT(instr->hydrogen()->representation().IsDouble());
2006 Label result_is_nan, return_left, return_right, check_zero, done;
2007 __ VFPCompareAndSetFlags(left_reg, right_reg);
2008 if (operation == HMathMinMax::kMathMin) {
2009 __ b(
mi, &return_left);
2010 __ b(
gt, &return_right);
2012 __ b(
mi, &return_right);
2013 __ b(
gt, &return_left);
2015 __ b(
vs, &result_is_nan);
2017 __ VFPCompareAndSetFlags(left_reg, 0.0);
2018 if (left_reg.is(result_reg) || right_reg.is(result_reg)) {
2021 __ b(
ne, &return_left);
2024 if (operation == HMathMinMax::kMathMin) {
2026 __ vneg(left_reg, left_reg);
2027 __ vsub(result_reg, left_reg, right_reg);
2028 __ vneg(result_reg, result_reg);
2032 __ vadd(result_reg, left_reg, right_reg);
2036 __ bind(&result_is_nan);
2037 __ vadd(result_reg, left_reg, right_reg);
2040 __ bind(&return_right);
2041 __ Move(result_reg, right_reg);
2042 if (!left_reg.is(result_reg)) {
2046 __ bind(&return_left);
2047 __ Move(result_reg, left_reg);
2054 void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
2058 switch (instr->op()) {
2060 __ vadd(result, left, right);
2063 __ vsub(result, left, right);
2066 __ vmul(result, left, right);
2069 __ vdiv(result, left, right);
2072 __ PrepareCallCFunction(0, 2, scratch0());
2073 __ MovToFloatParameters(left, right);
2075 ExternalReference::mod_two_doubles_operation(isolate()),
2078 __ MovFromFloatResult(result);
2088 void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
2098 CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
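
// EmitBranch emits at most one conditional branch per instruction: if the
// true and false targets coincide (or the condition is al) it falls through
// to an unconditional goto, and when either target is the next emitted block
// the branch to it is omitted.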
template<class InstrType>
void LCodeGen::EmitBranch(InstrType instr, Condition condition) {
  int left_block = instr->TrueDestination(chunk_);
  int right_block = instr->FalseDestination(chunk_);

  int next_block = GetNextEmittedBlock();

  if (right_block == left_block || condition == al) {
    EmitGoto(left_block);
  } else if (left_block == next_block) {
  } else if (right_block == next_block) {
    __ b(condition, chunk_->GetAssemblyLabel(left_block));
    __ b(condition, chunk_->GetAssemblyLabel(left_block));
    __ b(chunk_->GetAssemblyLabel(right_block));


template<class InstrType>
void LCodeGen::EmitFalseBranch(InstrType instr, Condition condition) {
  int false_block = instr->FalseDestination(chunk_);
  __ b(condition, chunk_->GetAssemblyLabel(false_block));
2129 void LCodeGen::DoDebugBreak(LDebugBreak* instr) {
2134 void LCodeGen::DoBranch(LBranch* instr) {
2135 Representation r = instr->hydrogen()->value()->representation();
2136 if (r.IsInteger32() || r.IsSmi()) {
2139 __ cmp(reg, Operand::Zero());
2140 EmitBranch(instr,
ne);
2141 }
else if (r.IsDouble()) {
2145 __ VFPCompareAndSetFlags(reg, 0.0);
2147 EmitBranch(instr,
ne);
2151 HType type = instr->hydrogen()->value()->
type();
2152 if (type.IsBoolean()) {
2154 __ CompareRoot(reg, Heap::kTrueValueRootIndex);
2155 EmitBranch(instr,
eq);
2156 }
else if (type.IsSmi()) {
2158 __ cmp(reg, Operand::Zero());
2159 EmitBranch(instr,
ne);
2160 }
else if (type.IsJSArray()) {
2162 EmitBranch(instr,
al);
2163 }
else if (type.IsHeapNumber()) {
2165 DwVfpRegister dbl_scratch = double_scratch0();
2168 __ VFPCompareAndSetFlags(dbl_scratch, 0.0);
2170 EmitBranch(instr,
ne);
2171 }
else if (type.IsString()) {
2174 __ cmp(
ip, Operand::Zero());
2175 EmitBranch(instr,
ne);
2177 ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types();
2183 __ CompareRoot(reg, Heap::kUndefinedValueRootIndex);
2184 __ b(
eq, instr->FalseLabel(chunk_));
2188 __ CompareRoot(reg, Heap::kTrueValueRootIndex);
2189 __ b(
eq, instr->TrueLabel(chunk_));
2190 __ CompareRoot(reg, Heap::kFalseValueRootIndex);
2191 __ b(
eq, instr->FalseLabel(chunk_));
2195 __ CompareRoot(reg, Heap::kNullValueRootIndex);
2196 __ b(
eq, instr->FalseLabel(chunk_));
2201 __ cmp(reg, Operand::Zero());
2202 __ b(
eq, instr->FalseLabel(chunk_));
2203 __ JumpIfSmi(reg, instr->TrueLabel(chunk_));
2204 }
else if (expected.NeedsMap()) {
2207 DeoptimizeIf(
eq, instr->environment());
2210 const Register map = scratch0();
2211 if (expected.NeedsMap()) {
2214 if (expected.CanBeUndetectable()) {
2218 __ b(
ne, instr->FalseLabel(chunk_));
2225 __ b(
ge, instr->TrueLabel(chunk_));
        __ b(ge, &not_string);
        __ cmp(ip, Operand::Zero());
        __ b(ne, instr->TrueLabel(chunk_));
        __ b(instr->FalseLabel(chunk_));
        __ bind(&not_string);
2243 __ b(
eq, instr->TrueLabel(chunk_));
2248 DwVfpRegister dbl_scratch = double_scratch0();
2249 Label not_heap_number;
      __ CompareRoot(map, Heap::kHeapNumberMapRootIndex);
      __ b(ne, &not_heap_number);
2253 __ VFPCompareAndSetFlags(dbl_scratch, 0.0);
2255 __ b(
eq, instr->FalseLabel(chunk_));
2256 __ b(instr->TrueLabel(chunk_));
      __ bind(&not_heap_number);
2260 if (!expected.IsGeneric()) {
2263 DeoptimizeIf(
al, instr->environment());
2270 void LCodeGen::EmitGoto(
int block) {
2277 void LCodeGen::DoGoto(LGoto* instr) {
2278 EmitGoto(instr->block_id());
2286 case Token::EQ_STRICT:
2290 case Token::NE_STRICT:
2294 cond = is_unsigned ?
lo :
lt;
2297 cond = is_unsigned ?
hi :
gt;
2300 cond = is_unsigned ?
ls :
le;
2303 cond = is_unsigned ?
hs :
ge;
2306 case Token::INSTANCEOF:
2314 void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) {
2315 LOperand* left = instr->left();
2316 LOperand* right = instr->right();
2317 Condition cond = TokenToCondition(instr->op(),
false);
2319 if (left->IsConstantOperand() && right->IsConstantOperand()) {
2321 double left_val =
ToDouble(LConstantOperand::cast(left));
2322 double right_val =
ToDouble(LConstantOperand::cast(right));
2323 int next_block =
EvalComparison(instr->op(), left_val, right_val) ?
2324 instr->TrueDestination(chunk_) : instr->FalseDestination(chunk_);
2325 EmitGoto(next_block);
2327 if (instr->is_double()) {
2333 __ b(
vs, instr->FalseLabel(chunk_));
2335 if (right->IsConstantOperand()) {
2337 if (instr->hydrogen_value()->representation().IsSmi()) {
2342 }
else if (left->IsConstantOperand()) {
2344 if (instr->hydrogen_value()->representation().IsSmi()) {
2355 EmitBranch(instr, cond);
2360 void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
2364 __ cmp(left, Operand(right));
2365 EmitBranch(instr,
eq);
2369 void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) {
2370 if (instr->hydrogen()->representation().IsTagged()) {
2371 Register input_reg =
ToRegister(instr->object());
2372 __ mov(
ip, Operand(factory()->the_hole_value()));
2373 __ cmp(input_reg,
ip);
2374 EmitBranch(instr,
eq);
2379 __ VFPCompareAndSetFlags(input_reg, input_reg);
2380 EmitFalseBranch(instr,
vc);
2382 Register scratch = scratch0();
2383 __ VmovHigh(scratch, input_reg);
2385 EmitBranch(instr,
eq);
2389 void LCodeGen::DoCompareMinusZeroAndBranch(LCompareMinusZeroAndBranch* instr) {
2390 Representation rep = instr->hydrogen()->value()->representation();
2391 ASSERT(!rep.IsInteger32());
2392 Register scratch =
ToRegister(instr->temp());
2394 if (rep.IsDouble()) {
2396 __ VFPCompareAndSetFlags(value, 0.0);
2397 EmitFalseBranch(instr,
ne);
2398 __ VmovHigh(scratch, value);
2399 __ cmp(scratch, Operand(0x80000000));
2404 Heap::kHeapNumberMapRootIndex,
2405 instr->FalseLabel(chunk()),
2409 __ cmp(scratch, Operand(0x80000000));
2410 __ cmp(
ip, Operand(0x00000000),
eq);
2412 EmitBranch(instr,
eq);
2416 Condition LCodeGen::EmitIsObject(Register input,
2418 Label* is_not_object,
2420 Register temp2 = scratch0();
2421 __ JumpIfSmi(input, is_not_object);
2423 __ LoadRoot(temp2, Heap::kNullValueRootIndex);
2424 __ cmp(input, temp2);
2425 __ b(
eq, is_object);
2432 __ b(
ne, is_not_object);
2437 __ b(
lt, is_not_object);
2443 void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
2448 EmitIsObject(reg, temp1,
2449 instr->FalseLabel(chunk_), instr->TrueLabel(chunk_));
2451 EmitBranch(instr, true_cond);
2455 Condition LCodeGen::EmitIsString(Register input,
2457 Label* is_not_string,
2460 __ JumpIfSmi(input, is_not_string);
2468 void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
2473 instr->hydrogen()->value()->IsHeapObject()
2476 EmitIsString(reg, temp1, instr->FalseLabel(chunk_), check_needed);
2478 EmitBranch(instr, true_cond);
2482 void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
2484 __ SmiTst(input_reg);
2485 EmitBranch(instr,
eq);
2489 void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
2493 if (!instr->hydrogen()->value()->IsHeapObject()) {
2494 __ JumpIfSmi(input, instr->FalseLabel(chunk_));
2499 EmitBranch(instr,
ne);
2505 case Token::EQ_STRICT:
2523 void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
2528 CallCode(ic, RelocInfo::CODE_TARGET, instr);
2530 __ cmp(
r0, Operand::Zero());
2532 Condition condition = ComputeCompareCondition(op);
2534 EmitBranch(instr, condition);
2538 static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
2547 static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
2550 if (from == to)
return eq;
2558 void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
2559 Register scratch = scratch0();
2562 if (!instr->hydrogen()->value()->IsHeapObject()) {
2563 __ JumpIfSmi(input, instr->FalseLabel(chunk_));
2566 __ CompareObjectType(input, scratch, scratch, TestType(instr->hydrogen()));
2567 EmitBranch(instr, BranchCondition(instr->hydrogen()));
2571 void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
2573 Register result =
ToRegister(instr->result());
2575 __ AssertString(input);
2578 __ IndexFromHash(result, result);
2582 void LCodeGen::DoHasCachedArrayIndexAndBranch(
2583 LHasCachedArrayIndexAndBranch* instr) {
2585 Register scratch = scratch0();
2590 EmitBranch(instr,
eq);
2596 void LCodeGen::EmitClassOfTest(Label* is_true,
2598 Handle<String>class_name,
2603 ASSERT(!input.is(temp2));
2606 __ JumpIfSmi(input, is_false);
2656 __ cmp(temp, Operand(class_name));
2661 void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
2663 Register temp = scratch0();
2665 Handle<String> class_name = instr->hydrogen()->class_name();
2667 EmitClassOfTest(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_),
2668 class_name, input, temp, temp2);
2670 EmitBranch(instr,
eq);
2674 void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
2679 __ cmp(temp, Operand(instr->map()));
2680 EmitBranch(instr,
eq);
2684 void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
2690 CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
2692 __ cmp(
r0, Operand::Zero());
2698 void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
2701 DeferredInstanceOfKnownGlobal(
LCodeGen* codegen,
2702 LInstanceOfKnownGlobal* instr)
2705 codegen()->DoDeferredInstanceOfKnownGlobal(instr_, &map_check_);
2707 virtual LInstruction* instr()
V8_OVERRIDE {
return instr_; }
2708 Label* map_check() {
return &map_check_; }
2710 LInstanceOfKnownGlobal* instr_;
2714 DeferredInstanceOfKnownGlobal* deferred;
2715 deferred =
new(zone()) DeferredInstanceOfKnownGlobal(
this, instr);
2717 Label done, false_result;
2718 Register
object =
ToRegister(instr->value());
2720 Register result =
ToRegister(instr->result());
2723 __ JumpIfSmi(
object, &false_result);
2729 Register map = temp;
2735 __ bind(deferred->map_check());
2740 Handle<Cell> cell = factory()->NewCell(factory()->the_hole_value());
2741 __ mov(
ip, Operand(Handle<Object>(cell)));
2743 __ cmp(map, Operand(
ip));
2744 __ b(
ne, &cache_miss);
2748 __ mov(result, Operand(factory()->the_hole_value()));
2754 __ bind(&cache_miss);
2756 __ LoadRoot(
ip, Heap::kNullValueRootIndex);
2757 __ cmp(
object, Operand(
ip));
2758 __ b(
eq, &false_result);
2761 Condition is_string = masm_->IsObjectStringType(
object, temp);
2762 __ b(is_string, &false_result);
2765 __ b(deferred->entry());
2767 __ bind(&false_result);
2768 __ LoadRoot(result, Heap::kFalseValueRootIndex);
2772 __ bind(deferred->exit());
2788 PushSafepointRegistersScope scope(
this, Safepoint::kWithRegisters);
2789 LoadContextFromDeferred(instr->context());
2792 static const int kAdditionalDelta = 4;
2796 int delta = masm_->InstructionsGeneratedSince(map_check) + kAdditionalDelta;
2797 Label before_push_delta;
2798 __ bind(&before_push_delta);
2799 __ BlockConstPoolFor(kAdditionalDelta);
2804 if (masm_->InstructionsGeneratedSince(&before_push_delta) != 2) {
2805 ASSERT_EQ(1, masm_->InstructionsGeneratedSince(&before_push_delta));
2808 CallCodeGeneric(stub.GetCode(isolate()),
2809 RelocInfo::CODE_TARGET,
2811 RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
2812 LEnvironment* env = instr->GetDeferredLazyDeoptimizationEnvironment();
2813 safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
2816 __ StoreToSafepointRegisterSlot(
r0,
ToRegister(instr->result()));
2820 void LCodeGen::DoCmpT(LCmpT* instr) {
2825 CallCode(ic, RelocInfo::CODE_TARGET, instr);
2827 __ cmp(
r0, Operand::Zero());
2829 Condition condition = ComputeCompareCondition(op);
2831 Heap::kTrueValueRootIndex,
2834 Heap::kFalseValueRootIndex,
2839 void LCodeGen::DoReturn(LReturn* instr) {
2840 if (FLAG_trace &&
info()->IsOptimizing()) {
2847 __ CallRuntime(Runtime::kTraceExit, 1);
2849 if (
info()->saves_caller_doubles()) {
2850 RestoreCallerDoubles();
2852 int no_frame_start = -1;
2854 no_frame_start = masm_->LeaveFrame(StackFrame::JAVA_SCRIPT);
2856 if (instr->has_constant_parameter_count()) {
2857 int parameter_count =
ToInteger32(instr->constant_parameter_count());
2859 if (sp_delta != 0) {
2860 __ add(
sp,
sp, Operand(sp_delta));
2863 Register reg =
ToRegister(instr->parameter_count());
2871 if (no_frame_start != -1) {
2872 info_->AddNoFrameRange(no_frame_start, masm_->pc_offset());
2877 void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
2878 Register result =
ToRegister(instr->result());
2879 __ mov(
ip, Operand(Handle<Object>(instr->hydrogen()->cell().handle())));
2881 if (instr->hydrogen()->RequiresHoleCheck()) {
2882 __ LoadRoot(
ip, Heap::kTheHoleValueRootIndex);
2884 DeoptimizeIf(
eq, instr->environment());
2889 void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
2894 __ mov(
r2, Operand(instr->name()));
2897 CallCode(ic, RelocInfo::CODE_TARGET, instr);
2901 void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
2903 Register cell = scratch0();
2906 __ mov(cell, Operand(instr->hydrogen()->cell().handle()));
2912 if (instr->hydrogen()->RequiresHoleCheck()) {
2914 Register payload =
ToRegister(instr->temp());
2916 __ CompareRoot(payload, Heap::kTheHoleValueRootIndex);
2917 DeoptimizeIf(
eq, instr->environment());
2926 void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
2927 Register context =
ToRegister(instr->context());
2928 Register result =
ToRegister(instr->result());
2930 if (instr->hydrogen()->RequiresHoleCheck()) {
2931 __ LoadRoot(
ip, Heap::kTheHoleValueRootIndex);
2933 if (instr->hydrogen()->DeoptimizesOnHole()) {
2934 DeoptimizeIf(
eq, instr->environment());
2936 __ mov(result, Operand(factory()->undefined_value()),
LeaveCC,
eq);
2942 void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
2943 Register context =
ToRegister(instr->context());
2945 Register scratch = scratch0();
2948 Label skip_assignment;
2950 if (instr->hydrogen()->RequiresHoleCheck()) {
2951 __ ldr(scratch, target);
2952 __ LoadRoot(
ip, Heap::kTheHoleValueRootIndex);
2953 __ cmp(scratch,
ip);
2954 if (instr->hydrogen()->DeoptimizesOnHole()) {
2955 DeoptimizeIf(
eq, instr->environment());
2957 __ b(
ne, &skip_assignment);
2961 __ str(value, target);
2962 if (instr->hydrogen()->NeedsWriteBarrier()) {
2964 instr->hydrogen()->value()->IsHeapObject()
2966 __ RecordWriteContextSlot(context,
2976 __ bind(&skip_assignment);
2980 void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
2981 HObjectAccess access = instr->hydrogen()->access();
2982 int offset = access.offset();
2983 Register
object =
ToRegister(instr->object());
2985 if (access.IsExternalMemory()) {
2986 Register result =
ToRegister(instr->result());
2988 __ Load(result, operand, access.representation());
2992 if (instr->hydrogen()->representation().IsDouble()) {
2998 Register result =
ToRegister(instr->result());
2999 if (!access.IsInobject()) {
3004 __ Load(result, operand, access.representation());
3008 void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
3014 __ mov(
r2, Operand(instr->name()));
3020 void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
3021 Register scratch = scratch0();
3022 Register
function =
ToRegister(instr->function());
3023 Register result =
ToRegister(instr->result());
3028 DeoptimizeIf(
ne, instr->environment());
3034 __ b(
ne, &non_instance);
3041 __ LoadRoot(
ip, Heap::kTheHoleValueRootIndex);
3043 DeoptimizeIf(
eq, instr->environment());
3047 __ CompareObjectType(result, scratch, scratch,
MAP_TYPE);
3056 __ bind(&non_instance);
3064 void LCodeGen::DoLoadRoot(LLoadRoot* instr) {
3065 Register result =
ToRegister(instr->result());
3066 __ LoadRoot(result, instr->index());
3070 void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
3071 Register arguments =
ToRegister(instr->arguments());
3072 Register result =
ToRegister(instr->result());
3075 if (instr->length()->IsConstantOperand()) {
3076 int const_length =
ToInteger32(LConstantOperand::cast(instr->length()));
3077 if (instr->index()->IsConstantOperand()) {
3078 int const_index =
ToInteger32(LConstantOperand::cast(instr->index()));
3079 int index = (const_length - const_index) + 1;
3083 __ rsb(result, index, Operand(const_length + 1));
3086 }
else if (instr->index()->IsConstantOperand()) {
3087 Register length =
ToRegister(instr->length());
3088 int const_index =
ToInteger32(LConstantOperand::cast(instr->index()));
3089 int loc = const_index - 1;
3091 __ sub(result, length, Operand(loc));
3097 Register length =
ToRegister(instr->length());
3099 __ sub(result, length, index);
3100 __ add(result, result, Operand(1));
3106 void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
3107 Register external_pointer =
ToRegister(instr->elements());
3110 bool key_is_constant = instr->key()->IsConstantOperand();
3111 int constant_key = 0;
3112 if (key_is_constant) {
3113 constant_key =
ToInteger32(LConstantOperand::cast(instr->key()));
3114 if (constant_key & 0xF0000000) {
3115 Abort(kArrayIndexConstantValueTooBig);
3121 int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
3122 ? (element_size_shift -
kSmiTagSize) : element_size_shift;
3133 (instr->additional_index() << element_size_shift) + additional_offset;
3135 Operand operand = key_is_constant
3136 ? Operand(constant_key << element_size_shift)
3137 : Operand(key,
LSL, shift_size);
3138 __ add(scratch0(), external_pointer, operand);
3141 __ vldr(double_scratch0().low(), scratch0(), base_offset);
3142 __ vcvt_f64_f32(result, double_scratch0().low());
3144 __ vldr(result, scratch0(), base_offset);
3147 Register result =
ToRegister(instr->result());
3149 key, external_pointer, key_is_constant, constant_key,
3150 element_size_shift, shift_size,
3151 instr->additional_index(), additional_offset);
3152 switch (elements_kind) {
3155 __ ldrsb(result, mem_operand);
3161 __ ldrb(result, mem_operand);
3165 __ ldrsh(result, mem_operand);
3169 __ ldrh(result, mem_operand);
3173 __ ldr(result, mem_operand);
3177 __ ldr(result, mem_operand);
3179 __ cmp(result, Operand(0x80000000));
3180 DeoptimizeIf(
cs, instr->environment());
3202 void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
3203 Register elements =
ToRegister(instr->elements());
3204 bool key_is_constant = instr->key()->IsConstantOperand();
3207 Register scratch = scratch0();
3213 (instr->additional_index() << element_size_shift);
3214 if (key_is_constant) {
3215 int constant_key =
ToInteger32(LConstantOperand::cast(instr->key()));
3216 if (constant_key & 0xF0000000) {
3217 Abort(kArrayIndexConstantValueTooBig);
3219 base_offset += constant_key << element_size_shift;
3221 __ add(scratch, elements, Operand(base_offset));
3223 if (!key_is_constant) {
3225 int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
3226 ? (element_size_shift -
kSmiTagSize) : element_size_shift;
3227 __ add(scratch, scratch, Operand(key,
LSL, shift_size));
3230 __ vldr(result, scratch, 0);
3232 if (instr->hydrogen()->RequiresHoleCheck()) {
3235 DeoptimizeIf(
eq, instr->environment());
3240 void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
3241 Register elements =
ToRegister(instr->elements());
3242 Register result =
ToRegister(instr->result());
3243 Register scratch = scratch0();
3244 Register store_base = scratch;
3247 if (instr->key()->IsConstantOperand()) {
3248 LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
3250 instr->additional_index());
3251 store_base = elements;
3258 if (instr->hydrogen()->key()->representation().IsSmi()) {
3259 __ add(scratch, elements, Operand::PointerOffsetFromSmiKey(key));
3268 if (instr->hydrogen()->RequiresHoleCheck()) {
3271 DeoptimizeIf(
ne, instr->environment());
3273 __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
3274 __ cmp(result, scratch);
3275 DeoptimizeIf(
eq, instr->environment());
3281 void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) {
3282 if (instr->is_typed_elements()) {
3283 DoLoadKeyedExternalArray(instr);
3284 }
else if (instr->hydrogen()->representation().IsDouble()) {
3285 DoLoadKeyedFixedDoubleArray(instr);
3287 DoLoadKeyedFixedArray(instr);
MemOperand LCodeGen::PrepareKeyedOperand(Register key, Register base,
                                         bool key_is_constant, int constant_key,
                                         int element_size, int shift_size,
                                         int additional_index,
                                         int additional_offset) {
  int base_offset = (additional_index << element_size) + additional_offset;
  if (key_is_constant) {
    return MemOperand(base,
                      base_offset + (constant_key << element_size));
  }
  if (additional_offset != 0) {
    __ mov(scratch0(), Operand(base_offset));
    if (shift_size >= 0) {
      __ add(scratch0(), scratch0(), Operand(key, LSL, shift_size));
    } else {
      // shift_size == -1: the key is a smi, shift it right by one bit.
      __ add(scratch0(), scratch0(), Operand(key, LSR, 1));
    }
    return MemOperand(base, scratch0());
  }
  if (additional_index != 0) {
    additional_index *= 1 << (element_size - shift_size);
    __ add(scratch0(), key, Operand(additional_index));
  }
  if (additional_index == 0) {
    if (shift_size >= 0) {
      return MemOperand(base, key, LSL, shift_size);
    }
    // ...
  }
  if (shift_size >= 0) {
    return MemOperand(base, scratch0(), LSL, shift_size);
  }
  // ...
}
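// Worked example: for 32-bit elements (element_size == 2), additional_index
// == 1, additional_offset == 0 and a constant key of 3, base_offset is
// (1 << 2) + 0 == 4 and the returned operand addresses
// base + 4 + (3 << 2) == base + 16 bytes.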
void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
  // ...
  Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
  // ...
}


void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
  Register scratch = scratch0();
  Register result = ToRegister(instr->result());
  // ...
  if (instr->hydrogen()->from_inlined()) {
    // ...
  } else {
    Label done, adapted;
    // ...
  }
}


void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
  Register elem = ToRegister(instr->elements());
  Register result = ToRegister(instr->result());
  // ... without an arguments adaptor frame, the argument count is simply the
  //     number of formal parameters.
  __ mov(result, Operand(scope()->num_parameters()));
  // ... otherwise read the length from the adaptor frame and untag it.
  __ SmiUntag(result);
  // ...
}
void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
  Register receiver = ToRegister(instr->receiver());
  Register function = ToRegister(instr->function());
  Register result = ToRegister(instr->result());
  Register scratch = scratch0();

  // If the receiver is null or undefined, it has to be replaced by the global
  // receiver before a sloppy-mode call; strict-mode and native functions
  // receive it unchanged.
  Label global_object, result_in_receiver;

  if (!instr->hydrogen()->known_function()) {
    // ... (load the SharedFunctionInfo compiler hints into scratch)
    // Do not transform the receiver for strict-mode functions.
    __ tst(scratch, Operand(mask));
    __ b(ne, &result_in_receiver);
    // Do not transform the receiver for native (builtin) functions.
    // ...
    __ b(ne, &result_in_receiver);
  }

  // Normal function: replace null or undefined with the global receiver.
  __ LoadRoot(scratch, Heap::kNullValueRootIndex);
  __ cmp(receiver, scratch);
  __ b(eq, &global_object);
  __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
  __ cmp(receiver, scratch);
  __ b(eq, &global_object);

  // Deoptimize if the receiver is not a JS object.
  __ SmiTst(receiver);
  DeoptimizeIf(eq, instr->environment());
  // ... (compare the instance type against the first spec-object type)
  DeoptimizeIf(lt, instr->environment());

  __ b(&result_in_receiver);
  __ bind(&global_object);
  // ... (load the global receiver from the function's context)

  if (result.is(receiver)) {
    __ bind(&result_in_receiver);
  } else {
    Label result_ok;
    // ...
    __ bind(&result_in_receiver);
    __ mov(result, receiver);
    __ bind(&result_ok);
  }
}
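// Note: this implements the sloppy-mode receiver coercion of ES5 10.4.3.
// For example, f.call(null) with a sloppy, non-native f makes `this` the
// global receiver inside f, while a strict-mode f sees `this === null`.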
void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
  Register receiver = ToRegister(instr->receiver());
  Register function = ToRegister(instr->function());
  Register length = ToRegister(instr->length());
  Register elements = ToRegister(instr->elements());
  Register scratch = scratch0();
  // ...

  // Deoptimize if there are too many arguments to copy onto the stack.
  const uint32_t kArgumentsLimit = 1 * KB;
  __ cmp(length, Operand(kArgumentsLimit));
  DeoptimizeIf(hi, instr->environment());

  // Keep the original number of arguments in the receiver register while the
  // arguments are pushed.
  // ...
  __ mov(receiver, length);
  // ...

  // Loop over the arguments, pushing them onto the stack.
  __ cmp(length, Operand::Zero());
  // ...
  __ sub(length, length, Operand(1), SetCC);
  // ...

  ASSERT(instr->HasPointerMap());
  LPointerMap* pointers = instr->pointer_map();
  SafepointGenerator safepoint_generator(
      this, pointers, Safepoint::kLazyDeopt);
  ParameterCount actual(receiver);
  __ InvokeFunction(function, actual, CALL_FUNCTION, safepoint_generator);
}
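// Note: kArgumentsLimit above caps Function.prototype.apply at 1 * KB ==
// 1024 arguments. The "hi" (unsigned greater-than) condition deoptimizes
// when length exceeds that limit, so larger calls fall back to the runtime.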
void LCodeGen::DoPushArgument(LPushArgument* instr) {
  LOperand* argument = instr->value();
  if (argument->IsDoubleRegister() || argument->IsDoubleStackSlot()) {
    Abort(kDoPushArgumentNotImplementedForDoubleType);
  } else {
    Register argument_reg = EmitLoadRegister(argument, ip);
    __ push(argument_reg);
  }
}


void LCodeGen::DoDrop(LDrop* instr) {
  __ Drop(instr->count());
}


void LCodeGen::DoThisFunction(LThisFunction* instr) {
  Register result = ToRegister(instr->result());
  // ...
}


void LCodeGen::DoContext(LContext* instr) {
  // ...
  Register result = ToRegister(instr->result());
  if (info()->IsOptimizing()) {
    // ...
  }
  // ...
}
void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
  // ...
  __ Move(scratch0(), instr->hydrogen()->pairs());
  __ push(scratch0());
  __ mov(scratch0(), Operand(Smi::FromInt(instr->hydrogen()->flags())));
  __ push(scratch0());
  CallRuntime(Runtime::kHiddenDeclareGlobals, 3, instr);
}


void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
                                 int formal_parameter_count,
                                 int arity,
                                 LInstruction* instr,
                                 R1State r1_state) {
  bool dont_adapt_arguments =
      formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel;
  bool can_invoke_directly =
      dont_adapt_arguments || formal_parameter_count == arity;

  LPointerMap* pointers = instr->pointer_map();

  if (can_invoke_directly) {
    if (r1_state == R1_UNINITIALIZED) {
      __ Move(r1, function);
    }
    // ... (switch to the function's context)
    // Set r0 to the arguments count if no adaption is needed.
    if (dont_adapt_arguments) {
      __ mov(r0, Operand(arity));
    }
    // ... (call the function's code entry directly)
    RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
  } else {
    SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
    ParameterCount count(arity);
    ParameterCount expected(formal_parameter_count);
    __ InvokeFunction(function, expected, count, CALL_FUNCTION, generator);
  }
}
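// Note: a known function can only be called directly when the call-site
// arity matches its formal parameter count, or when it is marked
// "don't adapt arguments"; otherwise InvokeFunction goes through the
// arguments adaptor machinery.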
void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
  // ...
  Register result = ToRegister(instr->result());
  Register scratch = scratch0();

  // Deoptimize if the input is not a heap number.
  // ...
  __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
  __ cmp(scratch, Operand(ip));
  DeoptimizeIf(ne, instr->environment());

  // ...
  Register exponent = scratch0();
  // If the argument is already positive it can be returned unchanged.
  // ...
  __ Move(result, input);
  // ...

  // Input is negative: allocate a new heap number with the sign bit cleared.
  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);

  // Registers were saved at the safepoint, so plenty of scratch registers
  // are available.
  Register tmp1 = input.is(r1) ? r0 : r1;
  Register tmp2 = input.is(r2) ? r0 : r2;
  Register tmp3 = input.is(r3) ? r0 : r3;
  Register tmp4 = input.is(r4) ? r0 : r4;

  Label allocated, slow;
  __ LoadRoot(tmp4, Heap::kHeapNumberMapRootIndex);
  __ AllocateHeapNumber(tmp1, tmp2, tmp3, tmp4, &slow);
  // ...

  // Slow case: call the runtime to allocate the heap number.
  CallRuntimeFromDeferred(Runtime::kHiddenAllocateHeapNumber, 0, instr,
                          instr->context());
  // Put the new heap number into tmp1 and restore the input register.
  if (!tmp1.is(r0)) __ mov(tmp1, Operand(r0));
  __ LoadFromSafepointRegisterSlot(input, input);
  // ...

  __ bind(&allocated);
  // ... (copy the mantissa and the sign-cleared exponent into the new number)
  __ StoreToSafepointRegisterSlot(tmp1, result);
  // ...
}
void LCodeGen::EmitIntegerMathAbs(LMathAbs* instr) {
  // ...
  Register result = ToRegister(instr->result());
  __ cmp(input, Operand::Zero());
  // Non-negative input is simply moved to the result.
  __ Move(result, input, pl);
  // Negative input is negated; rsb sets the flags so the overflow case can
  // be caught below.
  __ rsb(result, input, Operand::Zero(), SetCC, mi);
  DeoptimizeIf(vs, instr->environment());
}


void LCodeGen::DoMathAbs(LMathAbs* instr) {
  // Class for the deferred (tagged heap number) case.
  class DeferredMathAbsTaggedHeapNumber V8_FINAL : public LDeferredCode {
   public:
    DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen, LMathAbs* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() V8_OVERRIDE {
      codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
    }
    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
   private:
    LMathAbs* instr_;
  };

  Representation r = instr->hydrogen()->value()->representation();
  if (r.IsDouble()) {
    // ...
    __ vabs(result, input);
  } else if (r.IsSmiOrInteger32()) {
    EmitIntegerMathAbs(instr);
  } else {
    // Representation is tagged.
    DeferredMathAbsTaggedHeapNumber* deferred =
        new(zone()) DeferredMathAbsTaggedHeapNumber(this, instr);
    // ...
    __ JumpIfNotSmi(input, deferred->entry());
    // If the input is a smi, handle it inline.
    EmitIntegerMathAbs(instr);
    __ bind(deferred->exit());
  }
}
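// Note: EmitIntegerMathAbs deoptimizes on signed overflow ("vs") because the
// absolute value of kMinInt (-2147483648) is not representable as a 32-bit
// signed integer; rsb with SetCC raises the V flag exactly in that case.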
void LCodeGen::DoMathFloor(LMathFloor* instr) {
  // ...
  Register result = ToRegister(instr->result());
  Register input_high = scratch0();
  // ...
  __ TryInt32Floor(result, input, input_high, double_scratch0(), &done, &exact);
  // Fall-through: the input cannot be floored to an int32, so deoptimize.
  DeoptimizeIf(al, instr->environment());
  // ...
  // Test for -0 when the instruction bails out on minus zero.
  __ cmp(result, Operand::Zero());
  // ...
  __ cmp(input_high, Operand::Zero());
  DeoptimizeIf(mi, instr->environment());
  // ...
}
void LCodeGen::DoMathRound(LMathRound* instr) {
  // ...
  Register result = ToRegister(instr->result());
  // ...
  DwVfpRegister input_plus_dot_five = double_scratch1;
  Register input_high = scratch0();
  DwVfpRegister dot_five = double_scratch0();
  Label convert, done;

  __ Vmov(dot_five, 0.5, scratch0());
  __ vabs(double_scratch1, input);
  __ VFPCompareAndSetFlags(double_scratch1, dot_five);
  // If |input| < 0.5 the result is 0; a -0 bailout checks the sign of the
  // input via its high word.
  __ VmovHigh(input_high, input);
  __ cmp(input_high, Operand::Zero());
  DeoptimizeIf(mi, instr->environment());
  // ...
  __ VFPCompareAndSetFlags(input, dot_five);
  // ... (inputs in [-0.5, 0.5) produce 0; otherwise round via floor(x + 0.5))
  __ vadd(input_plus_dot_five, input, dot_five);
  // Reuse dot_five (double_scratch0) as the scratch for TryInt32Floor.
  __ TryInt32Floor(result, input_plus_dot_five, input_high, double_scratch0(),
                   /* ... */);
  DeoptimizeIf(al, instr->environment());
  // ...
}
void LCodeGen::DoMathSqrt(LMathSqrt* instr) {
  // ...
  __ vsqrt(result, input);
}


void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
  // ...
  DwVfpRegister temp = double_scratch0();
  // Math.pow(x, 0.5) differs from sqrt(x) for -Infinity, which must yield
  // +Infinity; that case is handled around the comparison below.
  __ VFPCompareAndSetFlags(input, temp);
  __ vneg(result, temp, eq);
  // ...
  __ vsqrt(result, result);
}


void LCodeGen::DoPower(LPower* instr) {
  Representation exponent_type = instr->hydrogen()->right()->representation();
  // Inputs and result are expected in the fixed registers used by MathPowStub.
  ASSERT(!instr->right()->IsDoubleRegister() /* || ... */);
  ASSERT(!instr->right()->IsRegister() || ToRegister(instr->right()).is(r2));
  // ...
  if (exponent_type.IsSmi()) {
    // ...
  } else if (exponent_type.IsTagged()) {
    Label no_deopt;
    __ JumpIfSmi(r2, &no_deopt);
    // A tagged, non-smi exponent must be a heap number.
    __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
    // ...
    DeoptimizeIf(ne, instr->environment());
    // ...
  } else if (exponent_type.IsInteger32()) {
    // ...
  } else {
    ASSERT(exponent_type.IsDouble());
    // ...
  }
}


void LCodeGen::DoMathExp(LMathExp* instr) {
  // ...
  DwVfpRegister double_scratch2 = double_scratch0();
  // ...
  MathExpGenerator::EmitMathExp(
      masm(), input, result, double_scratch1, double_scratch2,
      temp1, temp2, scratch0());
}


void LCodeGen::DoMathLog(LMathLog* instr) {
  __ PrepareCallCFunction(0, 1, scratch0());
  // ...
  __ CallCFunction(ExternalReference::math_log_double_function(isolate()),
                   0, 1);
  // ...
}


void LCodeGen::DoMathClz32(LMathClz32* instr) {
  // ...
  Register result = ToRegister(instr->result());
  __ clz(result, input);
}
void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
  // ...
  ASSERT(instr->HasPointerMap());

  Handle<JSFunction> known_function = instr->hydrogen()->known_function();
  if (known_function.is_null()) {
    LPointerMap* pointers = instr->pointer_map();
    SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
    ParameterCount count(instr->arity());
    // ... (generic invoke through r1)
  } else {
    CallKnownFunction(known_function,
                      instr->hydrogen()->formal_parameter_count(),
                      instr->arity(),
                      instr,
                      R1_CONTAINS_TARGET);
  }
}


void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
  // ...
  LPointerMap* pointers = instr->pointer_map();
  SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);

  if (instr->target()->IsConstantOperand()) {
    LConstantOperand* target = LConstantOperand::cast(instr->target());
    // ...
    generator.BeforeCall(__ CallSize(code, RelocInfo::CODE_TARGET));
    PlatformCallInterfaceDescriptor* call_descriptor =
        instr->descriptor()->platform_specific_descriptor();
    // ... (emit the call using the descriptor's storage mode:
    //      call_descriptor->storage_mode())
  } else {
    ASSERT(instr->target()->IsRegister());
    Register target = ToRegister(instr->target());
    generator.BeforeCall(__ CallSize(target));
    // ...
  }
  generator.AfterCall();
}


void LCodeGen::DoCallJSFunction(LCallJSFunction* instr) {
  // ...
  if (instr->hydrogen()->pass_argument_count()) {
    __ mov(r0, Operand(instr->arity()));
  }
  // ... (load the context and call the function's code entry)
  RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
}


void LCodeGen::DoCallFunction(LCallFunction* instr) {
  // ...
  int arity = instr->arity();
  CallFunctionStub stub(arity, instr->hydrogen()->function_flags());
  CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
}
void LCodeGen::DoCallNew(LCallNew* instr) {
  // ...
  __ mov(r0, Operand(instr->arity()));
  // No type-feedback cell in r2 for construct calls from optimized code.
  __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
  // ...
  CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
}


void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
  // ...
  __ mov(r0, Operand(instr->arity()));
  __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
  ElementsKind kind = instr->hydrogen()->elements_kind();
  // ...
  if (instr->arity() == 0) {
    ArrayNoArgumentConstructorStub stub(kind, override_mode);
    CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
  } else if (instr->arity() == 1) {
    Label done, packed_case;
    // A single argument of zero still allows a packed array; otherwise the
    // array needs a holey elements kind.
    // ...
    __ cmp(r5, Operand::Zero());
    __ b(eq, &packed_case);
    {
      ArraySingleArgumentConstructorStub stub(holey_kind, override_mode);
      CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
    }
    // ...
    __ bind(&packed_case);
    {
      ArraySingleArgumentConstructorStub stub(kind, override_mode);
      CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
    }
    // ...
  } else {
    ArrayNArgumentsConstructorStub stub(kind, override_mode);
    CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
  }
}


void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
  CallRuntime(instr->function(), instr->arity(), instr);
}
void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) {
  Register function = ToRegister(instr->function());
  Register code_object = ToRegister(instr->code_object());
  // ... (store code_object's instruction start into the function's code entry)
}


void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) {
  Register result = ToRegister(instr->result());
  Register base = ToRegister(instr->base_object());
  if (instr->offset()->IsConstantOperand()) {
    LConstantOperand* offset = LConstantOperand::cast(instr->offset());
    __ add(result, base, Operand(ToInteger32(offset)));
  } else {
    Register offset = ToRegister(instr->offset());
    __ add(result, base, offset);
  }
}
void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
  Representation representation = instr->representation();

  Register object = ToRegister(instr->object());
  Register scratch = scratch0();
  HObjectAccess access = instr->hydrogen()->access();
  int offset = access.offset();

  if (access.IsExternalMemory()) {
    // ...
    __ Store(value, operand, representation);
    return;
  }

  Handle<Map> transition = instr->transition();
  SmiCheck check_needed =
      instr->hydrogen()->value()->IsHeapObject()
          ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;

  ASSERT(!(representation.IsSmi() &&
           instr->value()->IsConstantOperand() &&
           !IsSmi(LConstantOperand::cast(instr->value()))));
  if (representation.IsHeapObject()) {
    // ...
    if (!instr->hydrogen()->value()->type().IsHeapObject()) {
      // ... (smi check on the value)
      DeoptimizeIf(eq, instr->environment());
      // ...
    }
  } else if (representation.IsDouble()) {
    ASSERT(transition.is_null());
    ASSERT(access.IsInobject());
    ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
    // ... (store the double directly into the in-object field)
    return;
  }

  if (!transition.is_null()) {
    __ mov(scratch, Operand(transition));
    // ... (store the new map)
    if (instr->hydrogen()->NeedsWriteBarrierForMap()) {
      // ...
      __ RecordWriteField(object,
                          /* ... */);
    }
  }

  // Do the store.
  if (access.IsInobject()) {
    // ...
    __ Store(value, operand, representation);
    if (instr->hydrogen()->NeedsWriteBarrier()) {
      // Update the write barrier for in-object properties.
      __ RecordWriteField(object,
                          /* ... */);
    }
  } else {
    // ... (store into the properties backing store held in scratch)
    __ Store(value, operand, representation);
    if (instr->hydrogen()->NeedsWriteBarrier()) {
      // ...
      __ RecordWriteField(scratch,
                          /* ... */);
    }
  }
}
void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
  // ...
  __ mov(r2, Operand(instr->name()));
  // ... (call the StoreIC)
}


void LCodeGen::ApplyCheckIf(Condition condition, LBoundsCheck* check) {
  if (FLAG_debug_code && check->hydrogen()->skip_check()) {
    Label done;
    __ b(NegateCondition(condition), &done);
    __ stop("eliminated bounds check failed");
    __ bind(&done);
  } else {
    DeoptimizeIf(condition, check->environment());
  }
}


void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
  if (instr->hydrogen()->skip_check()) return;

  if (instr->index()->IsConstantOperand()) {
    int constant_index =
        ToInteger32(LConstantOperand::cast(instr->index()));
    if (instr->hydrogen()->length()->representation().IsSmi()) {
      // ... (move the constant index as a smi into ip)
    } else {
      __ mov(ip, Operand(constant_index));
    }
    // ... (compare ip against the length)
  } else {
    // ... (compare the index register against the length)
  }
  Condition condition = instr->hydrogen()->allow_equality() ? hi : hs;
  ApplyCheckIf(condition, instr);
}
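// Note: the bounds check requires index < length, so "hs" (unsigned >=)
// deoptimizes on index >= length. When allow_equality() holds, index ==
// length is permitted and only "hi" (unsigned >) deoptimizes.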
void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
  Register external_pointer = ToRegister(instr->elements());
  // ...
  bool key_is_constant = instr->key()->IsConstantOperand();
  int constant_key = 0;
  if (key_is_constant) {
    constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
    if (constant_key & 0xF0000000) {
      Abort(kArrayIndexConstantValueTooBig);
    }
  } else {
    // ...
  }
  // ...
  int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
      ? (element_size_shift - kSmiTagSize) : element_size_shift;
  // ...
  int base_offset =
      (instr->additional_index() << element_size_shift) + additional_offset;

  // Float and double element kinds are stored from a VFP register.
  Register address = scratch0();
  // ...
  if (key_is_constant) {
    if (constant_key != 0) {
      __ add(address, external_pointer,
             Operand(constant_key << element_size_shift));
    } else {
      address = external_pointer;
    }
  } else {
    __ add(address, external_pointer, Operand(key, LSL, shift_size));
  }
  // float32 elements: narrow the double before storing.
  __ vcvt_f32_f64(double_scratch0().low(), value);
  __ vstr(double_scratch0().low(), address, base_offset);
  // float64 elements: store directly.
  __ vstr(value, address, base_offset);

  // Integer element kinds are stored from a plain register.
  MemOperand mem_operand = PrepareKeyedOperand(
      key, external_pointer, key_is_constant, constant_key,
      element_size_shift, shift_size,
      instr->additional_index(), additional_offset);
  switch (elements_kind) {
    // ... 8-bit elements
      __ strb(value, mem_operand);
    // ... 16-bit elements
      __ strh(value, mem_operand);
    // ... 32-bit elements
      __ str(value, mem_operand);
    // ...
  }
}
void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
  // ...
  Register elements = ToRegister(instr->elements());
  Register scratch = scratch0();
  DwVfpRegister double_scratch = double_scratch0();
  bool key_is_constant = instr->key()->IsConstantOperand();

  // Calculate the effective address of the double slot to store into.
  // ...
  if (key_is_constant) {
    int constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
    if (constant_key & 0xF0000000) {
      Abort(kArrayIndexConstantValueTooBig);
    }
    __ add(scratch, elements,
           Operand((constant_key << element_size_shift) /* + header offset */));
  } else {
    int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
        ? (element_size_shift - kSmiTagSize) : element_size_shift;
    __ add(scratch, elements, /* ...header offset... */);
    __ add(scratch, scratch, /* ...key scaled by shift_size... */);
  }

  if (instr->NeedsCanonicalization()) {
    // Force a canonical NaN before storing.
    if (masm()->emit_debug_code()) {
      // ... (check that default-NaN mode is set in the FPSCR)
      __ Assert(ne, kDefaultNaNModeNotSet);
    }
    __ VFPCanonicalizeNaN(double_scratch, value);
    __ vstr(double_scratch, scratch,
            instr->additional_index() << element_size_shift);
  } else {
    __ vstr(value, scratch, instr->additional_index() << element_size_shift);
  }
}
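// Note: fixed double arrays reserve one particular NaN bit pattern (the
// "hole NaN", kHoleNanUpper32/kHoleNanLower32) to mark missing elements.
// Storing an arbitrary user NaN unchanged could collide with that pattern,
// so NaNs are canonicalized to a single quiet NaN before being written.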
void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
  // ...
  Register elements = ToRegister(instr->elements());
  // ...
  Register scratch = scratch0();
  Register store_base = scratch;
  // ...
  if (instr->key()->IsConstantOperand()) {
    ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
    LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
    offset = FixedArray::OffsetOfElementAt(ToInteger32(const_operand) +
                                           instr->additional_index());
    store_base = elements;
  } else {
    // The key may still be tagged after bounds-check elimination, so both
    // smi and integer keys are handled.
    if (instr->hydrogen()->key()->representation().IsSmi()) {
      __ add(scratch, elements, Operand::PointerOffsetFromSmiKey(key));
    }
    // ...
  }
  // ... (store the value at store_base + offset)

  if (instr->hydrogen()->NeedsWriteBarrier()) {
    SmiCheck check_needed =
        instr->hydrogen()->value()->IsHeapObject()
            ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
    // Compute the address of the stored slot and record the write.
    __ RecordWrite(elements,
                   /* ... */);
  }
}


void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) {
  // By cases: external (typed), fast double, or fast elements.
  if (instr->is_typed_elements()) {
    DoStoreKeyedExternalArray(instr);
  } else if (instr->hydrogen()->value()->representation().IsDouble()) {
    DoStoreKeyedFixedDoubleArray(instr);
  } else {
    DoStoreKeyedFixedArray(instr);
  }
}


void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
  // ...
  Handle<Code> ic = instr->strict_mode() == STRICT
      ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
      : isolate()->builtins()->KeyedStoreIC_Initialize();
  // ... (call the IC)
}
void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
  Register object_reg = ToRegister(instr->object());
  Register scratch = scratch0();

  Handle<Map> from_map = instr->original_map();
  Handle<Map> to_map = instr->transitioned_map();
  ElementsKind from_kind = instr->from_kind();
  ElementsKind to_kind = instr->to_kind();

  Label not_applicable;
  // ... (load the object's map into scratch)
  __ cmp(scratch, Operand(from_map));
  __ b(ne, &not_applicable);

  if (IsSimpleMapChangeTransition(from_kind, to_kind)) {
    Register new_map_reg = ToRegister(instr->new_map_temp());
    __ mov(new_map_reg, Operand(to_map));
    // ... (store the new map and record the write)
  } else {
    PushSafepointRegistersScope scope(
        this, Safepoint::kWithRegistersAndDoubles);
    __ Move(r0, object_reg);
    __ Move(r1, to_map);
    bool is_js_array = from_map->instance_type() == JS_ARRAY_TYPE;
    TransitionElementsKindStub stub(from_kind, to_kind, is_js_array);
    __ CallStub(&stub);
    RecordSafepointWithRegistersAndDoubles(
        instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
  }
  __ bind(&not_applicable);
}
void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
  Register object = ToRegister(instr->object());
  Register temp = ToRegister(instr->temp());
  Label no_memento_found;
  __ TestJSArrayForAllocationMemento(object, temp, &no_memento_found);
  DeoptimizeIf(eq, instr->environment());
  __ bind(&no_memento_found);
}


void LCodeGen::DoStringAdd(LStringAdd* instr) {
  // ...
  StringAddStub stub(instr->hydrogen()->flags(),
                     instr->hydrogen()->pretenure_flag());
  CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
}
void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
  class DeferredStringCharCodeAt V8_FINAL : public LDeferredCode {
   public:
    DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() V8_OVERRIDE {
      codegen()->DoDeferredStringCharCodeAt(instr_);
    }
    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
   private:
    LStringCharCodeAt* instr_;
  };

  DeferredStringCharCodeAt* deferred =
      new(zone()) DeferredStringCharCodeAt(this, instr);
  // ... (generate the fast inline char-code load)
  __ bind(deferred->exit());
}


void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
  Register string = ToRegister(instr->string());
  Register result = ToRegister(instr->result());
  Register scratch = scratch0();

  // Set the result to a safe value before the runtime call, since the result
  // register is in the pointer map.
  __ mov(result, Operand::Zero());

  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
  __ push(string);
  // Push the index as a smi.
  if (instr->index()->IsConstantOperand()) {
    int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
    __ mov(scratch, Operand(Smi::FromInt(const_index)));
    __ push(scratch);
  } else {
    // ...
  }
  CallRuntimeFromDeferred(Runtime::kHiddenStringCharCodeAt, 2, instr,
                          instr->context());
  // ...
  __ StoreToSafepointRegisterSlot(r0, result);
}
void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
  class DeferredStringCharFromCode V8_FINAL : public LDeferredCode {
   public:
    DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() V8_OVERRIDE {
      codegen()->DoDeferredStringCharFromCode(instr_);
    }
    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
   private:
    LStringCharFromCode* instr_;
  };

  DeferredStringCharFromCode* deferred =
      new(zone()) DeferredStringCharFromCode(this, instr);

  ASSERT(instr->hydrogen()->value()->representation().IsInteger32());
  Register char_code = ToRegister(instr->char_code());
  Register result = ToRegister(instr->result());
  ASSERT(!char_code.is(result));

  // Char codes above the one-byte range go to the deferred (runtime) path.
  // ...
  __ b(hi, deferred->entry());
  // Look the character up in the single-character string cache; a miss
  // (undefined entry) also goes to the deferred path.
  __ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex);
  // ...
  __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
  // ...
  __ b(eq, deferred->entry());
  __ bind(deferred->exit());
}


void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
  Register char_code = ToRegister(instr->char_code());
  Register result = ToRegister(instr->result());

  // Set the result to a safe value before the runtime call, since the result
  // register is in the pointer map.
  __ mov(result, Operand::Zero());

  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
  __ SmiTag(char_code);
  __ push(char_code);
  CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr, instr->context());
  __ StoreToSafepointRegisterSlot(r0, result);
}
void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
  LOperand* input = instr->value();
  ASSERT(input->IsRegister() || input->IsStackSlot());
  LOperand* output = instr->result();
  ASSERT(output->IsDoubleRegister());
  SwVfpRegister single_scratch = double_scratch0().low();
  if (input->IsStackSlot()) {
    Register scratch = scratch0();
    __ ldr(scratch, ToMemOperand(input));
    __ vmov(single_scratch, scratch);
  } else {
    __ vmov(single_scratch, ToRegister(input));
  }
  __ vcvt_f64_s32(ToDoubleRegister(output), single_scratch);
}


void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
  LOperand* input = instr->value();
  LOperand* output = instr->result();
  SwVfpRegister flt_scratch = double_scratch0().low();
  __ vmov(flt_scratch, ToRegister(input));
  __ vcvt_f64_u32(ToDoubleRegister(output), flt_scratch);
}
void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
  class DeferredNumberTagI V8_FINAL : public LDeferredCode {
   public:
    DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() V8_OVERRIDE {
      codegen()->DoDeferredNumberTagIU(instr_,
                                       /* ... */);
    }
    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
   private:
    LNumberTagI* instr_;
  };

  Register src = ToRegister(instr->value());
  Register dst = ToRegister(instr->result());

  DeferredNumberTagI* deferred = new(zone()) DeferredNumberTagI(this, instr);
  __ SmiTag(dst, src, SetCC);
  __ b(vs, deferred->entry());
  __ bind(deferred->exit());
}


void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
  class DeferredNumberTagU V8_FINAL : public LDeferredCode {
   public:
    DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() V8_OVERRIDE {
      codegen()->DoDeferredNumberTagIU(instr_,
                                       /* ... */);
    }
    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
   private:
    LNumberTagU* instr_;
  };

  Register input = ToRegister(instr->value());
  Register result = ToRegister(instr->result());

  DeferredNumberTagU* deferred = new(zone()) DeferredNumberTagU(this, instr);
  __ cmp(input, Operand(Smi::kMaxValue));
  __ b(hi, deferred->entry());
  __ SmiTag(result, input);
  __ bind(deferred->exit());
}
void LCodeGen::DoDeferredNumberTagIU(LInstruction* instr,
                                     /* ... */) {
  // ...
  Register tmp1 = scratch0();
  // ...

  if (signedness == SIGNED_INT32) {
    // There was an overflow: bits 30 and 31 of the original integer disagree,
    // so recover the untagged value before converting it to a double.
    __ SmiUntag(src, dst);
    __ eor(src, src, Operand(0x80000000));
    // ...
    __ vmov(dbl_scratch.low(), src);
    __ vcvt_f64_s32(dbl_scratch, dbl_scratch.low());
  } else {
    __ vmov(dbl_scratch.low(), src);
    __ vcvt_f64_u32(dbl_scratch, dbl_scratch.low());
  }

  if (FLAG_inline_new) {
    __ LoadRoot(tmp3, Heap::kHeapNumberMapRootIndex);
    // ... (try to allocate a heap number in new space)
  }

  // Slow case: call the runtime to allocate the heap number. Put a safe value
  // in the result register first, since it is in the pointer map but
  // currently holds an integer.
  __ mov(dst, Operand::Zero());

  // Preserve the value of all registers.
  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
  // ...
  __ CallRuntimeSaveDoubles(Runtime::kHiddenAllocateHeapNumber);
  RecordSafepointWithRegisters(
      instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
  // ...
  __ StoreToSafepointRegisterSlot(r0, dst);
  // ...
}
void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
  class DeferredNumberTagD V8_FINAL : public LDeferredCode {
   public:
    DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() V8_OVERRIDE {
      codegen()->DoDeferredNumberTagD(instr_);
    }
    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
   private:
    LNumberTagD* instr_;
  };

  // ...
  Register scratch = scratch0();
  // ...

  DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr);
  if (FLAG_inline_new) {
    __ LoadRoot(scratch, Heap::kHeapNumberMapRootIndex);
    // Allocate the heap number inline; fall back to the deferred path on
    // allocation failure.
    __ AllocateHeapNumber(reg, temp1, temp2, scratch, deferred->entry(),
                          /* ... */);
  } else {
    __ jmp(deferred->entry());
  }
  __ bind(deferred->exit());
  // ... (store the double value into the new heap number)
}


void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
  Register reg = ToRegister(instr->result());
  // Put a safe value into the result register before the runtime call, since
  // it is in the pointer map but does not yet hold a valid pointer.
  __ mov(reg, Operand::Zero());

  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
  // ...
  __ CallRuntimeSaveDoubles(Runtime::kHiddenAllocateHeapNumber);
  RecordSafepointWithRegisters(
      instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
  // ...
  __ StoreToSafepointRegisterSlot(r0, reg);
}
void LCodeGen::DoSmiTag(LSmiTag* instr) {
  HChange* hchange = instr->hydrogen();
  Register input = ToRegister(instr->value());
  Register output = ToRegister(instr->result());
  if (hchange->CheckFlag(HValue::kCanOverflow) &&
      hchange->value()->CheckFlag(HValue::kUint32)) {
    // A uint32 value must have its two top bits clear to be representable
    // as a smi.
    __ tst(input, Operand(0xc0000000));
    DeoptimizeIf(ne, instr->environment());
  }
  if (hchange->CheckFlag(HValue::kCanOverflow) &&
      !hchange->value()->CheckFlag(HValue::kUint32)) {
    __ SmiTag(output, input, SetCC);
    DeoptimizeIf(vs, instr->environment());
  } else {
    __ SmiTag(output, input);
  }
}


void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
  Register input = ToRegister(instr->value());
  Register result = ToRegister(instr->result());
  if (instr->needs_check()) {
    // If the input is a heap object, SmiUntag sets the carry flag and the
    // code deoptimizes.
    __ SmiUntag(result, input, SetCC);
    DeoptimizeIf(cs, instr->environment());
  } else {
    __ SmiUntag(result, input);
  }
}
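// Note: on 32-bit ARM a smi is the value shifted left by one with a zero tag
// bit, so only integers in [-2^30, 2^30 - 1] are representable. SmiTag with
// SetCC raises the overflow flag ("vs") outside that range, and for uint32
// inputs the tst against 0xc0000000 rejects values >= 2^30 up front.
// SmiUntag with SetCC shifts the tag bit into the carry flag, so "cs" means
// the input was a heap object rather than a smi.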
void LCodeGen::EmitNumberUntagD(Register input_reg,
                                DwVfpRegister result_reg,
                                bool can_convert_undefined_to_nan,
                                bool deoptimize_on_minus_zero,
                                LEnvironment* env,
                                NumberUntagDMode mode) {
  Register scratch = scratch0();
  SwVfpRegister flt_scratch = double_scratch0().low();
  ASSERT(!result_reg.is(double_scratch0()));
  Label convert, load_smi, done;
  // ...
  // Smi check.
  __ UntagAndJumpIfSmi(scratch, input_reg, &load_smi);
  // Heap number map check.
  // ...
  __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
  __ cmp(scratch, Operand(ip));
  if (can_convert_undefined_to_nan) {
    __ b(ne, &convert);
  } else {
    DeoptimizeIf(ne, env);
  }
  // ... (load the heap number value into result_reg)
  if (deoptimize_on_minus_zero) {
    __ VmovLow(scratch, result_reg);
    __ cmp(scratch, Operand::Zero());
    // ...
    __ VmovHigh(scratch, result_reg);
    // ... (compare against the sign mask)
    DeoptimizeIf(eq, env);
  }
  // ...
  if (can_convert_undefined_to_nan) {
    __ bind(&convert);
    // Convert undefined to NaN; anything else deoptimizes.
    __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
    __ cmp(input_reg, Operand(ip));
    DeoptimizeIf(ne, env);
    __ LoadRoot(scratch, Heap::kNanValueRootIndex);
    // ... (load the canonical NaN value into result_reg)
  }
  // Smi case: untag and convert to double.
  // ...
  __ SmiUntag(scratch, input_reg);
  // ...
  __ bind(&load_smi);
  __ vmov(flt_scratch, scratch);
  __ vcvt_f64_s32(result_reg, flt_scratch);
  // ...
}
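// Note: when can_convert_undefined_to_nan is set, a tagged value that is
// neither a smi nor a heap number is accepted only if it is undefined, which
// becomes the canonical NaN; any other object still deoptimizes. This matches
// JavaScript's ToNumber(undefined) === NaN.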
void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
  Register input_reg = ToRegister(instr->value());
  Register scratch1 = scratch0();
  Register scratch2 = ToRegister(instr->temp());
  // ...

  ASSERT(!scratch1.is(input_reg) && !scratch1.is(scratch2));
  ASSERT(!scratch2.is(input_reg) && !scratch2.is(scratch1));

  Label done;

  // The caller untagged the input with SmiUntag(SetCC), leaving the tag bit
  // in the carry flag; reconstruct the original tagged pointer into scratch2.
  // ...
  __ adc(scratch2, input_reg, Operand(input_reg));

  // Heap number map check.
  // ...
  __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
  __ cmp(scratch1, Operand(ip));

  if (instr->truncating()) {
    // Truncating conversion, as used by the JS bitwise operations. Undefined,
    // true and false also have well-defined truncations.
    Label no_heap_number, check_bools, check_false;
    __ b(ne, &no_heap_number);
    __ TruncateHeapNumberToI(input_reg, scratch2);
    __ b(&done);

    // Check for undefined, which truncates to 0.
    __ bind(&no_heap_number);
    __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
    __ cmp(scratch2, Operand(ip));
    __ b(ne, &check_bools);
    __ mov(input_reg, Operand::Zero());
    __ b(&done);

    // Check for true, which truncates to 1.
    __ bind(&check_bools);
    __ LoadRoot(ip, Heap::kTrueValueRootIndex);
    __ cmp(scratch2, Operand(ip));
    __ b(ne, &check_false);
    __ mov(input_reg, Operand(1));
    __ b(&done);

    // Anything other than false deoptimizes; false truncates to 0.
    __ bind(&check_false);
    __ LoadRoot(ip, Heap::kFalseValueRootIndex);
    __ cmp(scratch2, Operand(ip));
    DeoptimizeIf(ne, instr->environment());
    __ mov(input_reg, Operand::Zero());
    __ b(&done);
  } else {
    // Deoptimize if the input is not a heap number.
    DeoptimizeIf(ne, instr->environment());

    // Load the double value and convert it; deoptimize if the conversion is
    // not exact.
    // ...
    __ TryDoubleToInt32Exact(input_reg, double_scratch2, double_scratch);
    DeoptimizeIf(ne, instr->environment());

    // -0 check when required.
    // ...
    __ cmp(input_reg, Operand::Zero());
    // ...
    __ VmovHigh(scratch1, double_scratch2);
    // ... (test the sign bit)
    DeoptimizeIf(ne, instr->environment());
  }
  __ bind(&done);
}
void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
  class DeferredTaggedToI V8_FINAL : public LDeferredCode {
   public:
    DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() V8_OVERRIDE {
      codegen()->DoDeferredTaggedToI(instr_);
    }
    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
   private:
    LTaggedToI* instr_;
  };

  LOperand* input = instr->value();
  ASSERT(input->IsRegister());
  ASSERT(input->Equals(instr->result()));

  Register input_reg = ToRegister(input);

  if (instr->hydrogen()->value()->representation().IsSmi()) {
    __ SmiUntag(input_reg);
  } else {
    DeferredTaggedToI* deferred = new(zone()) DeferredTaggedToI(this, instr);

    // Optimistically untag the input: if it was a smi we are done, otherwise
    // the tag bit ends up in the carry flag and the deferred code takes over.
    __ SmiUntag(input_reg, SetCC);
    __ b(cs, deferred->entry());
    __ bind(deferred->exit());
  }
}


void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
  LOperand* input = instr->value();
  ASSERT(input->IsRegister());
  LOperand* result = instr->result();
  ASSERT(result->IsDoubleRegister());

  Register input_reg = ToRegister(input);
  DwVfpRegister result_reg = ToDoubleRegister(result);

  HValue* value = instr->hydrogen()->value();
  // ... (pick the untag mode from the value's representation)

  EmitNumberUntagD(input_reg, result_reg,
                   instr->hydrogen()->can_convert_undefined_to_nan(),
                   instr->hydrogen()->deoptimize_on_minus_zero(),
                   instr->environment(),
                   mode);
}
void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
  Register result_reg = ToRegister(instr->result());
  Register scratch1 = scratch0();
  DwVfpRegister double_input = ToDoubleRegister(instr->value());
  LowDwVfpRegister double_scratch = double_scratch0();

  if (instr->truncating()) {
    __ TruncateDoubleToI(result_reg, double_input);
  } else {
    __ TryDoubleToInt32Exact(result_reg, double_input, double_scratch);
    // Deoptimize if the double cannot be converted exactly.
    DeoptimizeIf(ne, instr->environment());
    // -0 check when the instruction bails out on minus zero.
    // ...
    __ cmp(result_reg, Operand::Zero());
    // ...
    __ VmovHigh(scratch1, double_input);
    // ... (test the sign bit)
    DeoptimizeIf(ne, instr->environment());
  }
}


void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
  Register result_reg = ToRegister(instr->result());
  Register scratch1 = scratch0();
  DwVfpRegister double_input = ToDoubleRegister(instr->value());
  LowDwVfpRegister double_scratch = double_scratch0();

  if (instr->truncating()) {
    __ TruncateDoubleToI(result_reg, double_input);
  } else {
    __ TryDoubleToInt32Exact(result_reg, double_input, double_scratch);
    // Deoptimize if the double cannot be converted exactly.
    DeoptimizeIf(ne, instr->environment());
    // -0 check when the instruction bails out on minus zero.
    // ...
    __ cmp(result_reg, Operand::Zero());
    // ...
    __ VmovHigh(scratch1, double_input);
    // ... (test the sign bit)
    DeoptimizeIf(ne, instr->environment());
  }
  // Tag the result as a smi; deoptimize on overflow.
  __ SmiTag(result_reg, SetCC);
  DeoptimizeIf(vs, instr->environment());
}
void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
  LOperand* input = instr->value();
  __ SmiTst(ToRegister(input));
  DeoptimizeIf(ne, instr->environment());
}


void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
  if (!instr->hydrogen()->value()->IsHeapObject()) {
    LOperand* input = instr->value();
    __ SmiTst(ToRegister(input));
    DeoptimizeIf(eq, instr->environment());
  }
}


void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
  Register input = ToRegister(instr->value());
  Register scratch = scratch0();

  // ... (load the instance type of the input's map into scratch)

  if (instr->hydrogen()->is_interval_check()) {
    InstanceType first;
    InstanceType last;
    instr->hydrogen()->GetCheckInterval(&first, &last);

    __ cmp(scratch, Operand(first));

    // If there is only one type in the interval, check for equality.
    if (first == last) {
      DeoptimizeIf(ne, instr->environment());
    } else {
      DeoptimizeIf(lo, instr->environment());
      // Omit the upper-bound check if last is the last possible type.
      // ...
      __ cmp(scratch, Operand(last));
      DeoptimizeIf(hi, instr->environment());
    }
  } else {
    uint8_t mask;
    uint8_t tag;
    instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);

    if (IsPowerOf2(mask)) {
      // A single-bit mask can be tested with tst alone.
      __ tst(scratch, Operand(mask));
      DeoptimizeIf(tag == 0 ? ne : eq, instr->environment());
    } else {
      __ and_(scratch, scratch, Operand(mask));
      __ cmp(scratch, Operand(tag));
      DeoptimizeIf(ne, instr->environment());
    }
  }
}
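// Note: when the mask has a single bit set, "tst scratch, mask" alone decides
// the check: a tag of 0 requires the bit to be clear (deopt on "ne"), and a
// tag equal to the mask requires it to be set (deopt on "eq"). The general
// case needs the and_/cmp pair against the full tag value.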
void LCodeGen::DoCheckValue(LCheckValue* instr) {
  Register reg = ToRegister(instr->value());
  Handle<HeapObject> object = instr->hydrogen()->object().handle();
  // ...
  if (isolate()->heap()->InNewSpace(*object)) {
    // New-space objects may move, so compare through a cell that the GC keeps
    // up to date.
    Handle<Cell> cell = isolate()->factory()->NewCell(object);
    __ mov(ip, Operand(Handle<Object>(cell)));
    // ... (load the cell's value and compare it against reg)
  } else {
    __ cmp(reg, Operand(object));
  }
  DeoptimizeIf(ne, instr->environment());
}


void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
  {
    PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
    __ push(object);
    __ mov(cp, Operand::Zero());
    __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance);
    RecordSafepointWithRegisters(
        instr->pointer_map(), 1, Safepoint::kNoLazyDeopt);
    __ StoreToSafepointRegisterSlot(r0, scratch0());
  }
  // A smi result means the migration failed; deoptimize in that case.
  __ tst(scratch0(), Operand(kSmiTagMask));
  DeoptimizeIf(eq, instr->environment());
}
void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
  class DeferredCheckMaps V8_FINAL : public LDeferredCode {
   public:
    DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object)
        : LDeferredCode(codegen), instr_(instr), object_(object) {
      SetExit(check_maps());
    }
    virtual void Generate() V8_OVERRIDE {
      codegen()->DoDeferredInstanceMigration(instr_, object_);
    }
    Label* check_maps() { return &check_maps_; }
    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
   private:
    LCheckMaps* instr_;
    Label check_maps_;
    Register object_;
  };

  if (instr->hydrogen()->CanOmitMapChecks()) return;
  Register map_reg = scratch0();

  LOperand* input = instr->value();
  ASSERT(input->IsRegister());
  Register reg = ToRegister(input);

  // ... (load the object's map into map_reg)

  DeferredCheckMaps* deferred = NULL;
  if (instr->hydrogen()->has_migration_target()) {
    deferred = new(zone()) DeferredCheckMaps(this, instr, reg);
    __ bind(deferred->check_maps());
  }

  UniqueSet<Map> map_set = instr->hydrogen()->map_set();
  Label success;
  // Compare against every map but the last one, branching to success on a
  // match.
  for (int i = 0; i < map_set.size() - 1; i++) {
    Handle<Map> map = map_set.at(i).handle();
    __ CompareMap(map_reg, map, &success);
    __ b(eq, &success);
  }

  // The last map: a mismatch either attempts instance migration (deferred)
  // or deoptimizes.
  Handle<Map> map = map_set.at(map_set.size() - 1).handle();
  __ CompareMap(map_reg, map, &success);
  if (instr->hydrogen()->has_migration_target()) {
    __ b(ne, deferred->entry());
  } else {
    DeoptimizeIf(ne, instr->environment());
  }

  __ bind(&success);
}
void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
  DwVfpRegister value_reg = ToDoubleRegister(instr->unclamped());
  Register result_reg = ToRegister(instr->result());
  __ ClampDoubleToUint8(result_reg, value_reg, double_scratch0());
}


void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
  Register unclamped_reg = ToRegister(instr->unclamped());
  Register result_reg = ToRegister(instr->result());
  __ ClampUint8(result_reg, unclamped_reg);
}


void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
  Register scratch = scratch0();
  Register input_reg = ToRegister(instr->unclamped());
  Register result_reg = ToRegister(instr->result());
  // ...
  Label is_smi, done, heap_number;

  // Both smi and heap number cases are handled.
  __ UntagAndJumpIfSmi(result_reg, input_reg, &is_smi);

  // Check for heap number.
  // ...
  __ cmp(scratch, Operand(factory()->heap_number_map()));
  __ b(eq, &heap_number);

  // Undefined clamps to zero; anything else deoptimizes.
  __ cmp(input_reg, Operand(factory()->undefined_value()));
  DeoptimizeIf(ne, instr->environment());
  __ mov(result_reg, Operand::Zero());
  __ jmp(&done);

  // Heap number.
  __ bind(&heap_number);
  // ... (load the double value into temp_reg)
  __ ClampDoubleToUint8(result_reg, temp_reg, double_scratch0());
  __ jmp(&done);

  // Smi.
  __ bind(&is_smi);
  __ ClampUint8(result_reg, result_reg);

  __ bind(&done);
}
void LCodeGen::DoDoubleBits(LDoubleBits* instr) {
  DwVfpRegister value_reg = ToDoubleRegister(instr->value());
  Register result_reg = ToRegister(instr->result());
  if (instr->hydrogen()->bits() == HDoubleBits::HIGH) {
    __ VmovHigh(result_reg, value_reg);
  } else {
    __ VmovLow(result_reg, value_reg);
  }
}


void LCodeGen::DoConstructDouble(LConstructDouble* instr) {
  Register hi_reg = ToRegister(instr->hi());
  Register lo_reg = ToRegister(instr->lo());
  DwVfpRegister result_reg = ToDoubleRegister(instr->result());
  __ VmovHigh(result_reg, hi_reg);
  __ VmovLow(result_reg, lo_reg);
}
void LCodeGen::DoAllocate(LAllocate* instr) {
  class DeferredAllocate V8_FINAL : public LDeferredCode {
   public:
    DeferredAllocate(LCodeGen* codegen, LAllocate* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() V8_OVERRIDE {
      codegen()->DoDeferredAllocate(instr_);
    }
    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
   private:
    LAllocate* instr_;
  };

  DeferredAllocate* deferred =
      new(zone()) DeferredAllocate(this, instr);

  Register result = ToRegister(instr->result());
  Register scratch = ToRegister(instr->temp1());
  Register scratch2 = ToRegister(instr->temp2());

  // Allocate memory for the object, selecting the allocation flags from the
  // hydrogen instruction.
  // ...
  if (instr->hydrogen()->MustAllocateDoubleAligned()) {
    // ... (request double alignment)
  }
  if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
    ASSERT(!instr->hydrogen()->IsOldDataSpaceAllocation());
    ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
    // ... (pretenure into old pointer space)
  } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
    ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
    // ... (pretenure into old data space)
  }

  if (instr->size()->IsConstantOperand()) {
    int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
    // ...
    __ Allocate(size, result, scratch, scratch2, deferred->entry(), flags);
    // ... (very large constant sizes go straight to the deferred path)
    __ jmp(deferred->entry());
  } else {
    // ...
  }

  __ bind(deferred->exit());

  if (instr->hydrogen()->MustPrefillWithFiller()) {
    // Fill the allocated space with one-pointer filler maps so the heap
    // stays iterable.
    if (instr->size()->IsConstantOperand()) {
      int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
      __ mov(scratch, Operand(size));
    } else {
      // ...
    }
    __ mov(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
    // Loop backwards over the space, storing the filler map.
    // ...
    __ cmp(scratch, Operand(0));
    // ...
  }
}
void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
  Register result = ToRegister(instr->result());

  // Put a safe value in the result register before the runtime call, since
  // it is in the pointer map.
  // ...
  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
  if (instr->size()->IsRegister()) {
    // ... (tag the size register as a smi and push it)
  } else {
    // ... (push the constant size as a smi)
  }

  int flags = AllocateDoubleAlignFlag::encode(
      instr->hydrogen()->MustAllocateDoubleAligned());
  if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
    ASSERT(!instr->hydrogen()->IsOldDataSpaceAllocation());
    ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
    flags = AllocateTargetSpace::update(flags, OLD_POINTER_SPACE);
  } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
    ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
    flags = AllocateTargetSpace::update(flags, OLD_DATA_SPACE);
  } else {
    flags = AllocateTargetSpace::update(flags, NEW_SPACE);
  }
  // ... (push the smi-tagged flags)

  CallRuntimeFromDeferred(
      Runtime::kHiddenAllocateInTargetSpace, 2, instr, instr->context());
  __ StoreToSafepointRegisterSlot(r0, result);
}
void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
  // ...
  CallRuntime(Runtime::kToFastProperties, 1, instr);
}


void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
  Label materialized;
  // ...
  int literal_offset =
      FixedArray::OffsetOfElementAt(instr->hydrogen()->literal_index());
  __ Move(r6, instr->hydrogen()->literals());
  // ... (load the literal slot and check whether it is already materialized)
  __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
  // ...
  __ b(ne, &materialized);

  // Create the regexp literal using the runtime.
  // ...
  __ mov(r4, Operand(instr->hydrogen()->pattern()));
  __ mov(r3, Operand(instr->hydrogen()->flags()));
  // ... (push the arguments)
  CallRuntime(Runtime::kHiddenMaterializeRegExpLiteral, 4, instr);
  // ...

  __ bind(&materialized);
  int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
  Label allocated, runtime_allocate;
  // ... (try an inline new-space allocation of size bytes)

  __ bind(&runtime_allocate);
  // ... (push the size as a smi)
  CallRuntime(Runtime::kHiddenAllocateInNewSpace, 1, instr);
  // ...

  __ bind(&allocated);
  // Copy the content into the newly allocated memory.
  __ CopyFields(r0, r1, double_scratch0(), size / kPointerSize);
}
void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
  // ...
  // Use the fast-case closure allocation stub for nested functions that
  // don't need literals cloning and aren't pretenured.
  bool pretenure = instr->hydrogen()->pretenure();
  if (!pretenure && instr->hydrogen()->has_no_literals()) {
    FastNewClosureStub stub(instr->hydrogen()->strict_mode(),
                            instr->hydrogen()->is_generator());
    __ mov(r2, Operand(instr->hydrogen()->shared_info()));
    CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
  } else {
    __ mov(r2, Operand(instr->hydrogen()->shared_info()));
    __ mov(r1, Operand(pretenure ? factory()->true_value()
                                 : factory()->false_value()));
    // ... (push the arguments for the runtime call)
    CallRuntime(Runtime::kHiddenNewClosure, 3, instr);
  }
}


void LCodeGen::DoTypeof(LTypeof* instr) {
  // ...
  CallRuntime(Runtime::kTypeof, 1, instr);
}
void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
  Register input = ToRegister(instr->value());

  Condition final_branch_condition = EmitTypeofIs(instr->TrueLabel(chunk_),
                                                  instr->FalseLabel(chunk_),
                                                  input,
                                                  instr->type_literal());
  if (final_branch_condition != kNoCondition) {
    EmitBranch(instr, final_branch_condition);
  }
}


Condition LCodeGen::EmitTypeofIs(Label* true_label,
                                 Label* false_label,
                                 Register input,
                                 Handle<String> type_name) {
  Condition final_branch_condition = kNoCondition;
  Register scratch = scratch0();
  if (type_name->Equals(heap()->number_string())) {
    __ JumpIfSmi(input, true_label);
    // ... (load the map into scratch)
    __ CompareRoot(scratch, Heap::kHeapNumberMapRootIndex);
    final_branch_condition = eq;

  } else if (type_name->Equals(heap()->string_string())) {
    __ JumpIfSmi(input, false_label);
    // ... (reject instance types at or above the first non-string type)
    __ b(ge, false_label);
    // ... (and reject undetectable strings)
    final_branch_condition = eq;

  } else if (type_name->Equals(heap()->symbol_string())) {
    __ JumpIfSmi(input, false_label);
    // ... (compare the instance type against SYMBOL_TYPE)
    final_branch_condition = eq;

  } else if (type_name->Equals(heap()->boolean_string())) {
    __ CompareRoot(input, Heap::kTrueValueRootIndex);
    __ b(eq, true_label);
    __ CompareRoot(input, Heap::kFalseValueRootIndex);
    final_branch_condition = eq;

  } else if (FLAG_harmony_typeof && type_name->Equals(heap()->null_string())) {
    __ CompareRoot(input, Heap::kNullValueRootIndex);
    final_branch_condition = eq;

  } else if (type_name->Equals(heap()->undefined_string())) {
    __ CompareRoot(input, Heap::kUndefinedValueRootIndex);
    __ b(eq, true_label);
    __ JumpIfSmi(input, false_label);
    // Undetectable objects also report "undefined".
    // ...
    final_branch_condition = ne;

  } else if (type_name->Equals(heap()->function_string())) {
    Register type_reg = scratch;
    __ JumpIfSmi(input, false_label);
    // ... (compare against the JS function type)
    __ b(eq, true_label);
    // ... (and the JS function proxy type)
    final_branch_condition = eq;

  } else if (type_name->Equals(heap()->object_string())) {
    Register map = scratch;
    __ JumpIfSmi(input, false_label);
    if (!FLAG_harmony_typeof) {
      __ CompareRoot(input, Heap::kNullValueRootIndex);
      __ b(eq, true_label);
    }
    __ CheckObjectTypeRange(input,
                            map,
                            /* ... */);
    // Undetectable objects report "undefined", not "object".
    // ...
    final_branch_condition = eq;

  } else {
    __ b(false_label);
  }

  return final_branch_condition;
}
void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
  Register temp1 = ToRegister(instr->temp());
  EmitIsConstructCall(temp1, scratch0());
  EmitBranch(instr, eq);
}


void LCodeGen::EmitIsConstructCall(Register temp1, Register temp2) {
  ASSERT(!temp1.is(temp2));
  // ... (skip a possible arguments adaptor frame and compare the frame
  //      marker against the construct-frame marker)
}


void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
  if (!info()->IsStub()) {
    // Ensure that there is enough space after the previous lazy-deopt point
    // for the call patching to overwrite safely.
    int current_pc = masm()->pc_offset();
    if (current_pc < last_lazy_deopt_pc_ + space_needed) {
      int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
      // ...
      while (padding_size > 0) {
        __ nop();
        padding_size -= Assembler::kInstrSize;
      }
    }
  }
  last_lazy_deopt_pc_ = masm()->pc_offset();
}
void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
  last_lazy_deopt_pc_ = masm()->pc_offset();
  ASSERT(instr->HasEnvironment());
  LEnvironment* env = instr->environment();
  RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
}


void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
  // ... (select the bailout type)
  Comment(";;; deoptimize: %s", instr->hydrogen()->reason());
  DeoptimizeIf(al, instr->environment(), type);
}


void LCodeGen::DoDummy(LDummy* instr) {
  // Nothing to emit.
}


void LCodeGen::DoDummyUse(LDummyUse* instr) {
  // Nothing to emit.
}


void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
  LoadContextFromDeferred(instr->context());
  __ CallRuntimeSaveDoubles(Runtime::kHiddenStackGuard);
  RecordSafepointWithLazyDeopt(
      instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
  ASSERT(instr->HasEnvironment());
  LEnvironment* env = instr->environment();
  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
}
void LCodeGen::DoStackCheck(LStackCheck* instr) {
  class DeferredStackCheck V8_FINAL : public LDeferredCode {
   public:
    DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() V8_OVERRIDE {
      codegen()->DoDeferredStackCheck(instr_);
    }
    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
   private:
    LStackCheck* instr_;
  };

  ASSERT(instr->HasEnvironment());
  LEnvironment* env = instr->environment();
  // There is no LLazyBailout instruction for stack checks, so lazy
  // deoptimization has to be prepared for explicitly here.
  if (instr->hydrogen()->is_function_entry()) {
    // Perform the stack overflow check.
    Label done;
    __ LoadRoot(ip, Heap::kStackLimitRootIndex);
    __ cmp(sp, Operand(ip));
    __ b(hs, &done);
    // ...
    ASSERT(instr->context()->IsRegister());
    // ... (the StackCheck builtin expects the context in cp)
    CallCode(isolate()->builtins()->StackCheck(),
             RelocInfo::CODE_TARGET,
             instr);
    // ...
    __ bind(&done);
  } else {
    ASSERT(instr->hydrogen()->is_backwards_branch());
    // Perform the stack overflow check before the backwards branch.
    DeferredStackCheck* deferred_stack_check =
        new(zone()) DeferredStackCheck(this, instr);
    __ LoadRoot(ip, Heap::kStackLimitRootIndex);
    __ cmp(sp, Operand(ip));
    __ b(lo, deferred_stack_check->entry());
    // ...
    __ bind(instr->done_label());
    deferred_stack_check->SetExit(instr->done_label());
    RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
    // ...
  }
}
void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
  // This is a pseudo-instruction that ensures the environment at the OSR
  // entry point is properly registered for deoptimization.
  // ...
  ASSERT(!environment->HasBeenRegistered());
  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);

  GenerateOsrPrologue();
}


void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
  // Deoptimize if the object being enumerated is undefined.
  __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
  __ cmp(r0, ip);
  DeoptimizeIf(eq, instr->environment());

  // Deoptimize if the object is null.
  Register null_value = r5;
  __ LoadRoot(null_value, Heap::kNullValueRootIndex);
  __ cmp(r0, null_value);
  DeoptimizeIf(eq, instr->environment());

  // Deoptimize if the object is a smi.
  // ...
  DeoptimizeIf(eq, instr->environment());

  // Deoptimize if the object is not a JS object (e.g. a string).
  // ...
  DeoptimizeIf(le, instr->environment());

  Label use_cache, call_runtime;
  __ CheckEnumCache(null_value, &call_runtime);
  // ... (fast path: use the map's enum cache)

  // Get the set of properties to enumerate from the runtime.
  __ bind(&call_runtime);
  // ...
  CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr);
  // ... (the result must be a descriptor array carried by a meta map)
  __ LoadRoot(ip, Heap::kMetaMapRootIndex);
  // ...
  DeoptimizeIf(ne, instr->environment());
  __ bind(&use_cache);
}
void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
  Register map = ToRegister(instr->map());
  Register result = ToRegister(instr->result());
  Label load_cache, done;
  __ EnumLength(result, map);
  // An enum length of zero means there is nothing to enumerate.
  // ...
  __ b(ne, &load_cache);
  __ mov(result, Operand(isolate()->factory()->empty_fixed_array()));
  // ...

  __ bind(&load_cache);
  __ LoadInstanceDescriptors(map, result);
  // ... (load the enum cache and its bridge cache into result)
  __ cmp(result, Operand::Zero());
  DeoptimizeIf(eq, instr->environment());
}


void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
  Register object = ToRegister(instr->value());
  Register map = ToRegister(instr->map());
  // ... (load the object's map into scratch0())
  __ cmp(map, scratch0());
  DeoptimizeIf(ne, instr->environment());
}


void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
  Register object = ToRegister(instr->object());
  Register index = ToRegister(instr->index());
  Register result = ToRegister(instr->result());
  Register scratch = scratch0();

  Label out_of_object, done;
  __ cmp(index, Operand::Zero());
  __ b(lt, &out_of_object);

  // In-object property: the index is a non-negative smi counted from the
  // start of the object.
  __ add(scratch, object, Operand::PointerOffsetFromSmiKey(index));
  // ... (load the field and jump to done)

  // Out-of-object property: the index is negative and counts backwards from
  // the end of the properties array.
  __ bind(&out_of_object);
  // ... (load the properties array into result)
  __ sub(scratch, result, Operand::PointerOffsetFromSmiKey(index));
  // ... (load the field)
  __ bind(&done);
}
static Handle< Code > initialize_stub(Isolate *isolate, StrictMode strict_mode)
enable upcoming ES6 features enable harmony block scoping enable harmony enable harmony proxies enable harmony generators enable harmony numeric enable harmony string enable harmony math functions harmony_scoping harmony_symbols harmony_collections harmony_iteration harmony_strings harmony_scoping harmony_maths tracks arrays with only smi values Optimize object Array DOM strings and string pretenure call new trace pretenuring decisions of HAllocate instructions track fields with only smi values track fields with heap values track_fields track_fields Enables optimizations which favor memory size over execution speed use string slices optimization filter maximum number of GVN fix point iterations use function inlining use allocation folding eliminate write barriers targeting allocations in optimized code maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining crankshaft harvests type feedback from stub cache trace check elimination phase hydrogen tracing filter NULL
static DwVfpRegister FromAllocationIndex(int index)
static const int kLengthOffset
void FinishCode(Handle< Code > code)
static const int kHashFieldOffset
static const int kBitFieldOffset
MemOperand ToHighMemOperand(LOperand *op) const
void DoDeferredStringCharFromCode(LStringCharFromCode *instr)
void DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal *instr, Label *map_check)
const intptr_t kSmiTagMask
static const int kCodeEntryOffset
static const int kPrototypeOrInitialMapOffset
static const int kValueOffset
enable upcoming ES6 features enable harmony block scoping enable harmony enable harmony proxies enable harmony generators enable harmony numeric enable harmony string enable harmony math functions harmony_scoping harmony_symbols harmony_collections harmony_iteration harmony_strings harmony_scoping harmony_maths tracks arrays with only smi values Optimize object Array DOM strings and string pretenure call new trace pretenuring decisions of HAllocate instructions track fields with only smi values track fields with heap values track_fields track_fields Enables optimizations which favor memory size over execution speed use string slices optimization filter maximum number of GVN fix point iterations use function inlining use allocation folding eliminate write barriers targeting allocations in optimized code maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining crankshaft harvests type feedback from stub cache trace check elimination phase hydrogen tracing filter trace hydrogen to given file name trace inlining decisions trace store elimination trace all use positions trace global value numbering trace hydrogen escape analysis trace the tracking of allocation sites trace map generalization environment for every instruction deoptimize every n garbage collections put a break point before deoptimizing deoptimize uncommon cases use on stack replacement trace array bounds check elimination perform array index dehoisting use load elimination use store elimination use constant folding eliminate unreachable code number of stress runs when picking a function to watch for shared function not JSFunction itself flushes the cache of optimized code for closures on every GC functions with arguments object maximum number of escape analysis fix point iterations allow uint32 values on optimize frames if they are used only in safe operations track concurrent recompilation artificial compilation delay in ms concurrent on stack replacement do not emit check maps for constant values that have a leaf map
static const int kEnumCacheOffset
const uint32_t kTwoByteStringTag
int StackSlotOffset(int index)
const LowDwVfpRegister d0
RegisterType type() const
static Smi * FromInt(int value)
Smi * ToSmi(LConstantOperand *op) const
static TypeFeedbackId None()
bool NeedsEagerFrame() const
SafepointGenerator(LCodeGen *codegen, LPointerMap *pointers, Safepoint::DeoptMode mode)
Register EmitLoadRegister(LOperand *op, Register scratch)
static Handle< Code > GetUninitialized(Isolate *isolate, Token::Value op)
static const int kDataOffset
bool IsSmi(LConstantOperand *op) const
static Handle< T > cast(Handle< S > that)
static const int kGlobalReceiverOffset
static Representation Integer32()
static const unsigned int kContainsCachedArrayIndexMask
static bool IsSupported(CpuFeature f)
AllocationSiteOverrideMode
void DoDeferredMathAbsTaggedHeapNumber(LMathAbs *instr)
#define ASSERT(condition)
friend class BlockConstPoolScope
void DoDeferredStringCharCodeAt(LStringCharCodeAt *instr)
static const int kContextOffset
const int kPointerSizeLog2
static const int kInObjectFieldCount
const uint32_t kStringRepresentationMask
MemOperand GlobalObjectOperand()
static const int kCallerFPOffset
PerThreadAssertScopeDebugOnly< DEFERRED_HANDLE_DEREFERENCE_ASSERT, true > AllowDeferredHandleDereference
static Handle< Code > initialize_stub(Isolate *isolate, ExtraICState extra_state)
static const int kInstanceClassNameOffset
int WhichPowerOf2(uint32_t x)
bool IsSimpleMapChangeTransition(ElementsKind from_kind, ElementsKind to_kind)
Variable * parameter(int index) const
MemOperand ContextOperand(Register context, int index)
static const int kContextOffset
virtual LOperand * result() const =0
void DoDeferredStackCheck(LStackCheck *instr)
DwVfpRegister EmitLoadDoubleRegister(LOperand *op, SwVfpRegister flt_scratch, DwVfpRegister dbl_scratch)
int LookupDestination(int block_id) const
Condition ReverseCondition(Condition cond)
Operand ToOperand(LOperand *op)
const uint32_t kVFPDefaultNaNModeControlBit
const uint32_t kSlotsZapValue
int32_t WhichPowerOf2Abs(int32_t x)
enable upcoming ES6 features enable harmony block scoping enable harmony enable harmony proxies enable harmony generators enable harmony numeric enable harmony string enable harmony math functions harmony_scoping harmony_symbols harmony_collections harmony_iteration harmony_strings harmony_scoping harmony_maths tracks arrays with only smi values Optimize object Array DOM strings and string pretenure call new trace pretenuring decisions of HAllocate instructions track fields with only smi values track fields with heap values track_fields track_fields Enables optimizations which favor memory size over execution speed use string slices optimization filter maximum number of GVN fix point iterations use function inlining use allocation folding eliminate write barriers targeting allocations in optimized code maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining crankshaft harvests type feedback from stub cache trace check elimination phase hydrogen tracing filter trace hydrogen to given file name trace inlining decisions trace store elimination trace all use positions trace global value numbering trace hydrogen escape analysis trace the tracking of allocation sites trace map generalization environment for every instruction deoptimize every n garbage collections put a break point before deoptimizing deoptimize uncommon cases use on stack replacement trace array bounds check elimination perform array index dehoisting use load elimination use store elimination use constant folding eliminate unreachable code number of stress runs when picking a function to watch for shared function not JSFunction itself flushes the cache of optimized code for closures on every GC functions with arguments object maximum number of escape analysis fix point iterations allow uint32 values on optimize frames if they are used only in safe operations track concurrent recompilation artificial compilation delay in ms concurrent on stack replacement do not emit check maps for constant values that have a leaf deoptimize the optimized code if the layout of the maps changes number of stack frames inspected by the profiler percentage of ICs that must have type info to allow optimization extra verbose compilation tracing generate extra emit comments in code disassembly enable use of SSE3 instructions if available enable use of CMOV instruction if available enable use of VFP3 instructions if available enable use of NEON instructions if enable use of SDIV and UDIV instructions if enable loading bit constant by means of movw movt instruction enable unaligned accesses for enable use of d16 d31 registers on ARM this requires VFP3 force all emitted branches to be in long mode(MIPS only)") DEFINE_string(expose_natives_as
DwVfpRegister DoubleRegister
enable upcoming ES6 features enable harmony block scoping enable harmony enable harmony proxies enable harmony generators enable harmony numeric enable harmony string enable harmony math functions harmony_scoping harmony_symbols harmony_collections harmony_iteration harmony_strings harmony_scoping harmony_maths tracks arrays with only smi values Optimize object size
friend class LEnvironment
static const int kLengthOffset
STATIC_ASSERT(sizeof(CPURegister)==sizeof(Register))
static const int kValueOffset
bool IsFastPackedElementsKind(ElementsKind kind)
const uint32_t kHoleNanUpper32
static const int kDontAdaptArgumentsSentinel
void DoDeferredNumberTagD(LNumberTagD *instr)
static uint32_t update(uint32_t previous, AllocationSpace value)
static Address GetDeoptimizationEntry(Isolate *isolate, int id, BailoutType type, GetEntryMode mode=ENSURE_ENTRY_CODE)
void check(i::Vector< const uint8_t > string)
static void MaybeCallEntryHook(MacroAssembler *masm)
DwVfpRegister ToDoubleRegister(LOperand *op) const
void DoDeferredAllocate(LAllocate *instr)
static int GetDeoptimizationId(Isolate *isolate, Address addr, BailoutType type)
const uint32_t kHoleNanLower32
static const int kMaxRegularHeapObjectSize
static Register FromAllocationIndex(int index)
static const int kCallerSPOffset
static const int kCacheStampOffset
bool IsFixedTypedArrayElementsKind(ElementsKind kind)
static const int kPropertiesOffset
int32_t ToInteger32(LConstantOperand *op) const
int num_parameters() const
bool IsInteger32(LConstantOperand *op) const
static const int kMarkerOffset
bool IsFastSmiElementsKind(ElementsKind kind)
static void Generate(MacroAssembler *masm, Register string, Register index, Register result, Label *call_runtime)
Handle< Object > ToHandle(LConstantOperand *op) const
static const int kHeaderSize
#define STATIC_ASCII_VECTOR(x)
void DoDeferredInstanceMigration(LCheckMaps *instr, Register object)
void DoParallelMove(LParallelMove *move)
double ToDouble(LConstantOperand *op) const
int ElementsKindToShiftSize(ElementsKind elements_kind)
Vector< const char > CStrVector(const char *data)
static int OffsetOfElementAt(int index)
SwVfpRegister low() const
static int SizeFor(int length)
bool NeedsDeferredFrame() const
static const int kHeaderSize
void Load(const v8::FunctionCallbackInfo< v8::Value > &args)
LPointerMap * pointer_map() const
static const int kMapOffset
static const int kValueOffset
static const int kFixedFrameSizeFromFp
bool is(Register reg) const
const LowDwVfpRegister d2
virtual ~SafepointGenerator()
Handle< T > handle(T *t, Isolate *isolate)
MemOperand FieldMemOperand(Register object, int offset)  // see the standalone sketch after this list
static const int kHasNonInstancePrototype
void WriteTranslation(LEnvironment *environment, Translation *translation)
static const int kFunctionOffset
static const uint32_t kSignMask
static const int kNotDeoptimizationEntry
LinkRegisterStatus GetLinkRegisterState() const
int32_t ToRepresentation(LConstantOperand *op, const Representation &r) const
static const int kHeaderSize
static AllocationSiteMode GetMode(ElementsKind boilerplate_elements_kind)
Condition NegateCondition(Condition cond)
#define ASSERT_EQ(v1, v2)
MemOperand PrepareKeyedOperand(Register key, Register base, bool key_is_constant, int constant_key, int element_size, int shift_size, int additional_index, int additional_offset)
kInstanceClassNameOffset
kNeedsAccessCheckBit
kRemovePrototypeBit
kIsExpressionBit
kAllowLazyCompilation
kUsesArguments
formal_parameter_count
static const int kConstructorOffset
const uint32_t kOneByteStringTag
static const int kIsUndetectable
virtual void AfterCall() const V8_OVERRIDE
static const int kHeaderSize
Register ToRegister(LOperand *op) const
void Add(const T &element, AllocationPolicy allocator=AllocationPolicy())
void DoDeferredTaggedToI(LTaggedToI *instr)
bool IsSmiOrTagged() const
static const int kInstrSize
LParallelMove * GetParallelMove(InnerPosition pos)
static const int kPrototypeOffset
#define RUNTIME_ENTRY(name, nargs, ressize)
void DoDeferredNumberTagIU(LInstruction *instr, LOperand *value, LOperand *temp1, LOperand *temp2, IntegerSignedness signedness)
const LowDwVfpRegister d1
static void EmitMathExp(MacroAssembler *masm, DwVfpRegister input, DwVfpRegister result, DwVfpRegister double_scratch1, DwVfpRegister double_scratch2, Register temp1, Register temp2, Register temp3)
MemOperand ToMemOperand(LOperand *op) const
bool IsNextEmittedBlock(int block_id) const
static const int kCompilerHintsOffset
LCodeGen(LChunk *chunk, MacroAssembler *assembler, CompilationInfo *info)
static const int kSharedFunctionInfoOffset
static const int kMaxValue
friend class SafepointGenerator
ElementsKind GetHoleyElementsKind(ElementsKind packed_kind)
static const int32_t kMaxOneByteCharCode
static const int kExponentOffset
bool EvalComparison(Token::Value op, double op1, double op2)
static uint32_t encode(bool value)
const uint32_t kStringEncodingMask
static const int kInstanceTypeOffset
virtual void BeforeCall(int call_size) const V8_OVERRIDE
static const int kMantissaOffset
friend class LDeferredCode
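
The FieldMemOperand helper referenced above builds a memory operand for a field inside a tagged heap object. The following is a minimal, standalone sketch of the underlying address arithmetic only, assuming V8's convention that heap-object pointers carry a low tag bit (kHeapObjectTag == 1); the FieldAddress name and the field offset used here are illustrative, not part of the codegen source.

#include <cassert>
#include <cstdint>

// Tagged heap pointers have the low bit set, so a field access subtracts
// kHeapObjectTag from the field offset instead of untagging the pointer first.
constexpr intptr_t kHeapObjectTag = 1;  // assumed tag value for illustration

// Compute the effective address of a field at `offset` inside a tagged object.
inline intptr_t FieldAddress(intptr_t tagged_object, int offset) {
  return tagged_object + offset - kHeapObjectTag;
}

int main() {
  const intptr_t untagged = 0x1000;                     // where the object actually lives
  const intptr_t tagged = untagged + kHeapObjectTag;    // what the VM passes around
  const int kExampleFieldOffset = 8;                    // illustrative field offset (assumed)
  assert(FieldAddress(tagged, kExampleFieldOffset) == untagged + kExampleFieldOffset);
  return 0;
}

In the generated code this arithmetic is folded into a single load or store, which is why the codegen can address fields of tagged objects without ever materializing the untagged pointer.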