#if V8_TARGET_ARCH_IA32

class SafepointGenerator V8_FINAL : public CallWrapper {
 public:
  SafepointGenerator(LCodeGen* codegen,
                     LPointerMap* pointers,
                     Safepoint::DeoptMode mode)
      : codegen_(codegen), pointers_(pointers), deopt_mode_(mode) {}

  virtual ~SafepointGenerator() {}

  virtual void BeforeCall(int call_size) const V8_OVERRIDE {}

  virtual void AfterCall() const V8_OVERRIDE {
    codegen_->RecordSafepoint(pointers_, deopt_mode_);
  }

 private:
  LCodeGen* codegen_;
  LPointerMap* pointers_;
  Safepoint::DeoptMode deopt_mode_;
};
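
// NOTE: SafepointGenerator is handed to the macro assembler as a CallWrapper
// so that a safepoint is recorded immediately after each generated call: the
// pointer map (which stack slots hold tagged values) is tied to that return
// address for the GC and for lazy deoptimization.
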
  LPhase phase("Z_Code generation", chunk());

  FrameScope frame_scope(masm_, StackFrame::MANUAL);

  support_aligned_spilled_doubles_ = info()->IsOptimizing();

  dynamic_frame_alignment_ = info()->IsOptimizing() &&
      ((chunk()->num_double_slots() > 2 &&
        !chunk()->graph()->is_recursive()) ||
       !info()->osr_ast_id().IsNone());

  return GeneratePrologue() &&
         GenerateDeferredCode() &&
         GenerateJumpTable() &&
         GenerateSafepointTable();

  code->set_stack_slots(GetStackSlotCount());
  code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
  if (code->is_optimized_code()) RegisterWeakObjectsInOptimizedCode(code);
  PopulateDeoptimizationData(code);
  if (!info()->IsStub()) {
    info()->CommitDependencies(code);

  info()->set_bailout_reason(reason);
void LCodeGen::MakeSureStackPagesMapped(int offset) {
  const int kPageSize = 4 * KB;
  for (offset -= kPageSize; offset > 0; offset -= kPageSize) {
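  // NOTE: one word per page of the freshly allocated frame is touched here,
  // starting at the highest offset and working down toward esp, so that the
  // guard page is hit in order and the OS commits every stack page before the
  // frame is actually used.
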
void LCodeGen::SaveCallerDoubles() {
  Comment(";;; Save clobbered callee double registers");
  CpuFeatureScope scope(masm(), SSE2);
  BitVector* doubles = chunk()->allocated_double_registers();
  BitVector::Iterator save_iterator(doubles);
  while (!save_iterator.Done()) {
    save_iterator.Advance();

void LCodeGen::RestoreCallerDoubles() {
  Comment(";;; Restore clobbered callee double registers");
  CpuFeatureScope scope(masm(), SSE2);
  BitVector* doubles = chunk()->allocated_double_registers();
  BitVector::Iterator save_iterator(doubles);
  while (!save_iterator.Done()) {
    save_iterator.Advance();
bool LCodeGen::GeneratePrologue() {
  if (info()->IsOptimizing()) {
    if (strlen(FLAG_stop_at) > 0 &&
        info_->function()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {

    if (info_->this_has_uses() &&
        info_->strict_mode() == SLOPPY &&
        !info_->is_native()) {
      __ mov(ecx, Operand(esp, receiver_offset));
      __ cmp(ecx, isolate()->factory()->undefined_value());
      __ mov(Operand(esp, receiver_offset), ecx);

    if (support_aligned_spilled_doubles_ && dynamic_frame_alignment_) {
      Label do_not_pad, align_loop;
      __ push(Immediate(0));
      __ mov(ecx, Immediate(scope()->num_parameters() + 2));
      __ bind(&align_loop);
      __ bind(&do_not_pad);
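      // NOTE: when dynamic frame alignment is enabled, one extra padding word
      // may be pushed here so that spilled double slots end up 8-byte aligned;
      // the align_loop above then moves the parameters, receiver and return
      // address by one slot and leaves a marker word that the epilogue can
      // recognize and remove again.
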
  info()->set_prologue_offset(masm_->pc_offset());
    frame_is_built_ = true;
    info()->AddNoFrameRange(0, masm_->pc_offset());

  if (info()->IsOptimizing() &&
      dynamic_frame_alignment_ &&
    __ Assert(zero, kFrameIsExpectedToBeAligned);

  int slots = GetStackSlotCount();
  ASSERT(slots != 0 || !info()->IsOptimizing());
    if (dynamic_frame_alignment_) {
    if (FLAG_debug_code) {
      __ mov(Operand(eax), Immediate(slots));

    if (support_aligned_spilled_doubles_) {
      Comment(";;; Store dynamic frame alignment tag for spilled doubles");
      if (dynamic_frame_alignment_) {

  if (heap_slots > 0) {
    Comment(";;; Allocate local context");
    if (heap_slots <= FastNewContextStub::kMaximumSlots) {
      FastNewContextStub stub(heap_slots);
      __ CallRuntime(Runtime::kHiddenNewFunctionContext, 1);
    RecordSafepoint(Safepoint::kNoLazyDeopt);
    for (int i = 0; i < num_parameters; i++) {
      if (var->IsContextSlot()) {
        __ mov(eax, Operand(ebp, parameter_offset));
        __ mov(Operand(esi, context_offset), eax);
        __ RecordWriteContextSlot(esi,
    Comment(";;; End allocate local context");

  if (FLAG_trace && info()->IsOptimizing()) {
    __ CallRuntime(Runtime::kTraceEnter, 0);
  return !is_aborted();
void LCodeGen::GenerateOsrPrologue() {
  if (osr_pc_offset_ >= 0) return;
  osr_pc_offset_ = masm()->pc_offset();

  if (support_aligned_spilled_doubles_ && dynamic_frame_alignment_) {
    Label do_not_pad, align_loop;
    __ j(zero, &do_not_pad, Label::kNear);
    __ push(Immediate(0));
    __ mov(ecx, Immediate(scope()->num_parameters() +
                          5 + graph()->osr()->UnoptimizedFrameSlots()));
    __ bind(&align_loop);
    __ bind(&do_not_pad);
    __ push(alignment_loc);
    __ mov(alignment_loc, edx);

  int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots();
void LCodeGen::GenerateBodyInstructionPre(LInstruction* instr) {
  if (instr->IsCall()) {
  if (!instr->IsLazyBailout() && !instr->IsGap()) {
    safepoints_.BumpLastLazySafepointIndex();

void LCodeGen::GenerateBodyInstructionPost(LInstruction* instr) {
  if (instr->IsGoto()) {
    x87_stack_.LeavingBlock(current_block_, LGoto::cast(instr));
             !instr->IsGap() && !instr->IsReturn()) {
    if (instr->ClobbersDoubleRegisters()) {
      if (instr->HasDoubleRegisterResult()) {
    __ VerifyX87StackDepth(x87_stack_.depth());
bool LCodeGen::GenerateJumpTable() {
  if (jump_table_.length() > 0) {
    Comment(";;; -------------------- Jump table --------------------");
  for (int i = 0; i < jump_table_.length(); i++) {
    __ bind(&jump_table_[i].label);
    Address entry = jump_table_[i].address;
      Comment(";;; jump table entry %d.", i);
      Comment(";;; jump table entry %d: deoptimization bailout %d.", i, id);
    if (jump_table_[i].needs_frame) {
      __ push(Immediate(ExternalReference::ForDeoptEntry(entry)));
      if (needs_frame.is_bound()) {
        __ jmp(&needs_frame);
        __ bind(&needs_frame);
        Label push_approx_pc;
        __ call(&push_approx_pc);
        __ bind(&push_approx_pc);
      RestoreCallerDoubles();
  return !is_aborted();
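
// NOTE: all deoptimization bailouts in the function branch to this shared jump
// table. Each entry pushes the address of the corresponding Deoptimizer entry
// and, when no frame has been built (typically in stubs), constructs one
// first, so the deoptimizer always finds a consistent frame layout.
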
bool LCodeGen::GenerateDeferredCode() {
  if (deferred_.length() > 0) {
    for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
      X87Stack copy(code->x87_stack());
          instructions_->at(code->instruction_index())->hydrogen_value();
      RecordAndWritePosition(
          chunk()->graph()->SourcePositionToScriptPosition(value->position()));
      Comment(";;; <@%d,#%d> "
              "-------------------- Deferred %s --------------------",
              code->instruction_index(),
              code->instr()->hydrogen_value()->id(),
              code->instr()->Mnemonic());
      __ bind(code->entry());
        Comment(";;; Build frame");
        frame_is_built_ = true;
      Comment(";;; Deferred code");
      __ bind(code->done());
        Comment(";;; Destroy frame");
        frame_is_built_ = false;
      __ jmp(code->exit());
  if (!is_aborted()) status_ = DONE;
  return !is_aborted();
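
// NOTE: deferred code is the out-of-line slow path (allocations, runtime
// calls, etc.) requested by individual instructions. It is emitted after the
// main body, and every piece ends with a jump back to code->exit(), keeping
// the fast path compact and fall-through.
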
bool LCodeGen::GenerateSafepointTable() {
  if (!info()->IsStub()) {
    while (masm()->pc_offset() < target_offset) {
  safepoints_.Emit(masm(), GetStackSlotCount());
  return !is_aborted();
  ASSERT(x87_stack_.Contains(reg));
  x87_stack_.Fxch(reg);

  ASSERT(x87_stack_.Contains(reg1));
  ASSERT(x87_stack_.Contains(reg2));
  x87_stack_.Fxch(reg1, 1);
  x87_stack_.Fxch(reg2);

void LCodeGen::X87Stack::Fxch(X87Register reg, int other_slot) {
  ASSERT(Contains(reg) && stack_depth_ > other_slot);
  int i = ArrayIndex(reg);
  if (st != other_slot) {
    int other_i = st2idx(other_slot);
    X87Register other = stack_[other_i];
    stack_[other_i] = reg;
  } else if (other_slot == 0) {

int LCodeGen::X87Stack::st2idx(int pos) {
  return stack_depth_ - pos - 1;

int LCodeGen::X87Stack::ArrayIndex(X87Register reg) {
  for (int i = 0; i < stack_depth_; i++) {
    if (stack_[i].is(reg)) return i;

bool LCodeGen::X87Stack::Contains(X87Register reg) {
  for (int i = 0; i < stack_depth_; i++) {
    if (stack_[i].is(reg)) return true;

void LCodeGen::X87Stack::Free(X87Register reg) {
  int i = ArrayIndex(reg);
  int tos_i = st2idx(0);
  stack_[i] = stack_[tos_i];

  if (x87_stack_.Contains(dst)) {
    x87_stack_.Fxch(dst);
    x87_stack_.push(dst);

void LCodeGen::X87Fld(Operand src, X87OperandType opts) {
  ASSERT(!src.is_reg_only());

  ASSERT(!dst.is_reg_only());
  x87_stack_.Fxch(src);

void LCodeGen::X87Stack::PrepareToWrite(X87Register reg) {
  stack_[stack_depth_] = reg;

void LCodeGen::X87Stack::CommitWrite(X87Register reg) {
  ASSERT(!Contains(reg) && stack_[stack_depth_].is(reg) &&

    X87Register left, X87Register right, X87Register result) {
  x87_stack_.Fxch(right, 1);
  x87_stack_.Fxch(left);

void LCodeGen::X87Stack::FlushIfNecessary(LInstruction* instr, LCodeGen* cgen) {
  if (stack_depth_ > 0 && instr->ClobbersDoubleRegisters()) {
    bool double_inputs = instr->HasDoubleRegisterInput();
    for (int i = stack_depth_ - 1; i >= 0; i--) {
      X87Register reg = stack_[i];
      if (double_inputs && instr->IsDoubleInput(reg, cgen)) {
      if (i < stack_depth_ - 1) i++;
  if (instr->IsReturn()) {
    while (stack_depth_ > 0) {

void LCodeGen::X87Stack::LeavingBlock(int current_block_id, LGoto* goto_instr) {
  ASSERT(stack_depth_ <= 1);
  if (current_block_id + 1 != goto_instr->block_id()) {

void LCodeGen::EmitFlushX87ForDeopt() {
  __ VerifyX87StackDepth(x87_stack_.depth());
  for (int i = 0; i < x87_stack_.depth(); i++) __ fstp(0);
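  // NOTE: without SSE2 doubles live on the x87 FPU stack, and X87Stack above
  // mirrors that stack in software so values can be found and fxch'd into
  // position. The fstp loop here empties the FPU stack before jumping to a
  // deopt entry, because the deoptimizer does not track x87 registers.
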
  ASSERT(op->IsDoubleRegister());

  ASSERT(op->IsDoubleRegister());

                                   const Representation& r) const {
  HConstant* constant = chunk_->LookupConstant(op);
  int32_t value = constant->Integer32Value();
  if (r.IsInteger32()) return value;
  ASSERT(r.IsSmiOrTagged());

  HConstant* constant = chunk_->LookupConstant(op);
  ASSERT(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged());
  return constant->handle(isolate());

  HConstant* constant = chunk_->LookupConstant(op);
  ASSERT(constant->HasDoubleValue());
  return constant->DoubleValue();

ExternalReference LCodeGen::ToExternalReference(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  ASSERT(constant->HasExternalReferenceValue());
  return constant->ExternalReferenceValue();

  return chunk_->LookupLiteralRepresentation(op).IsSmiOrInteger32();

  return chunk_->LookupLiteralRepresentation(op).IsSmi();

static int ArgumentsOffsetWithoutFrame(int index) {

  if (op->IsRegister()) return Operand(ToRegister(op));
  ASSERT(op->IsStackSlot() || op->IsDoubleStackSlot());
  return Operand(esp, ArgumentsOffsetWithoutFrame(op->index()));

  ASSERT(op->IsDoubleStackSlot());
                                Translation* translation) {
  if (environment == NULL) return;

  int translation_size = environment->translation_size();
  int height = translation_size - environment->parameter_count();

  bool has_closure_id = !info()->closure().is_null() &&
      !info()->closure().is_identical_to(environment->closure());
  int closure_id = has_closure_id
      ? DefineDeoptimizationLiteral(environment->closure())
      : Translation::kSelfLiteralId;
  switch (environment->frame_type()) {
      translation->BeginJSFrame(environment->ast_id(), closure_id, height);
      translation->BeginConstructStubFrame(closure_id, translation_size);
      ASSERT(translation_size == 1);
      translation->BeginGetterStubFrame(closure_id);
      ASSERT(translation_size == 2);
      translation->BeginSetterStubFrame(closure_id);
      translation->BeginArgumentsAdaptorFrame(closure_id, translation_size);
      translation->BeginCompiledStubFrame();

  int object_index = 0;
  int dematerialized_index = 0;
  for (int i = 0; i < translation_size; ++i) {
    LOperand* value = environment->values()->at(i);
    AddToTranslation(environment,
                     environment->HasTaggedValueAt(i),
                     environment->HasUint32ValueAt(i),
                     &dematerialized_index);
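  // NOTE: the Translation built above describes, for each frame that has to be
  // reconstructed on deopt (JS frames, arguments adaptor, construct/getter/
  // setter stubs), where every slot's value currently lives: in a register, in
  // a stack slot, or as a literal in the deoptimization literal array.
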
void LCodeGen::AddToTranslation(LEnvironment* environment,
                                Translation* translation,
                                int* object_index_pointer,
                                int* dematerialized_index_pointer) {
  if (op == LEnvironment::materialization_marker()) {
    int object_index = (*object_index_pointer)++;
    if (environment->ObjectIsDuplicateAt(object_index)) {
      int dupe_of = environment->ObjectDuplicateOfAt(object_index);
      translation->DuplicateObject(dupe_of);
    int object_length = environment->ObjectLengthAt(object_index);
    if (environment->ObjectIsArgumentsAt(object_index)) {
      translation->BeginArgumentsObject(object_length);
      translation->BeginCapturedObject(object_length);
    int dematerialized_index = *dematerialized_index_pointer;
    int env_offset = environment->translation_size() + dematerialized_index;
    *dematerialized_index_pointer += object_length;
    for (int i = 0; i < object_length; ++i) {
      LOperand* value = environment->values()->at(env_offset + i);
      AddToTranslation(environment,
                       environment->HasTaggedValueAt(env_offset + i),
                       environment->HasUint32ValueAt(env_offset + i),
                       object_index_pointer,
                       dematerialized_index_pointer);

  if (op->IsStackSlot()) {
      translation->StoreStackSlot(op->index());
    } else if (is_uint32) {
      translation->StoreUint32StackSlot(op->index());
      translation->StoreInt32StackSlot(op->index());
  } else if (op->IsDoubleStackSlot()) {
    translation->StoreDoubleStackSlot(op->index());
  } else if (op->IsRegister()) {
      translation->StoreRegister(reg);
    } else if (is_uint32) {
      translation->StoreUint32Register(reg);
      translation->StoreInt32Register(reg);
  } else if (op->IsDoubleRegister()) {
    translation->StoreDoubleRegister(reg);
  } else if (op->IsConstantOperand()) {
    HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op));
    int src_index = DefineDeoptimizationLiteral(constant->handle(isolate()));
    translation->StoreLiteral(src_index);
void LCodeGen::CallCodeGeneric(Handle<Code> code,
                               RelocInfo::Mode mode,
                               SafepointMode safepoint_mode) {
  RecordSafepointWithLazyDeopt(instr, safepoint_mode);
  if (code->kind() == Code::BINARY_OP_IC ||
      code->kind() == Code::COMPARE_IC) {

void LCodeGen::CallCode(Handle<Code> code,
                        RelocInfo::Mode mode,
                        LInstruction* instr) {
  CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT);

void LCodeGen::CallRuntime(const Runtime::Function* fun,
                           LInstruction* instr,
  ASSERT(instr->HasPointerMap());
  __ CallRuntime(fun, argc, save_doubles);
  RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
void LCodeGen::LoadContextFromDeferred(LOperand* context) {
  if (context->IsRegister()) {
  } else if (context->IsStackSlot()) {
  } else if (context->IsConstantOperand()) {
    HConstant* constant =
        chunk_->LookupConstant(LConstantOperand::cast(context));
    __ LoadObject(esi, Handle<Object>::cast(constant->handle(isolate())));

                                       LInstruction* instr,
                                       LOperand* context) {
  LoadContextFromDeferred(context);
  __ CallRuntimeSaveDoubles(id);
  RecordSafepointWithRegisters(
      instr->pointer_map(), argc, Safepoint::kNoLazyDeopt);
void LCodeGen::RegisterEnvironmentForDeoptimization(
    LEnvironment* environment, Safepoint::DeoptMode mode) {
  if (!environment->HasBeenRegistered()) {
    int frame_count = 0;
    int jsframe_count = 0;
    Translation translation(&translations_, frame_count, jsframe_count, zone());
    int deoptimization_index = deoptimizations_.length();
    int pc_offset = masm()->pc_offset();
    environment->Register(deoptimization_index,
                          translation.index(),
                          (mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
    deoptimizations_.Add(environment, zone());
                            LEnvironment* environment,
  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
  ASSERT(environment->HasBeenRegistered());
  int id = environment->deoptimization_index();
  if (entry == NULL) {
    Abort(kBailoutWasNotPrepared);

  if (DeoptEveryNTimes()) {
    ExternalReference count = ExternalReference::stress_deopt_count(isolate());
    __ mov(eax, Operand::StaticVariable(count));
    __ sub(eax, Immediate(1));
    if (FLAG_trap_on_deopt) __ int3();
    __ mov(eax, Immediate(FLAG_deopt_every_n_times));
    __ mov(Operand::StaticVariable(count), eax);
    __ mov(Operand::StaticVariable(count), eax);

  if (x87_stack_.depth() > 0) {
    EmitFlushX87ForDeopt();

  if (info()->ShouldTrapOnDeopt()) {

  if (jump_table_.is_empty() ||
      jump_table_.last().address != entry ||
      jump_table_.last().needs_frame != !frame_is_built_ ||
      jump_table_.last().bailout_type != bailout_type) {
    Deoptimizer::JumpTableEntry table_entry(entry,
    jump_table_.Add(table_entry, zone());
    __ jmp(&jump_table_.last().label);
    __ j(cc, &jump_table_.last().label);

void LCodeGen::DeoptimizeIf(Condition cc, LEnvironment* environment) {
  DeoptimizeIf(cc, environment, bailout_type);
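  // NOTE: DeoptimizeIf first registers the environment so a Translation exists
  // for this instruction, then emits a conditional jump to a jump-table entry
  // for the matching deopt entry address; consecutive deopts that share the
  // same entry, frame requirement and bailout type reuse the previous label.
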
void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
  int length = deoptimizations_.length();
  if (length == 0) return;
  Handle<DeoptimizationInputData> data =
      factory()->NewDeoptimizationInputData(length, TENURED);

  Handle<ByteArray> translations =
      translations_.CreateByteArray(isolate()->factory());
  data->SetTranslationByteArray(*translations);
  data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));
  data->SetOptimizationId(Smi::FromInt(info_->optimization_id()));
  if (info_->IsOptimizing()) {
    data->SetSharedFunctionInfo(*info_->shared_info());

      factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
  for (int i = 0; i < deoptimization_literals_.length(); i++) {
    literals->set(i, *deoptimization_literals_[i]);
  data->SetLiteralArray(*literals);

  data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id().ToInt()));

  for (int i = 0; i < length; i++) {
    data->SetAstId(i, env->ast_id());
    data->SetTranslationIndex(i, Smi::FromInt(env->translation_index()));
    data->SetArgumentsStackHeight(i,
  code->set_deoptimization_data(*data);

int LCodeGen::DefineDeoptimizationLiteral(Handle<Object> literal) {
  int result = deoptimization_literals_.length();
  for (int i = 0; i < deoptimization_literals_.length(); ++i) {
    if (deoptimization_literals_[i].is_identical_to(literal)) return i;
  deoptimization_literals_.Add(literal, zone());
void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
  ASSERT(deoptimization_literals_.length() == 0);

  const ZoneList<Handle<JSFunction> >* inlined_closures =
      chunk()->inlined_closures();

  for (int i = 0, length = inlined_closures->length();
    DefineDeoptimizationLiteral(inlined_closures->at(i));

  inlined_function_count_ = deoptimization_literals_.length();
void LCodeGen::RecordSafepointWithLazyDeopt(
    LInstruction* instr, SafepointMode safepoint_mode) {
  if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
    RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt);
    ASSERT(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
    RecordSafepointWithRegisters(
        instr->pointer_map(), 0, Safepoint::kLazyDeopt);

void LCodeGen::RecordSafepoint(
    LPointerMap* pointers,
    Safepoint::Kind kind,
    Safepoint::DeoptMode deopt_mode) {
  ASSERT(kind == expected_safepoint_kind_);
  const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();
  Safepoint safepoint =
      safepoints_.DefineSafepoint(masm(), kind, arguments, deopt_mode);
  for (int i = 0; i < operands->length(); i++) {
    LOperand* pointer = operands->at(i);
    if (pointer->IsStackSlot()) {
      safepoint.DefinePointerSlot(pointer->index(), zone());
    } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
      safepoint.DefinePointerRegister(ToRegister(pointer), zone());

void LCodeGen::RecordSafepoint(LPointerMap* pointers,
                               Safepoint::DeoptMode mode) {
  RecordSafepoint(pointers, Safepoint::kSimple, 0, mode);

void LCodeGen::RecordSafepoint(Safepoint::DeoptMode mode) {
  LPointerMap empty_pointers(zone());
  RecordSafepoint(&empty_pointers, mode);

void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
                                            Safepoint::DeoptMode mode) {
  RecordSafepoint(pointers, Safepoint::kWithRegisters, arguments, mode);

void LCodeGen::RecordAndWritePosition(int position) {
  if (position == RelocInfo::kNoPosition) return;
  masm()->positions_recorder()->RecordPosition(position);
  masm()->positions_recorder()->WriteRecordedPositions();
static const char* LabelType(LLabel* label) {
  if (label->is_loop_header()) return " (loop header)";
  if (label->is_osr_entry()) return " (OSR entry)";

void LCodeGen::DoLabel(LLabel* label) {
  Comment(";;; <@%d,#%d> -------------------- B%d%s --------------------",
          current_instruction_,
          label->hydrogen_value()->id(),
  __ bind(label->label());
  current_block_ = label->block_id();

  resolver_.Resolve(move);

      LParallelMove* move = gap->GetParallelMove(inner_pos);

void LCodeGen::DoInstructionGap(LInstructionGap* instr) {

void LCodeGen::DoParameter(LParameter* instr) {
void LCodeGen::DoCallStub(LCallStub* instr) {
  switch (instr->hydrogen()->major_key()) {
    case CodeStub::RegExpExec: {
      RegExpExecStub stub;
      CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
    case CodeStub::SubString: {
      CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
    case CodeStub::StringCompare: {
      StringCompareStub stub;
      CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);

void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
  GenerateOsrPrologue();
void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();

  HMod* hmod = instr->hydrogen();
  int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
  Label dividend_is_not_negative, done;
    __ test(dividend, dividend);
    __ j(not_sign, &dividend_is_not_negative, Label::kNear);
    __ and_(dividend, mask);
      DeoptimizeIf(zero, instr->environment());
    __ jmp(&done, Label::kNear);

  __ bind(&dividend_is_not_negative);
  __ and_(dividend, mask);
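  // NOTE: for |divisor| == 2^k the remainder is just dividend & (2^k - 1). A
  // negative dividend takes a separate path (negate, mask, negate back, partly
  // elided here) so the result keeps the dividend's sign; a zero result on
  // that path stands for -0 and deopts when -0 is not an allowed result.
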
void LCodeGen::DoModByConstI(LModByConstI* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();

  __ TruncatingDiv(dividend, Abs(divisor));
  __ mov(eax, dividend);

  HMod* hmod = instr->hydrogen();
    Label remainder_not_zero;
    __ j(not_zero, &remainder_not_zero, Label::kNear);
    __ cmp(dividend, Immediate(0));
    DeoptimizeIf(less, instr->environment());
    __ bind(&remainder_not_zero);
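  // NOTE: TruncatingDiv divides by a constant with a precomputed magic
  // multiplier (imul plus shifts) instead of idiv and leaves the quotient in
  // edx. The remainder is then rebuilt as dividend - quotient * |divisor|; if
  // it is zero while the dividend is negative the result is -0, which the
  // deopt above rejects.
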
void LCodeGen::DoModI(LModI* instr) {
  HMod* hmod = instr->hydrogen();

  Register left_reg = ToRegister(instr->left());
  Register right_reg = ToRegister(instr->right());
  Register result_reg = ToRegister(instr->result());

    __ test(right_reg, Operand(right_reg));
    DeoptimizeIf(zero, instr->environment());

    Label no_overflow_possible;
    __ j(not_equal, &no_overflow_possible, Label::kNear);
    __ cmp(right_reg, -1);
      DeoptimizeIf(equal, instr->environment());
      __ j(not_equal, &no_overflow_possible, Label::kNear);
      __ Move(result_reg, Immediate(0));
      __ jmp(&done, Label::kNear);
    __ bind(&no_overflow_possible);

    Label positive_left;
    __ test(left_reg, Operand(left_reg));
    __ j(not_sign, &positive_left, Label::kNear);
    __ test(result_reg, Operand(result_reg));
    DeoptimizeIf(zero, instr->environment());
    __ jmp(&done, Label::kNear);
    __ bind(&positive_left);
void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  Register result = ToRegister(instr->result());
  ASSERT(!result.is(dividend));

  HDiv* hdiv = instr->hydrogen();
    __ test(dividend, dividend);
    DeoptimizeIf(zero, instr->environment());
    DeoptimizeIf(zero, instr->environment());
      divisor != 1 && divisor != -1) {
    int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
    __ test(dividend, Immediate(mask));
    DeoptimizeIf(not_zero, instr->environment());

  __ Move(result, dividend);
    if (shift > 1) __ sar(result, 31);
    __ shr(result, 32 - shift);
    __ add(result, dividend);
    __ sar(result, shift);
  if (divisor < 0) __ neg(result);
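  // NOTE: the shift sequence above rounds toward zero for negative dividends:
  // sar/shr turn the sign bit into a bias of (2^shift - 1), which is added to
  // the dividend before the final arithmetic shift; a trailing neg handles
  // negative divisors.
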
void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();

  HDiv* hdiv = instr->hydrogen();
    __ test(dividend, dividend);
    DeoptimizeIf(zero, instr->environment());

  __ TruncatingDiv(dividend, Abs(divisor));
  if (divisor < 0) __ neg(edx);

    __ sub(eax, dividend);
    DeoptimizeIf(not_equal, instr->environment());
void LCodeGen::DoDivI(LDivI* instr) {
  HBinaryOperation* hdiv = instr->hydrogen();
  Register dividend = ToRegister(instr->left());
  Register divisor = ToRegister(instr->right());
  Register remainder = ToRegister(instr->temp());
  Register result = ToRegister(instr->result());

    __ test(divisor, divisor);
    DeoptimizeIf(zero, instr->environment());

    Label dividend_not_zero;
    __ test(dividend, dividend);
    __ j(not_zero, &dividend_not_zero, Label::kNear);
    __ test(divisor, divisor);
    DeoptimizeIf(sign, instr->environment());
    __ bind(&dividend_not_zero);

    Label dividend_not_min_int;
    __ j(not_zero, &dividend_not_min_int, Label::kNear);
    __ cmp(divisor, -1);
    DeoptimizeIf(zero, instr->environment());
    __ bind(&dividend_not_min_int);

  if (hdiv->IsMathFloorOfDiv()) {
    __ test(remainder, remainder);
    __ j(zero, &done, Label::kNear);
    __ xor_(remainder, divisor);
    __ sar(remainder, 31);
    __ add(result, remainder);

    __ test(remainder, remainder);
    DeoptimizeIf(not_zero, instr->environment());
void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();

  if (divisor == 1) return;
    __ sar(dividend, shift);

  Label not_kmin_int, done;
    DeoptimizeIf(zero, instr->environment());
    if (divisor == -1) {
      DeoptimizeIf(overflow, instr->environment());
      __ mov(dividend, Immediate(kMinInt / divisor));
      __ jmp(&done, Label::kNear);
  __ bind(&not_kmin_int);
  __ sar(dividend, shift);
void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();

  HMathFloorOfDiv* hdiv = instr->hydrogen();
    __ test(dividend, dividend);
    DeoptimizeIf(zero, instr->environment());
    __ TruncatingDiv(dividend, Abs(divisor));
    if (divisor < 0) __ neg(edx);

  ASSERT(!temp.is(dividend) && !temp.is(eax) && !temp.is(edx));
  Label needs_adjustment, done;
  __ cmp(dividend, Immediate(0));
  __ j(divisor > 0 ? less : greater, &needs_adjustment, Label::kNear);
  __ TruncatingDiv(dividend, Abs(divisor));
  if (divisor < 0) __ neg(edx);
  __ jmp(&done, Label::kNear);
  __ bind(&needs_adjustment);
  __ lea(temp, Operand(dividend, divisor > 0 ? 1 : -1));
  __ TruncatingDiv(temp, Abs(divisor));
  if (divisor < 0) __ neg(edx);
void LCodeGen::DoMulI(LMulI* instr) {
  LOperand* right = instr->right();

  if (right->IsConstantOperand()) {
    int constant = ToInteger32(LConstantOperand::cast(right));
    if (constant == -1) {
    } else if (constant == 0) {
      __ xor_(left, Operand(left));
    } else if (constant == 2) {
      __ add(left, Operand(left));
          __ lea(left, Operand(left, left, times_2, 0));
          __ lea(left, Operand(left, left, times_4, 0));
          __ lea(left, Operand(left, left, times_8, 0));
          __ imul(left, left, constant);
      __ imul(left, left, constant);

    if (instr->hydrogen()->representation().IsSmi()) {
    DeoptimizeIf(overflow, instr->environment());

    __ test(left, Operand(left));
    if (right->IsConstantOperand()) {
      if (ToInteger32(LConstantOperand::cast(right)) < 0) {
      } else if (ToInteger32(LConstantOperand::cast(right)) == 0) {
        DeoptimizeIf(less, instr->environment());
      DeoptimizeIf(sign, instr->environment());
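    // NOTE: multiplications by small constants are strength-reduced above to
    // xor/add/lea/shift forms, with imul as the fallback. The checks here
    // detect a -0 result: a zero 32-bit product is -0 exactly when the other
    // operand was negative, which is tested via the constant's sign or the
    // sign flag of the remaining operand.
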
void LCodeGen::DoBitI(LBitI* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();
  ASSERT(left->Equals(instr->result()));
  ASSERT(left->IsRegister());

  if (right->IsConstantOperand()) {
                                         instr->hydrogen()->representation());
    switch (instr->op()) {
      case Token::BIT_AND:
      case Token::BIT_XOR:
        if (right_operand == int32_t(~0)) {

    switch (instr->op()) {
      case Token::BIT_AND:
      case Token::BIT_XOR:
void LCodeGen::DoShiftI(LShiftI* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();
  ASSERT(left->Equals(instr->result()));
  ASSERT(left->IsRegister());
  if (right->IsRegister()) {
    switch (instr->op()) {
        if (instr->can_deopt()) {
          DeoptimizeIf(sign, instr->environment());
        if (instr->can_deopt()) {
          DeoptimizeIf(sign, instr->environment());

    int value = ToInteger32(LConstantOperand::cast(right));
    uint8_t shift_count = static_cast<uint8_t>(value & 0x1F);
    switch (instr->op()) {
        if (shift_count == 0 && instr->can_deopt()) {
          DeoptimizeIf(sign, instr->environment());
        if (shift_count != 0) {
        if (shift_count == 0 && instr->can_deopt()) {
          DeoptimizeIf(sign, instr->environment());
        if (shift_count != 0) {
          if (instr->hydrogen_value()->representation().IsSmi() &&
              instr->can_deopt()) {
            if (shift_count != 1) {
            DeoptimizeIf(overflow, instr->environment());
void LCodeGen::DoSubI(LSubI* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();
  ASSERT(left->Equals(instr->result()));

  if (right->IsConstantOperand()) {
           ToImmediate(right, instr->hydrogen()->representation()));
    DeoptimizeIf(overflow, instr->environment());
void LCodeGen::DoConstantI(LConstantI* instr) {
  __ Move(ToRegister(instr->result()), Immediate(instr->value()));

void LCodeGen::DoConstantS(LConstantS* instr) {
  __ Move(ToRegister(instr->result()), Immediate(instr->value()));

void LCodeGen::DoConstantD(LConstantD* instr) {
  double v = instr->value();
  uint64_t int_val = BitCast<uint64_t, double>(v);
  ASSERT(instr->result()->IsDoubleRegister());

      __ push(Immediate(upper));
      __ push(Immediate(lower));

    CpuFeatureScope scope1(masm(), SSE2);
        CpuFeatureScope scope2(masm(), SSE4_1);
          __ Move(temp, Immediate(lower));
          __ movd(res, Operand(temp));
          __ Move(temp, Immediate(upper));
          __ pinsrd(res, Operand(temp), 1);
          __ Move(temp, Immediate(upper));
          __ pinsrd(res, Operand(temp), 1);
        __ Move(temp, Immediate(upper));
        __ movd(res, Operand(temp));
          XMMRegister xmm_scratch = double_scratch0();
          __ Move(temp, Immediate(lower));
          __ movd(xmm_scratch, Operand(temp));
          __ orps(res, xmm_scratch);
void LCodeGen::DoConstantE(LConstantE* instr) {
  __ lea(ToRegister(instr->result()), Operand::StaticVariable(instr->value()));

void LCodeGen::DoConstantT(LConstantT* instr) {
  Handle<Object> handle = instr->value(isolate());
  __ LoadObject(reg, handle);

void LCodeGen::DoMapEnumLength(LMapEnumLength* instr) {
  Register result = ToRegister(instr->result());
  __ EnumLength(result, map);

void LCodeGen::DoDateField(LDateField* instr) {
  Register result = ToRegister(instr->result());
  Register scratch = ToRegister(instr->temp());
  Smi* index = instr->index();
  Label runtime, done;
  ASSERT(object.is(result));

  DeoptimizeIf(zero, instr->environment());
  DeoptimizeIf(not_equal, instr->environment());

  if (index->value() == 0) {
    ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
    __ mov(scratch, Operand::StaticVariable(stamp));
    __ jmp(&done, Label::kNear);
    __ PrepareCallCFunction(2, scratch);
    __ mov(Operand(esp, 0), object);
    __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
Operand LCodeGen::BuildSeqStringOperand(Register string,
  if (index->IsConstantOperand()) {

void LCodeGen::DoSeqStringGetChar(LSeqStringGetChar* instr) {
  Register result = ToRegister(instr->result());
  Register string = ToRegister(instr->string());

  if (FLAG_debug_code) {
                 ? one_byte_seq_type : two_byte_seq_type));
    __ Check(equal, kUnexpectedStringType);

  Operand operand = BuildSeqStringOperand(string, instr->index(), encoding);
    __ movzx_b(result, operand);
    __ movzx_w(result, operand);

void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
  Register string = ToRegister(instr->string());

  if (FLAG_debug_code) {
        ? one_byte_seq_type : two_byte_seq_type;
    __ EmitSeqStringSetCharCheck(string, index, value, encoding_mask);

  Operand operand = BuildSeqStringOperand(string, instr->index(), encoding);
  if (instr->value()->IsConstantOperand()) {
      __ mov_b(operand, static_cast<int8_t>(value));
      __ mov_w(operand, static_cast<int16_t>(value));
      __ mov_b(operand, value);
      __ mov_w(operand, value);
void LCodeGen::DoAddI(LAddI* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();

  if (LAddI::UseLea(instr->hydrogen()) && !left->Equals(instr->result())) {
    if (right->IsConstantOperand()) {
                                        instr->hydrogen()->representation());
    if (right->IsConstantOperand()) {
             ToImmediate(right, instr->hydrogen()->representation()));
      DeoptimizeIf(overflow, instr->environment());
void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
  CpuFeatureScope scope(masm(), SSE2);
  LOperand* left = instr->left();
  LOperand* right = instr->right();
  ASSERT(left->Equals(instr->result()));
  HMathMinMax::Operation operation = instr->hydrogen()->operation();
  if (instr->hydrogen()->representation().IsSmiOrInteger32()) {
    Condition condition = (operation == HMathMinMax::kMathMin)
    if (right->IsConstantOperand()) {
      Immediate immediate = ToImmediate(LConstantOperand::cast(instr->right()),
                                        instr->hydrogen()->representation());
      __ cmp(left_op, immediate);
      __ j(condition, &return_left, Label::kNear);
      __ mov(left_op, immediate);
      __ cmp(left_reg, right_op);
      __ j(condition, &return_left, Label::kNear);
      __ mov(left_reg, right_op);
    __ bind(&return_left);
    ASSERT(instr->hydrogen()->representation().IsDouble());
    Label check_nan_left, check_zero, return_left, return_right;
    __ ucomisd(left_reg, right_reg);
    __ j(equal, &check_zero, Label::kNear);
    __ j(condition, &return_left, Label::kNear);
    __ jmp(&return_right, Label::kNear);

    __ bind(&check_zero);
    XMMRegister xmm_scratch = double_scratch0();
    __ xorps(xmm_scratch, xmm_scratch);
    __ ucomisd(left_reg, xmm_scratch);
    if (operation == HMathMinMax::kMathMin) {
      __ orpd(left_reg, right_reg);
      __ addsd(left_reg, right_reg);
    __ jmp(&return_left, Label::kNear);

    __ bind(&check_nan_left);
    __ ucomisd(left_reg, left_reg);
    __ bind(&return_right);
    __ movaps(left_reg, right_reg);

    __ bind(&return_left);
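    // NOTE: equal double operands need special handling because +0 and -0
    // compare equal: for min the sign bits are OR'ed (orpd, so -0 wins) and
    // for max the values are added (addsd, so +0 wins unless both are -0).
    // The ucomisd of left_reg with itself is the usual NaN check, since NaN
    // is unordered even with itself.
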
void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
    CpuFeatureScope scope(masm(), SSE2);
    switch (instr->op()) {
        __ addsd(left, right);
        __ subsd(left, right);
        __ mulsd(left, right);
        __ divsd(left, right);
        __ movaps(left, left);
        __ PrepareCallCFunction(4, eax);
            ExternalReference::mod_two_doubles_operation(isolate()),
        __ fstp_d(Operand(esp, 0));
        __ movsd(result, Operand(esp, 0));

  if (instr->op() != Token::MOD) {
    switch (instr->op()) {
        __ PrepareCallCFunction(4, eax);
            ExternalReference::mod_two_doubles_operation(isolate()),
void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
  CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);

template<class InstrType>
void LCodeGen::EmitBranch(InstrType instr, Condition cc) {
  int left_block = instr->TrueDestination(chunk_);
  int right_block = instr->FalseDestination(chunk_);

  int next_block = GetNextEmittedBlock();

    EmitGoto(left_block);
  } else if (left_block == next_block) {
  } else if (right_block == next_block) {
    __ j(cc, chunk_->GetAssemblyLabel(left_block));
    __ j(cc, chunk_->GetAssemblyLabel(left_block));
    __ jmp(chunk_->GetAssemblyLabel(right_block));

template<class InstrType>
void LCodeGen::EmitFalseBranch(InstrType instr, Condition cc) {
  int false_block = instr->FalseDestination(chunk_);
    __ jmp(chunk_->GetAssemblyLabel(false_block));
    __ j(cc, chunk_->GetAssemblyLabel(false_block));
void LCodeGen::DoBranch(LBranch* instr) {
  Representation r = instr->hydrogen()->value()->representation();
  if (r.IsSmiOrInteger32()) {
    __ test(reg, Operand(reg));
  } else if (r.IsDouble()) {
    CpuFeatureScope scope(masm(), SSE2);
    XMMRegister xmm_scratch = double_scratch0();
    __ xorps(xmm_scratch, xmm_scratch);
    __ ucomisd(reg, xmm_scratch);
    HType type = instr->hydrogen()->value()->type();
    if (type.IsBoolean()) {
      __ cmp(reg, factory()->true_value());
      EmitBranch(instr, equal);
    } else if (type.IsSmi()) {
      __ test(reg, Operand(reg));
    } else if (type.IsJSArray()) {
    } else if (type.IsHeapNumber()) {
      CpuFeatureScope scope(masm(), SSE2);
      XMMRegister xmm_scratch = double_scratch0();
      __ xorps(xmm_scratch, xmm_scratch);
    } else if (type.IsString()) {
      ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types();
        __ cmp(reg, factory()->undefined_value());
        __ j(equal, instr->FalseLabel(chunk_));
        __ cmp(reg, factory()->true_value());
        __ j(equal, instr->TrueLabel(chunk_));
        __ cmp(reg, factory()->false_value());
        __ j(equal, instr->FalseLabel(chunk_));
        __ cmp(reg, factory()->null_value());
        __ j(equal, instr->FalseLabel(chunk_));
        __ test(reg, Operand(reg));
        __ j(equal, instr->FalseLabel(chunk_));
        __ JumpIfSmi(reg, instr->TrueLabel(chunk_));
      } else if (expected.NeedsMap()) {
        DeoptimizeIf(zero, instr->environment());

      if (expected.NeedsMap()) {
      if (expected.CanBeUndetectable()) {
        __ jmp(instr->FalseLabel(chunk_));
        __ bind(&not_string);
        __ j(equal, instr->TrueLabel(chunk_));
        Label not_heap_number;
               factory()->heap_number_map());
          CpuFeatureScope scope(masm(), SSE2);
          XMMRegister xmm_scratch = double_scratch0();
          __ xorps(xmm_scratch, xmm_scratch);
        __ j(zero, instr->FalseLabel(chunk_));
        __ jmp(instr->TrueLabel(chunk_));
        __ bind(&not_heap_number);

      if (!expected.IsGeneric()) {

void LCodeGen::EmitGoto(int block) {

void LCodeGen::DoClobberDoubles(LClobberDoubles* instr) {

void LCodeGen::DoGoto(LGoto* instr) {
  EmitGoto(instr->block_id());
    case Token::EQ_STRICT:
    case Token::NE_STRICT:
    case Token::INSTANCEOF:

void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();
  Condition cc = TokenToCondition(instr->op(), instr->is_double());

  if (left->IsConstantOperand() && right->IsConstantOperand()) {
    double left_val = ToDouble(LConstantOperand::cast(left));
    double right_val = ToDouble(LConstantOperand::cast(right));
    int next_block = EvalComparison(instr->op(), left_val, right_val) ?
        instr->TrueDestination(chunk_) : instr->FalseDestination(chunk_);
    EmitGoto(next_block);
    if (instr->is_double()) {
      CpuFeatureScope scope(masm(), SSE2);
      if (right->IsConstantOperand()) {
               ToImmediate(right, instr->hydrogen()->representation()));
      } else if (left->IsConstantOperand()) {
               ToImmediate(left, instr->hydrogen()->representation()));
    EmitBranch(instr, cc);
void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
  if (instr->right()->IsConstantOperand()) {
    Handle<Object> right = ToHandle(LConstantOperand::cast(instr->right()));
    __ CmpObject(left, right);
    Operand right = ToOperand(instr->right());
    __ cmp(left, right);
  EmitBranch(instr, equal);

void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) {
  if (instr->hydrogen()->representation().IsTagged()) {
    Register input_reg = ToRegister(instr->object());
    __ cmp(input_reg, factory()->the_hole_value());
    EmitBranch(instr, equal);

    CpuFeatureScope scope(masm(), SSE2);
    __ ucomisd(input_reg, input_reg);

    CpuFeatureScope scope(masm(), SSE2);
  EmitBranch(instr, equal);

void LCodeGen::DoCompareMinusZeroAndBranch(LCompareMinusZeroAndBranch* instr) {
  Representation rep = instr->hydrogen()->value()->representation();
  ASSERT(!rep.IsInteger32());
  Register scratch = ToRegister(instr->temp());

  if (rep.IsDouble()) {
    CpuFeatureScope use_sse2(masm(), SSE2);
    XMMRegister xmm_scratch = double_scratch0();
    __ xorps(xmm_scratch, xmm_scratch);
    __ ucomisd(xmm_scratch, value);
    __ movmskpd(scratch, value);
    __ test(scratch, Immediate(1));
    Handle<Map> map = masm()->isolate()->factory()->heap_number_map();
    __ CheckMap(value, map, instr->FalseLabel(chunk()), DO_SMI_CHECK);
           Immediate(0x00000000));
    EmitBranch(instr, equal);
Condition LCodeGen::EmitIsObject(Register input,
                                 Label* is_not_object,
  __ JumpIfSmi(input, is_not_object);

  __ cmp(input, isolate()->factory()->null_value());

void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
      reg, temp, instr->FalseLabel(chunk_), instr->TrueLabel(chunk_));

  EmitBranch(instr, true_cond);

Condition LCodeGen::EmitIsString(Register input,
                                 Label* is_not_string,
    __ JumpIfSmi(input, is_not_string);

  Condition cond = masm_->IsObjectStringType(input, temp1, temp1);

void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
      instr->hydrogen()->value()->IsHeapObject()
      reg, temp, instr->FalseLabel(chunk_), check_needed);

  EmitBranch(instr, true_cond);

void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
  Operand input = ToOperand(instr->value());
  EmitBranch(instr, zero);

void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
  if (!instr->hydrogen()->value()->IsHeapObject()) {
    __ JumpIfSmi(input, instr->FalseLabel(chunk_));

    case Token::EQ_STRICT:

void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
  CallCode(ic, RelocInfo::CODE_TARGET, instr);

  Condition condition = ComputeCompareCondition(op);
  EmitBranch(instr, condition);

static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {

static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
  if (from == to) return equal;

void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
  if (!instr->hydrogen()->value()->IsHeapObject()) {
    __ JumpIfSmi(input, instr->FalseLabel(chunk_));
  __ CmpObjectType(input, TestType(instr->hydrogen()), temp);
  EmitBranch(instr, BranchCondition(instr->hydrogen()));

void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
  Register result = ToRegister(instr->result());
  __ AssertString(input);
  __ IndexFromHash(result, result);

void LCodeGen::DoHasCachedArrayIndexAndBranch(
    LHasCachedArrayIndexAndBranch* instr) {
  EmitBranch(instr, equal);

void LCodeGen::EmitClassOfTest(Label* is_true,
                               Handle<String> class_name,
  ASSERT(!input.is(temp2));
  __ JumpIfSmi(input, is_false);
  __ cmp(temp, class_name);

void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
  Handle<String> class_name = instr->hydrogen()->class_name();

  EmitClassOfTest(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_),
                  class_name, input, temp, temp2);

  EmitBranch(instr, equal);
void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
  EmitBranch(instr, equal);

void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
  CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);

  Label true_value, done;
  __ j(zero, &true_value, Label::kNear);
  __ mov(ToRegister(instr->result()), factory()->false_value());
  __ jmp(&done, Label::kNear);
  __ bind(&true_value);
  __ mov(ToRegister(instr->result()), factory()->true_value());

void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
    DeferredInstanceOfKnownGlobal(LCodeGen* codegen,
                                  LInstanceOfKnownGlobal* instr,
                                  const X87Stack& x87_stack)
      codegen()->DoDeferredInstanceOfKnownGlobal(instr_, &map_check_);
    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
    Label* map_check() { return &map_check_; }
    LInstanceOfKnownGlobal* instr_;

  DeferredInstanceOfKnownGlobal* deferred;
  deferred = new(zone()) DeferredInstanceOfKnownGlobal(this, instr, x87_stack_);

  Label done, false_result;
  Register object = ToRegister(instr->value());

  __ JumpIfSmi(object, &false_result, Label::kNear);

  __ bind(deferred->map_check());
  Handle<Cell> cache_cell = factory()->NewCell(factory()->the_hole_value());
  __ cmp(map, Operand::ForCell(cache_cell));
  __ mov(eax, factory()->the_hole_value());
  __ jmp(&done, Label::kNear);

  __ bind(&cache_miss);
  __ cmp(object, factory()->null_value());
  __ j(equal, &false_result, Label::kNear);

  Condition is_string = masm_->IsObjectStringType(object, temp, temp);
  __ j(is_string, &false_result, Label::kNear);

  __ jmp(deferred->entry());

  __ bind(&false_result);
  __ mov(ToRegister(instr->result()), factory()->false_value());

  __ bind(deferred->exit());

  PushSafepointRegistersScope scope(this);
  InstanceofStub stub(flags);
  ASSERT(MacroAssembler::SafepointRegisterStackIndex(temp) == 0);
  static const int kAdditionalDelta = 13;
  int delta = masm_->SizeOfCodeGeneratedSince(map_check) + kAdditionalDelta;
  __ mov(temp, Immediate(delta));
  __ StoreToSafepointRegisterSlot(temp, temp);
  CallCodeGeneric(stub.GetCode(isolate()),
                  RelocInfo::CODE_TARGET,
                  RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
  LEnvironment* env = instr->GetDeferredLazyDeoptimizationEnvironment();
  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
  __ StoreToSafepointRegisterSlot(eax, eax);

void LCodeGen::DoCmpT(LCmpT* instr) {
  CallCode(ic, RelocInfo::CODE_TARGET, instr);

  Condition condition = ComputeCompareCondition(op);
  Label true_value, done;
  __ j(condition, &true_value, Label::kNear);
  __ mov(ToRegister(instr->result()), factory()->false_value());
  __ jmp(&done, Label::kNear);
  __ bind(&true_value);
  __ mov(ToRegister(instr->result()), factory()->true_value());
void LCodeGen::EmitReturn(LReturn* instr, bool dynamic_frame_alignment) {
  int extra_value_count = dynamic_frame_alignment ? 2 : 1;

  if (instr->has_constant_parameter_count()) {
    int parameter_count = ToInteger32(instr->constant_parameter_count());
    if (dynamic_frame_alignment && FLAG_debug_code) {
      __ Assert(equal, kExpectedAlignmentMarker);

    Register reg = ToRegister(instr->parameter_count());
    Register return_addr_reg = reg.is(ecx) ? ebx : ecx;
    if (dynamic_frame_alignment && FLAG_debug_code) {
      ASSERT(extra_value_count == 2);
      __ Assert(equal, kExpectedAlignmentMarker);
    __ pop(return_addr_reg);
    if (dynamic_frame_alignment) {
    __ jmp(return_addr_reg);

void LCodeGen::DoReturn(LReturn* instr) {
  if (FLAG_trace && info()->IsOptimizing()) {
    __ CallRuntime(Runtime::kTraceExit, 1);
    RestoreCallerDoubles();
  if (dynamic_frame_alignment_) {
  int no_frame_start = -1;
    no_frame_start = masm_->pc_offset();
  if (dynamic_frame_alignment_) {
    __ j(equal, &no_padding, Label::kNear);
    EmitReturn(instr, true);
    __ bind(&no_padding);
  EmitReturn(instr, false);
  if (no_frame_start != -1) {
    info()->AddNoFrameRange(no_frame_start, masm_->pc_offset());
void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
  Register result = ToRegister(instr->result());
  __ mov(result, Operand::ForCell(instr->hydrogen()->cell().handle()));
  if (instr->hydrogen()->RequiresHoleCheck()) {
    __ cmp(result, factory()->the_hole_value());
    DeoptimizeIf(equal, instr->environment());

void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
  __ mov(ecx, instr->name());
  CallCode(ic, RelocInfo::CODE_TARGET, instr);

void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
  Handle<PropertyCell> cell_handle = instr->hydrogen()->cell().handle();

  if (instr->hydrogen()->RequiresHoleCheck()) {
    __ cmp(Operand::ForCell(cell_handle), factory()->the_hole_value());
    DeoptimizeIf(equal, instr->environment());

  __ mov(Operand::ForCell(cell_handle), value);

void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
  Register context = ToRegister(instr->context());
  Register result = ToRegister(instr->result());

  if (instr->hydrogen()->RequiresHoleCheck()) {
    __ cmp(result, factory()->the_hole_value());
    if (instr->hydrogen()->DeoptimizesOnHole()) {
      DeoptimizeIf(equal, instr->environment());
      __ mov(result, factory()->undefined_value());
      __ bind(&is_not_hole);

void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
  Register context = ToRegister(instr->context());

  Label skip_assignment;
  if (instr->hydrogen()->RequiresHoleCheck()) {
    __ cmp(target, factory()->the_hole_value());
    if (instr->hydrogen()->DeoptimizesOnHole()) {
      DeoptimizeIf(equal, instr->environment());

  __ mov(target, value);
  if (instr->hydrogen()->NeedsWriteBarrier()) {
        instr->hydrogen()->value()->IsHeapObject()
    __ RecordWriteContextSlot(context,
                              GetSaveFPRegsMode(),

  __ bind(&skip_assignment);
void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
  HObjectAccess access = instr->hydrogen()->access();
  int offset = access.offset();

  if (access.IsExternalMemory()) {
    Register result = ToRegister(instr->result());
    MemOperand operand = instr->object()->IsConstantOperand()
        ? MemOperand::StaticVariable(ToExternalReference(
              LConstantOperand::cast(instr->object())))
    __ Load(result, operand, access.representation());

  Register object = ToRegister(instr->object());
  if (instr->hydrogen()->representation().IsDouble()) {
      CpuFeatureScope scope(masm(), SSE2);

  Register result = ToRegister(instr->result());
  if (!access.IsInobject()) {

void LCodeGen::EmitPushTaggedOperand(LOperand* operand) {
  ASSERT(!operand->IsDoubleRegister());
  if (operand->IsConstantOperand()) {
    Handle<Object> object = ToHandle(LConstantOperand::cast(operand));
    if (object->IsSmi()) {
      __ Push(Handle<Smi>::cast(object));
      __ PushHeapObject(Handle<HeapObject>::cast(object));
  } else if (operand->IsRegister()) {

void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
  __ mov(ecx, instr->name());
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
  Register function = ToRegister(instr->function());
  Register result = ToRegister(instr->result());

  DeoptimizeIf(not_equal, instr->environment());

  __ cmp(Operand(result), Immediate(factory()->the_hole_value()));
  DeoptimizeIf(equal, instr->environment());

  __ jmp(&done, Label::kNear);

  __ bind(&non_instance);

void LCodeGen::DoLoadRoot(LLoadRoot* instr) {
  Register result = ToRegister(instr->result());
  __ LoadRoot(result, instr->index());
void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
  Register arguments = ToRegister(instr->arguments());
  Register result = ToRegister(instr->result());
  if (instr->length()->IsConstantOperand() &&
      instr->index()->IsConstantOperand()) {
    int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
    int const_length = ToInteger32(LConstantOperand::cast(instr->length()));
    int index = (const_length - const_index) + 1;
    Register length = ToRegister(instr->length());
    Operand index = ToOperand(instr->index());
    __ sub(length, index);
void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
  LOperand* key = instr->key();
  if (!key->IsConstantOperand() &&
      ExternalArrayOpRequiresTemp(instr->hydrogen()->key()->representation(),
  Operand operand(BuildFastArrayOperand(
      instr->hydrogen()->key()->representation(),
      instr->additional_index()));
    CpuFeatureScope scope(masm(), SSE2);
    __ movss(result, operand);
    __ cvtss2sd(result, result);
    CpuFeatureScope scope(masm(), SSE2);
    Register result(ToRegister(instr->result()));
    switch (elements_kind) {
        __ movsx_b(result, operand);
        __ movzx_b(result, operand);
        __ movsx_w(result, operand);
        __ movzx_w(result, operand);
        __ mov(result, operand);
        __ mov(result, operand);
          __ test(result, Operand(result));
          DeoptimizeIf(negative, instr->environment());

void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
  if (instr->hydrogen()->RequiresHoleCheck()) {
    Operand hole_check_operand = BuildFastArrayOperand(
        instr->elements(), instr->key(),
        instr->hydrogen()->key()->representation(),
        instr->additional_index());
    DeoptimizeIf(equal, instr->environment());

  Operand double_load_operand = BuildFastArrayOperand(
      instr->hydrogen()->key()->representation(),
      instr->additional_index());
    CpuFeatureScope scope(masm(), SSE2);
    __ movsd(result, double_load_operand);

void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
  Register result = ToRegister(instr->result());
      BuildFastArrayOperand(instr->elements(),
                            instr->hydrogen()->key()->representation(),
                            instr->additional_index()));

  if (instr->hydrogen()->RequiresHoleCheck()) {
      DeoptimizeIf(not_equal, instr->environment());
      __ cmp(result, factory()->the_hole_value());
      DeoptimizeIf(equal, instr->environment());

void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) {
  if (instr->is_typed_elements()) {
    DoLoadKeyedExternalArray(instr);
  } else if (instr->hydrogen()->representation().IsDouble()) {
    DoLoadKeyedFixedDoubleArray(instr);
    DoLoadKeyedFixedArray(instr);
Operand LCodeGen::BuildFastArrayOperand(
    LOperand* elements_pointer,
    Representation key_representation,
    uint32_t additional_index) {
  Register elements_pointer_reg = ToRegister(elements_pointer);
  int shift_size = element_shift_size;
  if (key->IsConstantOperand()) {
    int constant_value = ToInteger32(LConstantOperand::cast(key));
    if (constant_value & 0xF0000000) {
      Abort(kArrayIndexConstantValueTooBig);
    return Operand(elements_pointer_reg,
                   ((constant_value + additional_index) << shift_size)
  if (key_representation.IsSmi() && (shift_size >= 1)) {
  return Operand(elements_pointer_reg,
                 offset + (additional_index << element_shift_size));
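  // NOTE: with a constant key the element address is folded entirely into the
  // operand's displacement. With a register key the element size becomes the
  // operand's scale factor; a smi key already carries a factor of two from its
  // tag, so the shift size is reduced by one rather than untagging the key.
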
void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
  Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
  Register result = ToRegister(instr->result());
  if (instr->hydrogen()->from_inlined()) {
    Label done, adapted;
    __ cmp(Operand(result),
    __ j(equal, &adapted, Label::kNear);
    __ mov(result, Operand(ebp));
    __ jmp(&done, Label::kNear);

void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
  Operand elem = ToOperand(instr->elements());
  Register result = ToRegister(instr->result());

  __ mov(result, Immediate(scope()->num_parameters()));
  __ j(equal, &done, Label::kNear);

  __ mov(result, Operand(result,
  __ SmiUntag(result);
3615 void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
3616 Register receiver =
ToRegister(instr->receiver());
3617 Register
function =
ToRegister(instr->function());
3622 Label receiver_ok, global_object;
3623 Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear;
3624 Register scratch = ToRegister(instr->temp());
3626 if (!instr->hydrogen()->known_function()) {
3642 __ cmp(receiver, factory()->null_value());
3643 __ j(equal, &global_object, Label::kNear);
3644 __ cmp(receiver, factory()->undefined_value());
3645 __ j(equal, &global_object, Label::kNear);
3649 DeoptimizeIf(equal, instr->environment());
3651 DeoptimizeIf(below, instr->environment());
3653 __ jmp(&receiver_ok, Label::kNear);
3654 __ bind(&global_object);
3657 __ mov(receiver, Operand(receiver, global_offset));
3660 __ bind(&receiver_ok);
3664 void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
3665 Register receiver = ToRegister(instr->receiver());
3666 Register function = ToRegister(instr->function());
3667 Register length = ToRegister(instr->length());
3668 Register elements = ToRegister(instr->elements());
3675 const uint32_t kArgumentsLimit = 1 * KB;
3676 __ cmp(length, kArgumentsLimit);
3677 DeoptimizeIf(above, instr->environment());
3680 __ mov(receiver, length);
3686 __ test(length, Operand(length));
3687 __ j(zero, &invoke, Label::kNear);
3695 ASSERT(instr->HasPointerMap());
3696 LPointerMap* pointers = instr->pointer_map();
3698 this, pointers, Safepoint::kLazyDeopt);
3699 ParameterCount actual(eax);
3700 __ InvokeFunction(function, actual, CALL_FUNCTION, safepoint_generator);
3704 void LCodeGen::DoDebugBreak(LDebugBreak* instr) {
3709 void LCodeGen::DoPushArgument(LPushArgument* instr) {
3710 LOperand* argument = instr->value();
3711 EmitPushTaggedOperand(argument);
3715 void LCodeGen::DoDrop(LDrop* instr) {
3716 __ Drop(instr->count());
3720 void LCodeGen::DoThisFunction(LThisFunction* instr) {
3721 Register result = ToRegister(instr->result());
3726 void LCodeGen::DoContext(LContext* instr) {
3727 Register result = ToRegister(instr->result());
3728 if (info()->IsOptimizing()) {
3737 void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
3740 __ push(Immediate(instr->hydrogen()->pairs()));
3742 CallRuntime(Runtime::kHiddenDeclareGlobals, 3, instr);
3746 void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
3749 LInstruction* instr,
3750 EDIState edi_state) {
3751 bool dont_adapt_arguments =
3753 bool can_invoke_directly =
3754 dont_adapt_arguments || formal_parameter_count == arity;
3756 if (can_invoke_directly) {
3757 if (edi_state == EDI_UNINITIALIZED) {
3758 __ LoadHeapObject(edi, function);
3766 if (dont_adapt_arguments) {
3771 if (function.is_identical_to(info()->closure())) {
3776 RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
3779 LPointerMap* pointers = instr->pointer_map();
3781 this, pointers, Safepoint::kLazyDeopt);
3782 ParameterCount count(arity);
3783 ParameterCount expected(formal_parameter_count);
3784 __ InvokeFunction(function, expected, count, CALL_FUNCTION, generator);
3789 void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
3792 LPointerMap* pointers = instr->pointer_map();
3795 if (instr->target()->IsConstantOperand()) {
3796 LConstantOperand* target = LConstantOperand::cast(instr->target());
3798 generator.BeforeCall(__ CallSize(code, RelocInfo::CODE_TARGET));
3799 __ call(code, RelocInfo::CODE_TARGET);
3801 ASSERT(instr->target()->IsRegister());
3802 Register target = ToRegister(instr->target());
3803 generator.BeforeCall(__ CallSize(Operand(target)));
3807 generator.AfterCall();
3811 void LCodeGen::DoCallJSFunction(LCallJSFunction* instr) {
3815 if (instr->hydrogen()->pass_argument_count()) {
3816 __ mov(eax, instr->arity());
3822 bool is_self_call = false;
3823 if (instr->hydrogen()->function()->IsConstant()) {
3824 HConstant* fun_const = HConstant::cast(instr->hydrogen()->function());
3825 Handle<JSFunction> jsfun =
3827 is_self_call = jsfun.is_identical_to(info()->closure());
3836 RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
3841 Register input_reg = ToRegister(instr->value());
3843 factory()->heap_number_map());
3844 DeoptimizeIf(not_equal, instr->environment());
3846 Label slow, allocated, done;
3847 Register tmp = input_reg.is(eax) ? ecx : eax;
3851 PushSafepointRegistersScope scope(this);
3859 __ j(zero, &done, Label::kNear);
3861 __ AllocateHeapNumber(tmp, tmp2, no_reg, &slow);
3862 __ jmp(&allocated, Label::kNear);
3866 CallRuntimeFromDeferred(Runtime::kHiddenAllocateHeapNumber, 0,
3867 instr, instr->context());
3869 if (!tmp.is(eax)) __ mov(tmp, eax);
3871 __ LoadFromSafepointRegisterSlot(input_reg, input_reg);
3873 __ bind(&allocated);
3879 __ StoreToSafepointRegisterSlot(input_reg, tmp);
3885 void LCodeGen::EmitIntegerMathAbs(LMathAbs* instr) {
3886 Register input_reg = ToRegister(instr->value());
3887 __ test(input_reg, Operand(input_reg));
3891 DeoptimizeIf(negative, instr->environment());
3892 __ bind(&is_positive);
3896 void LCodeGen::DoMathAbs(LMathAbs* instr) {
3900 DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen,
3902 const X87Stack& x87_stack)
3905 codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
3907 virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
3912 ASSERT(instr->value()->Equals(instr->result()));
3913 Representation r = instr->hydrogen()->value()->representation();
3915 CpuFeatureScope scope(masm(), SSE2);
3917 XMMRegister scratch = double_scratch0();
3919 __ xorps(scratch, scratch);
3920 __ subsd(scratch, input_reg);
3921 __ andps(input_reg, scratch);
3922 } else if (r.IsSmiOrInteger32()) {
3923 EmitIntegerMathAbs(instr);
3925 DeferredMathAbsTaggedHeapNumber* deferred =
3926 new(zone()) DeferredMathAbsTaggedHeapNumber(this, instr, x87_stack_);
3927 Register input_reg = ToRegister(instr->value());
3929 __ JumpIfNotSmi(input_reg, deferred->entry());
3930 EmitIntegerMathAbs(instr);
3931 __ bind(deferred->exit());
3936 void LCodeGen::DoMathFloor(LMathFloor* instr) {
3937 CpuFeatureScope scope(masm(), SSE2);
3938 XMMRegister xmm_scratch = double_scratch0();
3939 Register output_reg = ToRegister(instr->result());
3943 CpuFeatureScope scope(masm(), SSE4_1);
3947 __ xorps(xmm_scratch, xmm_scratch);
3948 __ ucomisd(input_reg, xmm_scratch);
3950 __ movmskpd(output_reg, input_reg);
3951 __ test(output_reg, Immediate(1));
3952 DeoptimizeIf(not_zero, instr->environment());
3956 __ cvttsd2si(output_reg, Operand(xmm_scratch));
3958 __ cmp(output_reg, 0x1);
3959 DeoptimizeIf(overflow, instr->environment());
3961 Label negative_sign, done;
3963 __ xorps(xmm_scratch, xmm_scratch);
3964 __ ucomisd(input_reg, xmm_scratch);
3966 __ j(below, &negative_sign, Label::kNear);
3970 Label positive_sign;
3971 __ j(above, &positive_sign, Label::kNear);
3972 __ movmskpd(output_reg, input_reg);
3973 __ test(output_reg, Immediate(1));
3974 DeoptimizeIf(not_zero, instr->environment());
3975 __ Move(output_reg, Immediate(0));
3976 __ jmp(&done, Label::kNear);
3977 __ bind(&positive_sign);
3981 __ cvttsd2si(output_reg, Operand(input_reg));
3983 __ cmp(output_reg, 0x1);
3984 DeoptimizeIf(overflow, instr->environment());
3985 __ jmp(&done, Label::kNear);
3988 __ bind(&negative_sign);
3990 __ cvttsd2si(output_reg, Operand(input_reg));
3991 __ Cvtsi2sd(xmm_scratch, output_reg);
3992 __ ucomisd(input_reg, xmm_scratch);
3993 __ j(equal, &done, Label::kNear);
3994 __ sub(output_reg, Immediate(1));
3995 DeoptimizeIf(overflow, instr->environment());
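// --- Illustrative sketch (not part of the original file) ---
// Scalar C++ model of the non-deopting path of DoMathFloor above: truncate
// toward zero (cvttsd2si), then subtract one when truncation rounded a
// negative, non-integral input up. Out-of-range inputs and the -0 case that
// the generated code deoptimizes on are not modelled here.
static int32_t FloorToInt32Model(double x) {
  int32_t truncated = static_cast<int32_t>(x);           // cvttsd2si
  if (static_cast<double>(truncated) > x) --truncated;    // negative, non-integral
  return truncated;
}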
4002 void LCodeGen::DoMathRound(LMathRound* instr) {
4003 CpuFeatureScope scope(masm(), SSE2);
4004 Register output_reg = ToRegister(instr->result());
4006 XMMRegister xmm_scratch = double_scratch0();
4008 ExternalReference one_half = ExternalReference::address_of_one_half();
4009 ExternalReference minus_one_half =
4010 ExternalReference::address_of_minus_one_half();
4012 Label done, round_to_zero, below_one_half, do_not_compensate;
4013 Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear;
4015 __ movsd(xmm_scratch, Operand::StaticVariable(one_half));
4016 __ ucomisd(xmm_scratch, input_reg);
4017 __ j(above, &below_one_half, Label::kNear);
4020 __ addsd(xmm_scratch, input_reg);
4021 __ cvttsd2si(output_reg, Operand(xmm_scratch));
4023 __ cmp(output_reg, 0x1);
4024 __ RecordComment("D2I conversion overflow");
4025 DeoptimizeIf(overflow, instr->environment());
4026 __ jmp(&done, dist);
4028 __ bind(&below_one_half);
4029 __ movsd(xmm_scratch, Operand::StaticVariable(minus_one_half));
4030 __ ucomisd(xmm_scratch, input_reg);
4035 __ movaps(input_temp, input_reg);
4036 __ subsd(input_temp, xmm_scratch);
4037 __ cvttsd2si(output_reg, Operand(input_temp));
4039 __ cmp(output_reg, 0x1);
4040 __ RecordComment("D2I conversion overflow");
4041 DeoptimizeIf(overflow, instr->environment());
4043 __ Cvtsi2sd(xmm_scratch, output_reg);
4044 __ ucomisd(xmm_scratch, input_temp);
4046 __ sub(output_reg, Immediate(1));
4048 __ jmp(&done, dist);
4050 __ bind(&round_to_zero);
4055 __ movmskpd(output_reg, input_reg);
4056 __ test(output_reg, Immediate(1));
4057 __ RecordComment("Minus zero");
4058 DeoptimizeIf(not_zero, instr->environment());
4060 __ Move(output_reg, Immediate(0));
4065 void LCodeGen::DoMathSqrt(LMathSqrt* instr) {
4066 CpuFeatureScope scope(masm(), SSE2);
4069 __ sqrtsd(input_reg, input_reg);
4073 void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
4074 CpuFeatureScope scope(masm(), SSE2);
4075 XMMRegister xmm_scratch = double_scratch0();
4077 Register scratch = ToRegister(instr->temp());
4086 __ mov(scratch, 0xFF800000);
4087 __ movd(xmm_scratch, scratch);
4088 __ cvtss2sd(xmm_scratch, xmm_scratch);
4089 __ ucomisd(input_reg, xmm_scratch);
4093 __ j(carry, &sqrt, Label::kNear);
4095 __ xorps(input_reg, input_reg);
4096 __ subsd(input_reg, xmm_scratch);
4097 __ jmp(&done, Label::kNear);
4101 __ xorps(xmm_scratch, xmm_scratch);
4102 __ addsd(input_reg, xmm_scratch);
4103 __ sqrtsd(input_reg, input_reg);
4108 void LCodeGen::DoPower(LPower* instr) {
4109 Representation exponent_type = instr->hydrogen()->right()->representation();
4112 ASSERT(!instr->right()->IsDoubleRegister() ||
4114 ASSERT(!instr->right()->IsRegister() ||
4119 if (exponent_type.IsSmi()) {
4122 } else if (exponent_type.IsTagged()) {
4124 __ JumpIfSmi(eax, &no_deopt);
4126 DeoptimizeIf(not_equal, instr->environment());
4130 } else if (exponent_type.IsInteger32()) {
4134 ASSERT(exponent_type.IsDouble());
4141 void LCodeGen::DoMathLog(LMathLog* instr) {
4142 CpuFeatureScope scope(masm(), SSE2);
4143 ASSERT(instr->value()->Equals(instr->result()));
4145 XMMRegister xmm_scratch = double_scratch0();
4147 __ xorps(xmm_scratch, xmm_scratch);
4148 __ ucomisd(input_reg, xmm_scratch);
4149 __ j(above, &positive, Label::kNear);
4151 ExternalReference nan =
4152 ExternalReference::address_of_canonical_non_hole_nan();
4153 __ movsd(input_reg, Operand::StaticVariable(nan));
4154 __ jmp(&done, Label::kNear);
4156 ExternalReference ninf =
4157 ExternalReference::address_of_negative_infinity();
4158 __ movsd(input_reg, Operand::StaticVariable(ninf));
4159 __ jmp(&done, Label::kNear);
4163 __ movsd(Operand(esp, 0), input_reg);
4164 __ fld_d(Operand(esp, 0));
4166 __ fstp_d(Operand(esp, 0));
4167 __ movsd(input_reg, Operand(esp, 0));
4173 void LCodeGen::DoMathClz32(LMathClz32* instr) {
4174 CpuFeatureScope scope(masm(), SSE2);
4176 Register result = ToRegister(instr->result());
4177 Label not_zero_input;
4178 __ bsr(result, input);
4181 __ Move(result, Immediate(63));
4183 __ bind(&not_zero_input);
4184 __ xor_(result, Immediate(31));
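// --- Illustrative sketch (not part of the original file) ---
// Models the bsr/xor trick used by DoMathClz32 above: bsr yields the index
// of the highest set bit, and XOR with 31 turns that into the number of
// leading zeros. For a zero input the code pre-loads 63, so 63 ^ 31 == 32.
#include <cstdint>
static uint32_t Clz32Model(uint32_t x) {
  uint32_t result = 63;                 // value used when bsr sets ZF (x == 0)
  for (int i = 31; i >= 0; --i) {       // software stand-in for bsr
    if (x & (1u << i)) { result = static_cast<uint32_t>(i); break; }
  }
  return result ^ 31;
}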
4188 void LCodeGen::DoMathExp(LMathExp* instr) {
4189 CpuFeatureScope scope(masm(), SSE2);
4192 XMMRegister temp0 = double_scratch0();
4200 void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
4203 ASSERT(instr->HasPointerMap());
4205 Handle<JSFunction> known_function = instr->hydrogen()->known_function();
4206 if (known_function.is_null()) {
4207 LPointerMap* pointers = instr->pointer_map();
4209 this, pointers, Safepoint::kLazyDeopt);
4210 ParameterCount count(instr->arity());
4213 CallKnownFunction(known_function,
4214 instr->hydrogen()->formal_parameter_count(),
4217 EDI_CONTAINS_TARGET);
4222 void LCodeGen::DoCallFunction(LCallFunction* instr) {
4227 int arity = instr->arity();
4228 CallFunctionStub stub(arity, instr->hydrogen()->function_flags());
4229 CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
4233 void LCodeGen::DoCallNew(LCallNew* instr) {
4239 __ mov(ebx, isolate()->factory()->undefined_value());
4241 __ Move(eax, Immediate(instr->arity()));
4242 CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
4246 void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
4251 __ Move(eax, Immediate(instr->arity()));
4252 __ mov(ebx, isolate()->factory()->undefined_value());
4253 ElementsKind kind = instr->hydrogen()->elements_kind();
4259 if (instr->arity() == 0) {
4260 ArrayNoArgumentConstructorStub stub(kind, override_mode);
4261 CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
4262 } else if (instr->arity() == 1) {
4270 __ j(zero, &packed_case, Label::kNear);
4273 ArraySingleArgumentConstructorStub stub(holey_kind, override_mode);
4274 CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
4275 __ jmp(&done, Label::kNear);
4276 __ bind(&packed_case);
4279 ArraySingleArgumentConstructorStub stub(kind, override_mode);
4280 CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
4283 ArrayNArgumentsConstructorStub stub(kind, override_mode);
4284 CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
4289 void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
4291 CallRuntime(instr->function(), instr->arity(), instr, instr->save_doubles());
4295 void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) {
4296 Register function = ToRegister(instr->function());
4297 Register code_object = ToRegister(instr->code_object());
4303 void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) {
4304 Register result = ToRegister(instr->result());
4305 Register base = ToRegister(instr->base_object());
4306 if (instr->offset()->IsConstantOperand()) {
4307 LConstantOperand* offset = LConstantOperand::cast(instr->offset());
4310 Register offset = ToRegister(instr->offset());
4311 __ lea(result, Operand(base, offset, times_1, 0));
4316 void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
4317 Representation representation = instr->representation();
4319 HObjectAccess access = instr->hydrogen()->access();
4320 int offset = access.offset();
4322 if (access.IsExternalMemory()) {
4323 ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
4324 MemOperand operand = instr->object()->IsConstantOperand()
4325 ? MemOperand::StaticVariable(
4326 ToExternalReference(LConstantOperand::cast(instr->object())))
4328 if (instr->value()->IsConstantOperand()) {
4329 LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
4333 __ Store(value, operand, representation);
4338 Register object = ToRegister(instr->object());
4339 Handle<Map> transition = instr->transition();
4341 instr->hydrogen()->value()->IsHeapObject()
4344 ASSERT(!(representation.IsSmi() &&
4345 instr->value()->IsConstantOperand() &&
4346 !IsSmi(LConstantOperand::cast(instr->value()))));
4347 if (representation.IsHeapObject()) {
4348 if (instr->value()->IsConstantOperand()) {
4349 LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
4350 if (chunk_->LookupConstant(operand_value)->HasSmiValue()) {
4354 if (!instr->hydrogen()->value()->type().IsHeapObject()) {
4357 DeoptimizeIf(zero, instr->environment());
4363 } else if (representation.IsDouble()) {
4364 ASSERT(transition.is_null());
4365 ASSERT(access.IsInobject());
4366 ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
4368 CpuFeatureScope scope(masm(), SSE2);
4378 if (!transition.is_null()) {
4379 if (!instr->hydrogen()->NeedsWriteBarrierForMap()) {
4383 Register temp_map = ToRegister(instr->temp_map());
4384 __ mov(temp_map, transition);
4387 __ RecordWriteField(object,
4391 GetSaveFPRegsMode(),
4398 Register write_register = object;
4399 if (!access.IsInobject()) {
4405 if (instr->value()->IsConstantOperand()) {
4406 LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
4407 if (operand_value->IsRegister()) {
4409 __ Store(value, operand, representation);
4410 } else if (representation.IsInteger32()) {
4411 Immediate immediate = ToImmediate(operand_value, representation);
4412 ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
4413 __ mov(operand, immediate);
4415 Handle<Object> handle_value = ToHandle(operand_value);
4416 ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
4417 __ mov(operand, handle_value);
4421 __ Store(value, operand, representation);
4424 if (instr->hydrogen()->NeedsWriteBarrier()) {
4426 Register temp = access.IsInobject() ? ToRegister(instr->temp()) : object;
4428 __ RecordWriteField(write_register,
4432 GetSaveFPRegsMode(),
4439 void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
4444 __ mov(ecx, instr->name());
4446 CallCode(ic, RelocInfo::CODE_TARGET, instr);
4451 if (FLAG_debug_code && check->hydrogen()->skip_check()) {
4457 DeoptimizeIf(cc, check->environment());
4462 void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
4463 if (instr->hydrogen()->skip_check() && !FLAG_debug_code) return;
4465 if (instr->index()->IsConstantOperand()) {
4466 Immediate immediate =
4467 ToImmediate(LConstantOperand::cast(instr->index()),
4468 instr->hydrogen()->length()->representation());
4472 ApplyCheckIf(condition, instr);
4477 ApplyCheckIf(condition, instr);
4482 void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
4484 LOperand* key = instr->key();
4485 if (!key->IsConstantOperand() &&
4486 ExternalArrayOpRequiresTemp(instr->hydrogen()->key()->representation(),
4490 Operand operand(BuildFastArrayOperand(
4493 instr->hydrogen()->key()->representation(),
4496 instr->additional_index()));
4500 CpuFeatureScope scope(masm(), SSE2);
4501 XMMRegister xmm_scratch = double_scratch0();
4503 __ movss(operand, xmm_scratch);
4511 CpuFeatureScope scope(masm(), SSE2);
4518 switch (elements_kind) {
4525 __ mov_b(operand, value);
4531 __ mov_w(operand, value);
4537 __ mov(operand, value);
4558 void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
4559 ExternalReference canonical_nan_reference =
4560 ExternalReference::address_of_canonical_non_hole_nan();
4561 Operand double_store_operand = BuildFastArrayOperand(
4564 instr->hydrogen()->key()->representation(),
4567 instr->additional_index());
4570 CpuFeatureScope scope(masm(), SSE2);
4573 if (instr->NeedsCanonicalization()) {
4576 __ ucomisd(value, value);
4579 __ movsd(value, Operand::StaticVariable(canonical_nan_reference));
4580 __ bind(&have_value);
4583 __ movsd(double_store_operand, value);
4586 if (instr->hydrogen()->IsConstantHoleStore()) {
4590 uint64_t int_val = BitCast<uint64_t, double>(nan_double);
4594 __ mov(double_store_operand, Immediate(lower));
4595 Operand double_store_operand2 = BuildFastArrayOperand(
4598 instr->hydrogen()->key()->representation(),
4601 instr->additional_index());
4602 __ mov(double_store_operand2, Immediate(upper));
4604 Label no_special_nan_handling;
4608 if (instr->NeedsCanonicalization()) {
4613 __ j(parity_odd, &no_special_nan_handling, Label::kNear);
4621 __ jmp(&no_special_nan_handling, Label::kNear);
4622 __ bind(&canonicalize);
4624 __ fld_d(Operand::StaticVariable(canonical_nan_reference));
4627 __ bind(&no_special_nan_handling);
4628 __ fst_d(double_store_operand);
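// --- Illustrative sketch (not part of the original file) ---
// The constant-hole store above writes a canonical NaN as two 32-bit
// immediates. This models the word split; std::memcpy stands in for V8's
// BitCast, and the lower/upper ordering assumes a little-endian target
// such as ia32.
#include <cstdint>
#include <cstring>
static void SplitDoubleWords(double value, uint32_t* lower, uint32_t* upper) {
  uint64_t bits;
  std::memcpy(&bits, &value, sizeof(bits));    // bit-preserving reinterpretation
  *lower = static_cast<uint32_t>(bits);        // stored at the low word
  *upper = static_cast<uint32_t>(bits >> 32);  // stored 4 bytes higher
}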
4634 void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
4635 Register elements = ToRegister(instr->elements());
4638 Operand operand = BuildFastArrayOperand(
4641 instr->hydrogen()->key()->representation(),
4644 instr->additional_index());
4645 if (instr->value()->IsRegister()) {
4648 LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
4649 if (IsSmi(operand_value)) {
4651 __ mov(operand, immediate);
4654 Handle<Object> handle_value = ToHandle(operand_value);
4655 __ mov(operand, handle_value);
4659 if (instr->hydrogen()->NeedsWriteBarrier()) {
4660 ASSERT(instr->value()->IsRegister());
4662 ASSERT(!instr->key()->IsConstantOperand());
4664 instr->hydrogen()->value()->IsHeapObject()
4667 __ lea(key, operand);
4668 __ RecordWrite(elements,
4671 GetSaveFPRegsMode(),
4678 void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) {
4680 if (instr->is_typed_elements()) {
4681 DoStoreKeyedExternalArray(instr);
4682 } else if (instr->hydrogen()->value()->representation().IsDouble()) {
4683 DoStoreKeyedFixedDoubleArray(instr);
4685 DoStoreKeyedFixedArray(instr);
4690 void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
4696 Handle<Code> ic = instr->strict_mode() == STRICT
4697 ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
4698 : isolate()->builtins()->KeyedStoreIC_Initialize();
4699 CallCode(ic, RelocInfo::CODE_TARGET, instr);
4703 void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
4704 Register object = ToRegister(instr->object());
4706 Label no_memento_found;
4707 __ TestJSArrayForAllocationMemento(object, temp, &no_memento_found);
4708 DeoptimizeIf(equal, instr->environment());
4709 __ bind(&no_memento_found);
4713 void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
4714 Register object_reg = ToRegister(instr->object());
4716 Handle<Map> from_map = instr->original_map();
4717 Handle<Map> to_map = instr->transitioned_map();
4721 Label not_applicable;
4722 bool is_simple_map_transition =
4724 Label::Distance branch_distance =
4725 is_simple_map_transition ? Label::kNear : Label::kFar;
4727 __ j(not_equal, &not_applicable, branch_distance);
4728 if (is_simple_map_transition) {
4729 Register new_map_reg = ToRegister(instr->new_map_temp());
4734 __ RecordWriteForMap(object_reg, to_map, new_map_reg,
4739 PushSafepointRegistersScope scope(this);
4740 if (!object_reg.is(eax)) {
4741 __ mov(eax, object_reg);
4743 __ mov(ebx, to_map);
4744 bool is_js_array = from_map->instance_type() == JS_ARRAY_TYPE;
4745 TransitionElementsKindStub stub(from_kind, to_kind, is_js_array);
4747 RecordSafepointWithRegisters(
4748 instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
4750 __ bind(&not_applicable);
4754 void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
4757 DeferredStringCharCodeAt(LCodeGen* codegen,
4758 LStringCharCodeAt* instr,
4759 const X87Stack& x87_stack)
4762 codegen()->DoDeferredStringCharCodeAt(instr_);
4764 virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
4766 LStringCharCodeAt* instr_;
4769 DeferredStringCharCodeAt* deferred =
4770 new(zone()) DeferredStringCharCodeAt(this, instr, x87_stack_);
4778 __ bind(deferred->exit());
4783 Register string = ToRegister(instr->string());
4784 Register result = ToRegister(instr->result());
4789 __ Move(result, Immediate(0));
4791 PushSafepointRegistersScope scope(this);
4796 if (instr->index()->IsConstantOperand()) {
4797 Immediate immediate = ToImmediate(LConstantOperand::cast(instr->index()),
4805 CallRuntimeFromDeferred(Runtime::kHiddenStringCharCodeAt, 2,
4806 instr, instr->context());
4809 __ StoreToSafepointRegisterSlot(result, eax);
4813 void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
4816 DeferredStringCharFromCode(LCodeGen* codegen,
4817 LStringCharFromCode* instr,
4818 const X87Stack& x87_stack)
4821 codegen()->DoDeferredStringCharFromCode(instr_);
4823 virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
4825 LStringCharFromCode* instr_;
4828 DeferredStringCharFromCode* deferred =
4829 new(zone()) DeferredStringCharFromCode(this, instr, x87_stack_);
4831 ASSERT(instr->hydrogen()->value()->representation().IsInteger32());
4832 Register char_code = ToRegister(instr->char_code());
4833 Register result = ToRegister(instr->result());
4834 ASSERT(!char_code.is(result));
4837 __ j(above, deferred->entry());
4838 __ Move(result, Immediate(factory()->single_character_string_cache()));
4842 __ cmp(result, factory()->undefined_value());
4843 __ j(equal, deferred->entry());
4844 __ bind(deferred->exit());
4849 Register char_code = ToRegister(instr->char_code());
4850 Register result = ToRegister(instr->result());
4855 __ Move(result, Immediate(0));
4857 PushSafepointRegistersScope scope(this);
4858 __ SmiTag(char_code);
4860 CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr, instr->context());
4861 __ StoreToSafepointRegisterSlot(result, eax);
4865 void LCodeGen::DoStringAdd(LStringAdd* instr) {
4869 StringAddStub stub(instr->hydrogen()->flags(),
4870 instr->hydrogen()->pretenure_flag());
4871 CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
4875 void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
4876 LOperand* input = instr->value();
4877 LOperand* output = instr->result();
4878 ASSERT(input->IsRegister() || input->IsStackSlot());
4879 ASSERT(output->IsDoubleRegister());
4881 CpuFeatureScope scope(masm(), SSE2);
4883 } else if (input->IsRegister()) {
4894 void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
4895 LOperand* input = instr->value();
4896 LOperand* output = instr->result();
4898 CpuFeatureScope scope(masm(), SSE2);
4899 LOperand* temp = instr->temp();
4913 void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
4916 DeferredNumberTagI(LCodeGen* codegen,
4918 const X87Stack& x87_stack)
4921 codegen()->DoDeferredNumberTagIU(instr_, instr_->value(), instr_->temp(),
4924 virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
4926 LNumberTagI* instr_;
4929 LOperand* input = instr->value();
4930 ASSERT(input->IsRegister() && input->Equals(instr->result()));
4933 DeferredNumberTagI* deferred =
4934 new(zone()) DeferredNumberTagI(this, instr, x87_stack_);
4937 __ bind(deferred->exit());
4941 void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
4944 DeferredNumberTagU(LCodeGen* codegen,
4946 const X87Stack& x87_stack)
4949 codegen()->DoDeferredNumberTagIU(instr_, instr_->value(), instr_->temp1(),
4952 virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
4954 LNumberTagU* instr_;
4957 LOperand* input = instr->value();
4958 ASSERT(input->IsRegister() && input->Equals(instr->result()));
4961 DeferredNumberTagU* deferred =
4962 new(zone()) DeferredNumberTagU(this, instr, x87_stack_);
4964 __ j(above, deferred->entry());
4966 __ bind(deferred->exit());
4974 IntegerSignedness signedness) {
4978 XMMRegister xmm_scratch = double_scratch0();
4985 __ xor_(reg, 0x80000000);
4987 CpuFeatureScope feature_scope(masm(), SSE2);
4988 __ Cvtsi2sd(xmm_scratch, Operand(reg));
4991 __ fild_s(Operand(esp, 0));
4996 CpuFeatureScope feature_scope(masm(), SSE2);
5001 __ push(Immediate(0));
5003 __ fild_d(Operand(esp, 0));
5009 if (FLAG_inline_new) {
5010 __ AllocateHeapNumber(reg, tmp, no_reg, &slow);
5011 __ jmp(&done, Label::kNear);
5020 __ Move(reg, Immediate(0));
5023 PushSafepointRegistersScope scope(this);
5031 __ CallRuntimeSaveDoubles(Runtime::kHiddenAllocateHeapNumber);
5032 RecordSafepointWithRegisters(
5033 instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
5034 __ StoreToSafepointRegisterSlot(reg, eax);
5041 CpuFeatureScope feature_scope(masm(), SSE2);
5049 void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
5052 DeferredNumberTagD(LCodeGen* codegen,
5054 const X87Stack& x87_stack)
5057 codegen()->DoDeferredNumberTagD(instr_);
5059 virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
5061 LNumberTagD* instr_;
5073 DeferredNumberTagD* deferred =
5074 new(zone()) DeferredNumberTagD(this, instr, x87_stack_);
5075 if (FLAG_inline_new) {
5077 __ AllocateHeapNumber(reg, tmp, no_reg, deferred->entry());
5079 __ jmp(deferred->entry());
5081 __ bind(deferred->exit());
5083 CpuFeatureScope scope(masm(), SSE2);
5097 __ Move(reg, Immediate(0));
5099 PushSafepointRegistersScope scope(this);
5106 __ CallRuntimeSaveDoubles(Runtime::kHiddenAllocateHeapNumber);
5107 RecordSafepointWithRegisters(
5108 instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
5109 __ StoreToSafepointRegisterSlot(reg, eax);
5113 void LCodeGen::DoSmiTag(LSmiTag* instr) {
5114 HChange* hchange = instr->hydrogen();
5118 __ test(input, Immediate(0xc0000000));
5119 DeoptimizeIf(not_zero, instr->environment());
5124 DeoptimizeIf(overflow, instr->environment());
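// --- Illustrative sketch (not part of the original file) ---
// On ia32 a smi is a 31-bit payload tagged by a left shift of one. DoSmiTag
// above either checks the top two bits up front (the 0xc0000000 test, for
// unsigned inputs) or tags and deoptimizes on overflow; this models the
// signed range check.
#include <cstdint>
static bool FitsInSmi(int32_t value) {
  // Representable iff doubling the value cannot overflow a signed 32-bit int.
  return value >= -(1 << 30) && value < (1 << 30);
}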
5129 void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
5130 LOperand* input = instr->value();
5132 ASSERT(input->IsRegister() && input->Equals(instr->result()));
5133 if (instr->needs_check()) {
5135 DeoptimizeIf(not_zero, instr->environment());
5137 __ AssertSmi(result);
5139 __ SmiUntag(result);
5143 void LCodeGen::EmitNumberUntagDNoSSE2(Register input_reg,
5145 X87Register res_reg,
5146 bool can_convert_undefined_to_nan,
5147 bool deoptimize_on_minus_zero,
5150 Label load_smi, done;
5155 __ JumpIfSmi(input_reg, &load_smi, Label::kNear);
5159 factory()->heap_number_map());
5160 if (!can_convert_undefined_to_nan) {
5163 Label heap_number, convert;
5164 __ j(equal, &heap_number, Label::kNear);
5167 __ cmp(input_reg, factory()->undefined_value());
5171 ExternalReference nan =
5172 ExternalReference::address_of_canonical_non_hole_nan();
5173 __ fld_d(Operand::StaticVariable(nan));
5174 __ jmp(&done, Label::kNear);
5176 __ bind(&heap_number);
5180 if (deoptimize_on_minus_zero) {
5189 __ j(zero, &done, Label::kNear);
5195 __ jmp(&done, Label::kNear);
5203 __ mov(temp_reg, input_reg);
5204 __ SmiUntag(temp_reg);
5206 __ fild_s(Operand(esp, 0));
5207 __ add(esp, Immediate(kPointerSize));
5213 void LCodeGen::EmitNumberUntagD(Register input_reg,
5215 XMMRegister result_reg,
5216 bool can_convert_undefined_to_nan,
5217 bool deoptimize_on_minus_zero,
5220 Label convert, load_smi, done;
5224 __ JumpIfSmi(input_reg, &load_smi, Label::kNear);
5228 factory()->heap_number_map());
5229 if (can_convert_undefined_to_nan) {
5238 if (deoptimize_on_minus_zero) {
5239 XMMRegister xmm_scratch = double_scratch0();
5240 __ xorps(xmm_scratch, xmm_scratch);
5241 __ ucomisd(result_reg, xmm_scratch);
5243 __ movmskpd(temp_reg, result_reg);
5244 __ test_b(temp_reg, 1);
5247 __ jmp(&done, Label::kNear);
5249 if (can_convert_undefined_to_nan) {
5253 __ cmp(input_reg, factory()->undefined_value());
5256 ExternalReference nan =
5257 ExternalReference::address_of_canonical_non_hole_nan();
5258 __ movsd(result_reg, Operand::StaticVariable(nan));
5259 __ jmp(&done, Label::kNear);
5268 __ mov(temp_reg, input_reg);
5269 __ SmiUntag(temp_reg);
5270 __ Cvtsi2sd(result_reg, Operand(temp_reg));
5276 Register input_reg = ToRegister(instr->value());
5280 __ lea(input_reg, Operand(input_reg, times_2, kHeapObjectTag));
5282 if (instr->truncating()) {
5283 Label no_heap_number, check_bools, check_false;
5287 factory()->heap_number_map());
5289 __ TruncateHeapNumberToI(input_reg, input_reg);
5292 __ bind(&no_heap_number);
5295 __ cmp(input_reg, factory()->undefined_value());
5297 __ Move(input_reg, Immediate(0));
5300 __ bind(&check_bools);
5301 __ cmp(input_reg, factory()->true_value());
5303 __ Move(input_reg, Immediate(1));
5306 __ bind(&check_false);
5307 __ cmp(input_reg, factory()->false_value());
5308 __ RecordComment("Deferred TaggedToI: cannot truncate");
5309 DeoptimizeIf(not_equal, instr->environment());
5310 __ Move(input_reg, Immediate(0));
5313 XMMRegister scratch = (instr->temp() != NULL)
5316 __ TaggedToI(input_reg, input_reg, scratch,
5317 instr->hydrogen()->GetMinusZeroMode(), &bailout);
5325 void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
5328 DeferredTaggedToI(LCodeGen* codegen,
5330 const X87Stack& x87_stack)
5333 codegen()->DoDeferredTaggedToI(instr_, done());
5335 virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
5340 LOperand* input = instr->value();
5341 ASSERT(input->IsRegister());
5345 if (instr->hydrogen()->value()->representation().IsSmi()) {
5346 __ SmiUntag(input_reg);
5348 DeferredTaggedToI* deferred =
5349 new(zone()) DeferredTaggedToI(this, instr, x87_stack_);
5353 __ SmiUntag(input_reg);
5356 __ j(carry, deferred->entry());
5357 __ bind(deferred->exit());
5362 void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
5363 LOperand* input = instr->value();
5364 ASSERT(input->IsRegister());
5365 LOperand* temp = instr->temp();
5366 ASSERT(temp->IsRegister());
5367 LOperand* result = instr->result();
5368 ASSERT(result->IsDoubleRegister());
5371 bool deoptimize_on_minus_zero =
5372 instr->hydrogen()->deoptimize_on_minus_zero();
5375 HValue* value = instr->hydrogen()->value();
5380 CpuFeatureScope scope(masm(), SSE2);
5382 EmitNumberUntagD(input_reg,
5385 instr->hydrogen()->can_convert_undefined_to_nan(),
5386 deoptimize_on_minus_zero,
5387 instr->environment(),
5390 EmitNumberUntagDNoSSE2(input_reg,
5393 instr->hydrogen()->can_convert_undefined_to_nan(),
5394 deoptimize_on_minus_zero,
5395 instr->environment(),
5401 void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
5402 LOperand* input = instr->value();
5403 ASSERT(input->IsDoubleRegister());
5404 LOperand* result = instr->result();
5405 ASSERT(result->IsRegister());
5408 if (instr->truncating()) {
5410 CpuFeatureScope scope(masm(), SSE2);
5412 __ TruncateDoubleToI(result_reg, input_reg);
5416 __ TruncateX87TOSToI(result_reg);
5419 Label bailout, done;
5421 CpuFeatureScope scope(masm(), SSE2);
5423 XMMRegister xmm_scratch = double_scratch0();
5424 __ DoubleToI(result_reg, input_reg, xmm_scratch,
5425 instr->hydrogen()->GetMinusZeroMode(), &bailout, Label::kNear);
5429 __ X87TOSToI(result_reg, instr->hydrogen()->GetMinusZeroMode(),
5430 &bailout, Label::kNear);
5432 __ jmp(&done, Label::kNear);
5440 void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
5441 LOperand* input = instr->value();
5442 ASSERT(input->IsDoubleRegister());
5443 LOperand* result = instr->result();
5444 ASSERT(result->IsRegister());
5447 Label bailout, done;
5449 CpuFeatureScope scope(masm(), SSE2);
5451 XMMRegister xmm_scratch = double_scratch0();
5452 __ DoubleToI(result_reg, input_reg, xmm_scratch,
5453 instr->hydrogen()->GetMinusZeroMode(), &bailout, Label::kNear);
5457 __ X87TOSToI(result_reg, instr->hydrogen()->GetMinusZeroMode(),
5458 &bailout, Label::kNear);
5460 __ jmp(&done, Label::kNear);
5465 __ SmiTag(result_reg);
5466 DeoptimizeIf(overflow, instr->environment());
5470 void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
5471 LOperand* input = instr->value();
5473 DeoptimizeIf(not_zero, instr->environment());
5477 void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
5478 if (!instr->hydrogen()->value()->IsHeapObject()) {
5479 LOperand* input = instr->value();
5481 DeoptimizeIf(zero, instr->environment());
5486 void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
5492 if (instr->hydrogen()->is_interval_check()) {
5495 instr->hydrogen()->GetCheckInterval(&first, &last);
5498 static_cast<int8_t>(first));
5501 if (first == last) {
5502 DeoptimizeIf(not_equal, instr->environment());
5504 DeoptimizeIf(below, instr->environment());
5508 static_cast<int8_t>(last));
5509 DeoptimizeIf(above, instr->environment());
5515 instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);
5520 DeoptimizeIf(tag == 0 ? not_zero : zero, instr->environment());
5523 __ and_(temp, mask);
5525 DeoptimizeIf(not_equal, instr->environment());
5531 void LCodeGen::DoCheckValue(LCheckValue* instr) {
5532 Handle<HeapObject> object = instr->hydrogen()->object().handle();
5533 if (instr->hydrogen()->object_in_new_space()) {
5535 Handle<Cell> cell = isolate()->factory()->NewCell(object);
5536 __ cmp(reg, Operand::ForCell(cell));
5538 Operand operand = ToOperand(instr->value());
5539 __ cmp(operand, object);
5541 DeoptimizeIf(not_equal, instr->environment());
5547 PushSafepointRegistersScope scope(this);
5550 __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance);
5551 RecordSafepointWithRegisters(
5552 instr->pointer_map(), 1, Safepoint::kNoLazyDeopt);
5556 DeoptimizeIf(zero, instr->environment());
5560 void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
5563 DeferredCheckMaps(LCodeGen* codegen,
5566 const X87Stack& x87_stack)
5567 : LDeferredCode(codegen, x87_stack), instr_(instr), object_(object) {
5568 SetExit(check_maps());
5571 codegen()->DoDeferredInstanceMigration(instr_, object_);
5573 Label* check_maps() { return &check_maps_; }
5574 virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
5581 if (instr->hydrogen()->CanOmitMapChecks()) return;
5583 LOperand* input = instr->value();
5584 ASSERT(input->IsRegister());
5587 DeferredCheckMaps* deferred = NULL;
5588 if (instr->hydrogen()->has_migration_target()) {
5589 deferred = new(zone()) DeferredCheckMaps(this, instr, reg, x87_stack_);
5590 __ bind(deferred->check_maps());
5593 UniqueSet<Map> map_set = instr->hydrogen()->map_set();
5595 for (int i = 0; i < map_set.size() - 1; i++) {
5596 Handle<Map> map = map_set.at(i).handle();
5597 __ CompareMap(reg, map);
5598 __ j(equal, &success, Label::kNear);
5601 Handle<Map> map = map_set.at(map_set.size() - 1).handle();
5602 __ CompareMap(reg, map);
5603 if (instr->hydrogen()->has_migration_target()) {
5606 DeoptimizeIf(not_equal, instr->environment());
5613 void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
5614 CpuFeatureScope scope(masm(), SSE2);
5616 XMMRegister xmm_scratch = double_scratch0();
5617 Register result_reg = ToRegister(instr->result());
5618 __ ClampDoubleToUint8(value_reg, xmm_scratch, result_reg);
5622 void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
5623 ASSERT(instr->unclamped()->Equals(instr->result()));
5624 Register value_reg = ToRegister(instr->result());
5625 __ ClampUint8(value_reg);
5629 void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
5630 CpuFeatureScope scope(masm(), SSE2);
5632 ASSERT(instr->unclamped()->Equals(instr->result()));
5633 Register input_reg = ToRegister(instr->unclamped());
5635 XMMRegister xmm_scratch = double_scratch0();
5636 Label is_smi, done, heap_number;
5638 __ JumpIfSmi(input_reg, &is_smi);
5642 factory()->heap_number_map());
5643 __ j(equal, &heap_number, Label::kNear);
5647 __ cmp(input_reg, factory()->undefined_value());
5648 DeoptimizeIf(not_equal, instr->environment());
5649 __ mov(input_reg, 0);
5650 __ jmp(&done, Label::kNear);
5653 __ bind(&heap_number);
5655 __ ClampDoubleToUint8(xmm_scratch, temp_xmm_reg, input_reg);
5656 __ jmp(&done, Label::kNear);
5660 __ SmiUntag(input_reg);
5661 __ ClampUint8(input_reg);
5666 void LCodeGen::DoClampTToUint8NoSSE2(LClampTToUint8NoSSE2* instr) {
5667 Register input_reg = ToRegister(instr->unclamped());
5668 Register result_reg = ToRegister(instr->result());
5669 Register scratch = ToRegister(instr->scratch());
5670 Register scratch2 = ToRegister(instr->scratch2());
5671 Register scratch3 = ToRegister(instr->scratch3());
5672 Label is_smi, done, heap_number, valid_exponent,
5673 largest_value, zero_result, maybe_nan_or_infinity;
5675 __ JumpIfSmi(input_reg, &is_smi);
5679 factory()->heap_number_map());
5680 __ j(equal, &heap_number, Label::kNear);
5684 __ cmp(input_reg, factory()->undefined_value());
5685 DeoptimizeIf(not_equal, instr->environment());
5686 __ jmp(&zero_result, Label::kNear);
5689 __ bind(&heap_number);
5700 __ test(scratch, scratch);
5704 __ mov(scratch2, scratch);
5707 __ j(zero, &zero_result, Label::kNear);
5711 const uint32_t non_int8_exponent = 7;
5712 __ cmp(scratch2, Immediate(non_int8_exponent + 1));
5714 __ j(greater, &maybe_nan_or_infinity, Label::kNear);
5716 __ bind(&valid_exponent);
5734 __ mov(scratch2, scratch);
5735 const uint32_t one_half_bit_shift = 30 - sizeof(uint8_t) * 8;
5736 const uint32_t one_bit_shift = one_half_bit_shift + 1;
5737 __ and_(scratch2, Immediate((1 << one_bit_shift) - 1));
5738 __ cmp(scratch2, Immediate(1 << one_half_bit_shift));
5740 __ j(less, &no_round, Label::kNear);
5742 __ mov(scratch2, Immediate(1 << one_half_bit_shift));
5744 __ test(scratch3, scratch3);
5746 __ mov(scratch2, scratch);
5747 __ and_(scratch2, Immediate(1 << one_bit_shift));
5748 __ shr(scratch2, 1);
5750 __ add(scratch, scratch2);
5751 __ j(overflow, &largest_value, Label::kNear);
5753 __ shr(scratch, 23);
5754 __ mov(result_reg, scratch);
5755 __ jmp(&done, Label::kNear);
5757 __ bind(&maybe_nan_or_infinity);
5769 __ bind(&largest_value);
5770 __ mov(result_reg, Immediate(255));
5771 __ jmp(&done, Label::kNear);
5773 __ bind(&zero_result);
5774 __ xor_(result_reg, result_reg);
5775 __ jmp(&done, Label::kNear);
5779 if (!input_reg.is(result_reg)) {
5780 __ mov(result_reg, input_reg);
5782 __ SmiUntag(result_reg);
5783 __ ClampUint8(result_reg);
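// --- Illustrative sketch (not part of the original file) ---
// DoClampTToUint8NoSSE2 above rebuilds ClampDoubleToUint8 from the float's
// exponent and mantissa. This is only a behavioural model of the result it
// aims for: clamp to [0, 255] with round-half-to-even, NaN mapping to 0.
#include <cmath>
#include <cstdint>
static uint8_t ClampDoubleToUint8Model(double value) {
  if (!(value > 0)) return 0;               // NaN and non-positive inputs
  if (value >= 255) return 255;
  double rounded = std::nearbyint(value);    // default mode: round-half-to-even
  return static_cast<uint8_t>(rounded);
}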
5788 void LCodeGen::DoDoubleBits(LDoubleBits* instr) {
5789 CpuFeatureScope scope(masm(), SSE2);
5791 Register result_reg = ToRegister(instr->result());
5792 if (instr->hydrogen()->bits() == HDoubleBits::HIGH) {
5794 CpuFeatureScope scope2(masm(), SSE4_1);
5795 __ pextrd(result_reg, value_reg, 1);
5797 XMMRegister xmm_scratch = double_scratch0();
5798 __ pshufd(xmm_scratch, value_reg, 1);
5799 __ movd(result_reg, xmm_scratch);
5802 __ movd(result_reg, value_reg);
5807 void LCodeGen::DoConstructDouble(LConstructDouble* instr) {
5811 CpuFeatureScope scope(masm(), SSE2);
5814 CpuFeatureScope scope2(masm(), SSE4_1);
5815 __ movd(result_reg, lo_reg);
5816 __ pinsrd(result_reg, hi_reg, 1);
5818 XMMRegister xmm_scratch = double_scratch0();
5819 __ movd(result_reg, hi_reg);
5820 __ psllq(result_reg, 32);
5821 __ movd(xmm_scratch, lo_reg);
5822 __ orps(result_reg, xmm_scratch);
5827 void LCodeGen::DoAllocate(LAllocate* instr) {
5830 DeferredAllocate(LCodeGen* codegen,
5832 const X87Stack& x87_stack)
5835 codegen()->DoDeferredAllocate(instr_);
5837 virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
5842 DeferredAllocate* deferred =
5843 new(zone()) DeferredAllocate(this, instr, x87_stack_);
5845 Register result = ToRegister(instr->result());
5850 if (instr->hydrogen()->MustAllocateDoubleAligned()) {
5853 if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
5854 ASSERT(!instr->hydrogen()->IsOldDataSpaceAllocation());
5855 ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
5857 } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
5858 ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
5862 if (instr->size()->IsConstantOperand()) {
5865 __ Allocate(size, result, temp, no_reg, deferred->entry(), flags);
5867 __ jmp(deferred->entry());
5871 __ Allocate(size, result, temp, no_reg, deferred->entry(), flags);
5874 __ bind(deferred->exit());
5876 if (instr->hydrogen()->MustPrefillWithFiller()) {
5877 if (instr->size()->IsConstantOperand()) {
5879 __ mov(temp, (size / kPointerSize) - 1);
5888 isolate()->factory()->one_pointer_filler_map());
5896 Register result = ToRegister(instr->result());
5903 PushSafepointRegistersScope scope(this);
5904 if (instr->size()->IsRegister()) {
5906 ASSERT(!size.is(result));
5915 instr->hydrogen()->MustAllocateDoubleAligned());
5916 if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
5917 ASSERT(!instr->hydrogen()->IsOldDataSpaceAllocation());
5918 ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
5920 } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
5921 ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
5928 CallRuntimeFromDeferred(
5929 Runtime::kHiddenAllocateInTargetSpace, 2, instr, instr->context());
5930 __ StoreToSafepointRegisterSlot(result, eax);
5934 void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
5937 CallRuntime(Runtime::kToFastProperties, 1, instr);
5941 void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
5949 int literal_offset =
5951 __ LoadHeapObject(ecx, instr->hydrogen()->literals());
5953 __ cmp(ebx, factory()->undefined_value());
5959 __ push(Immediate(Smi::FromInt(instr->hydrogen()->literal_index())));
5960 __ push(Immediate(instr->hydrogen()->pattern()));
5961 __ push(Immediate(instr->hydrogen()->flags()));
5962 CallRuntime(Runtime::kHiddenMaterializeRegExpLiteral, 4, instr);
5965 __ bind(&materialized);
5967 Label allocated, runtime_allocate;
5969 __ jmp(&allocated, Label::kNear);
5971 __ bind(&runtime_allocate);
5974 CallRuntime(Runtime::kHiddenAllocateInNewSpace, 1, instr);
5977 __ bind(&allocated);
5986 if ((size % (2 * kPointerSize)) != 0) {
5993 void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
5997 bool pretenure = instr->hydrogen()->pretenure();
5998 if (!pretenure && instr->hydrogen()->has_no_literals()) {
5999 FastNewClosureStub stub(instr->hydrogen()->strict_mode(),
6000 instr->hydrogen()->is_generator());
6001 __ mov(ebx, Immediate(instr->hydrogen()->shared_info()));
6002 CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
6005 __ push(Immediate(instr->hydrogen()->shared_info()));
6006 __ push(Immediate(pretenure ? factory()->true_value()
6007 : factory()->false_value()));
6008 CallRuntime(Runtime::kHiddenNewClosure, 3, instr);
6013 void LCodeGen::DoTypeof(LTypeof* instr) {
6015 LOperand* input = instr->value();
6016 EmitPushTaggedOperand(input);
6017 CallRuntime(Runtime::kTypeof, 1, instr);
6021 void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
6023 Condition final_branch_condition = EmitTypeofIs(instr, input);
6025 EmitBranch(instr, final_branch_condition);
6030 Condition LCodeGen::EmitTypeofIs(LTypeofIsAndBranch* instr, Register input) {
6031 Label* true_label = instr->TrueLabel(chunk_);
6032 Label* false_label = instr->FalseLabel(chunk_);
6033 Handle<String> type_name = instr->type_literal();
6034 int left_block = instr->TrueDestination(chunk_);
6035 int right_block = instr->FalseDestination(chunk_);
6036 int next_block = GetNextEmittedBlock();
6038 Label::Distance true_distance = left_block == next_block ? Label::kNear
6040 Label::Distance false_distance = right_block == next_block ? Label::kNear
6043 if (type_name->Equals(heap()->number_string())) {
6044 __ JumpIfSmi(input, true_label, true_distance);
6046 factory()->heap_number_map());
6047 final_branch_condition = equal;
6049 } else if (type_name->Equals(heap()->string_string())) {
6050 __ JumpIfSmi(input, false_label, false_distance);
6055 final_branch_condition = zero;
6057 } else if (type_name->Equals(heap()->symbol_string())) {
6058 __ JumpIfSmi(input, false_label, false_distance);
6060 final_branch_condition = equal;
6062 } else if (type_name->Equals(heap()->boolean_string())) {
6063 __ cmp(input, factory()->true_value());
6064 __ j(equal, true_label, true_distance);
6065 __ cmp(input, factory()->false_value());
6066 final_branch_condition = equal;
6068 } else if (FLAG_harmony_typeof && type_name->Equals(heap()->null_string())) {
6069 __ cmp(input, factory()->null_value());
6070 final_branch_condition = equal;
6072 } else if (type_name->Equals(heap()->undefined_string())) {
6073 __ cmp(input, factory()->undefined_value());
6074 __ j(equal, true_label, true_distance);
6075 __ JumpIfSmi(input, false_label, false_distance);
6082 } else if (type_name->Equals(heap()->function_string())) {
6084 __ JumpIfSmi(input, false_label, false_distance);
6086 __ j(equal, true_label, true_distance);
6088 final_branch_condition = equal;
6090 } else if (type_name->Equals(heap()->object_string())) {
6091 __ JumpIfSmi(input, false_label, false_distance);
6092 if (!FLAG_harmony_typeof) {
6093 __ cmp(input, factory()->null_value());
6094 __ j(equal, true_label, true_distance);
6097 __ j(below, false_label, false_distance);
6099 __ j(above, false_label, false_distance);
6103 final_branch_condition = zero;
6106 __ jmp(false_label, false_distance);
6108 return final_branch_condition;
6112 void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
6115 EmitIsConstructCall(temp);
6116 EmitBranch(instr, equal);
6120 void LCodeGen::EmitIsConstructCall(Register temp) {
6125 Label check_frame_marker;
6128 __ j(not_equal, &check_frame_marker, Label::kNear);
6132 __ bind(&check_frame_marker);
6138 void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
6139 if (!info()->IsStub()) {
6142 int current_pc = masm()->pc_offset();
6143 if (current_pc < last_lazy_deopt_pc_ + space_needed) {
6144 int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
6145 __ Nop(padding_size);
6148 last_lazy_deopt_pc_ = masm()->pc_offset();
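// --- Illustrative sketch (not part of the original file) ---
// Models the padding computation in EnsureSpaceForLazyDeopt above: the call
// patched in for a lazy deopt must not overwrite the previous patch site, so
// the gap since the last lazy-deopt pc is filled with nops up to space_needed.
static int LazyDeoptPaddingBytes(int last_lazy_deopt_pc, int current_pc,
                                 int space_needed) {
  int wanted = last_lazy_deopt_pc + space_needed;
  return current_pc < wanted ? wanted - current_pc : 0;  // nop bytes to emit
}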
6152 void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
6153 last_lazy_deopt_pc_ = masm()->pc_offset();
6154 ASSERT(instr->HasEnvironment());
6156 RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
6157 safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
6161 void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
6170 Comment(";;; deoptimize: %s", instr->hydrogen()->reason());
6171 DeoptimizeIf(no_condition, instr->environment(), type);
6175 void LCodeGen::DoDummy(LDummy* instr) {
6180 void LCodeGen::DoDummyUse(LDummyUse* instr) {
6186 PushSafepointRegistersScope scope(this);
6188 __ CallRuntimeSaveDoubles(Runtime::kHiddenStackGuard);
6189 RecordSafepointWithLazyDeopt(
6190 instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
6191 ASSERT(instr->HasEnvironment());
6193 safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
6197 void LCodeGen::DoStackCheck(LStackCheck* instr) {
6200 DeferredStackCheck(LCodeGen* codegen,
6202 const X87Stack& x87_stack)
6205 codegen()->DoDeferredStackCheck(instr_);
6207 virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
6209 LStackCheck* instr_;
6212 ASSERT(instr->HasEnvironment());
6216 if (instr->hydrogen()->is_function_entry()) {
6219 ExternalReference stack_limit =
6220 ExternalReference::address_of_stack_limit(isolate());
6221 __ cmp(esp, Operand::StaticVariable(stack_limit));
6224 ASSERT(instr->context()->IsRegister());
6226 CallCode(isolate()->builtins()->StackCheck(),
6227 RelocInfo::CODE_TARGET,
6231 ASSERT(instr->hydrogen()->is_backwards_branch());
6233 DeferredStackCheck* deferred_stack_check =
6234 new(zone()) DeferredStackCheck(this, instr, x87_stack_);
6235 ExternalReference stack_limit =
6236 ExternalReference::address_of_stack_limit(isolate());
6237 __ cmp(esp, Operand::StaticVariable(stack_limit));
6238 __ j(below, deferred_stack_check->entry());
6240 __ bind(instr->done_label());
6241 deferred_stack_check->SetExit(instr->done_label());
6242 RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
6250 void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
6258 ASSERT(!environment->HasBeenRegistered());
6259 RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
6261 GenerateOsrPrologue();
6265 void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
6267 __ cmp(eax, isolate()->factory()->undefined_value());
6268 DeoptimizeIf(equal, instr->environment());
6270 __ cmp(eax, isolate()->factory()->null_value());
6271 DeoptimizeIf(equal, instr->environment());
6274 DeoptimizeIf(zero, instr->environment());
6280 Label use_cache, call_runtime;
6281 __ CheckEnumCache(&call_runtime);
6284 __ jmp(&use_cache, Label::kNear);
6287 __ bind(&call_runtime);
6289 CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr);
6292 isolate()->factory()->meta_map());
6293 DeoptimizeIf(not_equal, instr->environment());
6294 __ bind(&use_cache);
6298 void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
6300 Register result = ToRegister(instr->result());
6301 Label load_cache, done;
6302 __ EnumLength(result, map);
6305 __ mov(result, isolate()->factory()->empty_fixed_array());
6306 __ jmp(&done, Label::kNear);
6308 __ bind(&load_cache);
6309 __ LoadInstanceDescriptors(map, result);
6315 __ test(result, result);
6316 DeoptimizeIf(equal, instr->environment());
6320 void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
6321 Register object = ToRegister(instr->value());
6324 DeoptimizeIf(not_equal, instr->environment());
6328 void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
6329 Register object = ToRegister(instr->object());
6332 Label out_of_object, done;
6333 __ cmp(index, Immediate(0));
6334 __ j(less, &out_of_object, Label::kNear);
6339 __ jmp(&done, Label::kNear);
6341 __ bind(&out_of_object);
6357 #endif // V8_TARGET_ARCH_IA32
static Handle< Code > initialize_stub(Isolate *isolate, StrictMode strict_mode)
enable upcoming ES6 features enable harmony block scoping enable harmony enable harmony proxies enable harmony generators enable harmony numeric enable harmony string enable harmony math functions harmony_scoping harmony_symbols harmony_collections harmony_iteration harmony_strings harmony_scoping harmony_maths tracks arrays with only smi values Optimize object Array DOM strings and string pretenure call new trace pretenuring decisions of HAllocate instructions track fields with only smi values track fields with heap values track_fields track_fields Enables optimizations which favor memory size over execution speed use string slices optimization filter maximum number of GVN fix point iterations use function inlining use allocation folding eliminate write barriers targeting allocations in optimized code maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining crankshaft harvests type feedback from stub cache trace check elimination phase hydrogen tracing filter NULL
static const int kLengthOffset
void FinishCode(Handle< Code > code)
static const int kHashFieldOffset
static const int kBitFieldOffset
void DoDeferredStringCharFromCode(LStringCharFromCode *instr)
void DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal *instr, Label *map_check)
const intptr_t kSmiTagMask
static const int kCodeEntryOffset
static const int kPrototypeOrInitialMapOffset
static int SlotOffset(int index)
static Representation Smi()
enable upcoming ES6 features enable harmony block scoping enable harmony enable harmony proxies enable harmony generators enable harmony numeric enable harmony string enable harmony math functions harmony_scoping harmony_symbols harmony_collections harmony_iteration harmony_strings harmony_scoping harmony_maths tracks arrays with only smi values Optimize object Array DOM strings and string pretenure call new trace pretenuring decisions of HAllocate instructions track fields with only smi values track fields with heap values track_fields track_fields Enables optimizations which favor memory size over execution speed use string slices optimization filter maximum number of GVN fix point iterations use function inlining use allocation folding eliminate write barriers targeting allocations in optimized code maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining crankshaft harvests type feedback from stub cache trace check elimination phase hydrogen tracing filter trace hydrogen to given file name trace inlining decisions trace store elimination trace all use positions trace global value numbering trace hydrogen escape analysis trace the tracking of allocation sites trace map generalization environment for every instruction deoptimize every n garbage collections put a break point before deoptimizing deoptimize uncommon cases use on stack replacement trace array bounds check elimination perform array index dehoisting use load elimination use store elimination use constant folding eliminate unreachable code number of stress runs when picking a function to watch for shared function not JSFunction itself flushes the cache of optimized code for closures on every GC functions with arguments object maximum number of escape analysis fix point iterations allow uint32 values on optimize frames if they are used only in safe operations track concurrent recompilation artificial compilation delay in ms concurrent on stack replacement do not emit check maps for constant values that have a leaf map
static double hole_nan_as_double()
static const int kEnumCacheOffset
static const int kMaxUtf16CodeUnit
const uint32_t kTwoByteStringTag
static const uint32_t kExponentMask
int StackSlotOffset(int index)
RegisterType type() const
static Smi * FromInt(int value)
bool NeedsEagerFrame() const
static Handle< Code > GetUninitialized(Isolate *isolate, Token::Value op)
static const int kDataOffset
bool IsSmi(LConstantOperand *op) const
static Handle< T > cast(Handle< S > that)
Operand HighOperand(LOperand *op)
static const int kGlobalReceiverOffset
static Representation Integer32()
void X87PrepareBinaryOp(X87Register left, X87Register right, X87Register result)
void X87LoadForUsage(X87Register reg)
static const int kNativeByteOffset
static const int kExponentBias
static XMMRegister FromAllocationIndex(int index)
static const unsigned int kContainsCachedArrayIndexMask
static bool IsSupported(CpuFeature f)
static const int kStrictModeBitWithinByte
const int kNoAlignmentPadding
X87Register ToX87Register(LOperand *op) const
static X87Register FromAllocationIndex(int index)
AllocationSiteOverrideMode
void DoDeferredMathAbsTaggedHeapNumber(LMathAbs *instr)
#define ASSERT(condition)
void DoDeferredStringCharCodeAt(LStringCharCodeAt *instr)
static const int kContextOffset
const int kPointerSizeLog2
static bool IsSafeForSnapshot(CpuFeature f)
static const int kInObjectFieldCount
const uint32_t kStringRepresentationMask
MemOperand GlobalObjectOperand()
static const int kCallerFPOffset
PerThreadAssertScopeDebugOnly< DEFERRED_HANDLE_DEREFERENCE_ASSERT, true > AllowDeferredHandleDereference
static Handle< Code > initialize_stub(Isolate *isolate, ExtraICState extra_state)
static const int kInstanceClassNameOffset
const bool FLAG_enable_slow_asserts
bool IsSimpleMapChangeTransition(ElementsKind from_kind, ElementsKind to_kind)
Variable * parameter(int index) const
MemOperand ContextOperand(Register context, int index)
static const int kContextOffset
const int kAlignmentPaddingPushed
void DoDeferredStackCheck(LStackCheck *instr)
void X87PrepareToWrite(X87Register reg)
int LookupDestination(int block_id) const
Condition ReverseCondition(Condition cond)
Immediate ToImmediate(LOperand *op, const Representation &r) const
Operand ToOperand(LOperand *op)
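ToImmediate and ToOperand (together with ToRegister and ToDoubleRegister below) translate register-allocator operands into machine operands. The toy dispatch below illustrates the idea under simplified, hypothetical types; it is not V8's LOperand API.

#include <cstdio>
#include <string>

// Toy model of the dispatch: a register-allocated operand becomes a machine
// register, a spill slot becomes an ebp-relative memory operand, a constant
// becomes an immediate.
enum class OperandKind { kRegister, kStackSlot, kConstant };

struct ToyLOperand {
  OperandKind kind;
  int index;   // register code, spill slot index, or constant value
};

std::string Describe(const ToyLOperand& op) {
  switch (op.kind) {
    case OperandKind::kRegister:
      return "reg" + std::to_string(op.index);                   // -> Register
    case OperandKind::kStackSlot:
      return "[ebp + slot " + std::to_string(op.index) + "]";    // -> Operand
    case OperandKind::kConstant:
      return "#" + std::to_string(op.index);                     // -> Immediate
  }
  return "?";
}

int main() {
  std::printf("%s\n", Describe({OperandKind::kStackSlot, 3}).c_str());
  return 0;
}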
const uint32_t kSlotsZapValue
int32_t WhichPowerOf2Abs(int32_t x)
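WhichPowerOf2Abs yields the base-2 exponent of |x|, which lets the backend strength-reduce multiplication or division by a power-of-two constant into a shift. The sketch below only covers inputs whose absolute value is a power of two; the edge-case handling of the real helper is not reproduced.

#include <cassert>
#include <cstdint>

// Sketch: log2(|x|) for power-of-two magnitudes. |x| is taken in unsigned
// arithmetic so INT32_MIN maps to 2^31 without overflow.
int WhichPowerOf2AbsSketch(int32_t x) {
  uint32_t v = (x < 0) ? 0u - static_cast<uint32_t>(x) : static_cast<uint32_t>(x);
  int power = 0;
  while (v > 1) {
    v >>= 1;
    ++power;
  }
  return power;
}

int main() {
  assert(WhichPowerOf2AbsSketch(-8) == 3);   // x * -8 can become -(x << 3)
  assert(WhichPowerOf2AbsSketch(1) == 0);
  return 0;
}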
static const int kInfinityOrNanExponent
static const int kNumAllocatableRegisters
friend class LEnvironment
static const int kLengthOffset
STATIC_ASSERT(sizeof(CPURegister)==sizeof(Register))
static const int kExponentShift
static const int kValueOffset
bool IsFastPackedElementsKind(ElementsKind kind)
const uint32_t kHoleNanUpper32
static const int kDontAdaptArgumentsSentinel
void DoDeferredNumberTagD(LNumberTagD *instr)
static uint32_t update(uint32_t previous, AllocationSpace value)
static Address GetDeoptimizationEntry(Isolate *isolate, int id, BailoutType type, GetEntryMode mode=ENSURE_ENTRY_CODE)
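GetDeoptimizationEntry and GetDeoptimizationId (below) convert between a bailout id and the address of its deoptimization entry. Deopt entries are equally sized thunks laid out in a table, so the mapping is a multiply or divide by the entry size; the base address and entry size in this sketch are illustrative values, not V8's.

#include <cassert>
#include <cstdint>

// Sketch of the address arithmetic: entry address = table base + id * size.
static const uintptr_t kTableBase = 0x1000;  // illustrative only
static const int kEntrySize = 16;            // illustrative only

uintptr_t EntryForId(int id) { return kTableBase + id * kEntrySize; }

int IdForEntry(uintptr_t addr) {
  assert(addr >= kTableBase && (addr - kTableBase) % kEntrySize == 0);
  return static_cast<int>((addr - kTableBase) / kEntrySize);
}

int main() {
  assert(IdForEntry(EntryForId(7)) == 7);
  return 0;
}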
void check(i::Vector< const uint8_t > string)
static void MaybeCallEntryHook(MacroAssembler *masm)
Operand FieldOperand(Register object, int offset)
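FieldOperand builds the memory operand for a field of a heap object. Heap object pointers are tagged in their low bit, so the field displacement compensates for the tag; the sketch below assumes V8's kHeapObjectTag of 1.

#include <cassert>

// A field at `offset` inside a heap object is reached at displacement
// `offset - kHeapObjectTag` from the tagged pointer; FieldOperand packages
// exactly that adjustment.
static const int kHeapObjectTag = 1;

int FieldDisplacement(int offset) {
  return offset - kHeapObjectTag;
}

int main() {
  // e.g. a field at object offset 0 is loaded from [reg - 1].
  assert(FieldDisplacement(0) == -1);
  return 0;
}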
DwVfpRegister ToDoubleRegister(LOperand *op) const
void DoDeferredAllocate(LAllocate *instr)
static int GetDeoptimizationId(Isolate *isolate, Address addr, BailoutType type)
const uint32_t kHoleNanLower32
static const int kMaxRegularHeapObjectSize
static Register FromAllocationIndex(int index)
static const int kCallerSPOffset
static const int kDynamicAlignmentStateOffset
static const int kCacheStampOffset
bool IsFixedTypedArrayElementsKind(ElementsKind kind)
static const int kPropertiesOffset
void X87Fxch(X87Register reg, int other_slot=0)
const int kAlignmentZapValue
#define ASSERT_LE(v1, v2)
int32_t ToInteger32(LConstantOperand *op) const
int num_parameters() const
bool IsInteger32(LConstantOperand *op) const
static const int kMarkerOffset
bool IsFastSmiElementsKind(ElementsKind kind)
void X87Free(X87Register reg)
static void Generate(MacroAssembler *masm, Register string, Register index, Register result, Label *call_runtime)
Handle< Object > ToHandle(LConstantOperand *op) const
static void EnsureRelocSpaceForLazyDeoptimization(Handle< Code > code)
static const int kHeaderSize
static const int kNativeBitWithinByte
#define STATIC_ASCII_VECTOR(x)
void DoDeferredInstanceMigration(LCheckMaps *instr, Register object)
void DoParallelMove(LParallelMove *move)
double ToDouble(LConstantOperand *op) const
int ElementsKindToShiftSize(ElementsKind elements_kind)
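ElementsKindToShiftSize returns the log2 of the element width in bytes, so an array index can be turned into a byte offset with a single shift. The sketch below uses a small illustrative enum rather than V8's ElementsKind values and covers only a few representative kinds.

#include <cassert>

// Illustrative subset: shift = log2(element size in bytes).
enum ToyElementsKind { kUint8, kInt32, kDouble, kTaggedPointer };

int ToyElementsKindToShiftSize(ToyElementsKind kind) {
  switch (kind) {
    case kUint8:         return 0;  // 1-byte elements
    case kInt32:         return 2;  // 4-byte elements
    case kDouble:        return 3;  // 8-byte elements
    case kTaggedPointer: return 2;  // pointer-sized elements on ia32
  }
  return 0;
}

int main() {
  int index = 5;
  assert((index << ToyElementsKindToShiftSize(kDouble)) == 40);  // byte offset
  return 0;
}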
Vector< const char > CStrVector(const char *data)
static int OffsetOfElementAt(int index)
static int SizeFor(int length)
bool NeedsDeferredFrame() const
static const int kHeaderSize
void Load(const v8::FunctionCallbackInfo< v8::Value > &args)
static const int kMapOffset
static const int kValueOffset
bool is(Register reg) const
Handle< T > handle(T *t, Isolate *isolate)
static const int kHasNonInstancePrototype
void WriteTranslation(LEnvironment *environment, Translation *translation)
static const int kFunctionOffset
static const uint32_t kSignMask
static const int kNotDeoptimizationEntry
static const int kStrictModeByteOffset
int32_t ToRepresentation(LConstantOperand *op, const Representation &r) const
static const int kHeaderSize
static AllocationSiteMode GetMode(ElementsKind boilerplate_elements_kind)
void X87Mov(X87Register reg, Operand src, X87OperandType operand=kX87DoubleOperand)
Condition NegateCondition(Condition cond)
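NegateCondition and ReverseCondition (listed earlier) are easy to confuse: negation flips the truth value of the same comparison, while reversal keeps the truth value but swaps the operand order. The sketch below uses an illustrative subset of ia32 condition codes, not V8's Condition enum.

#include <cassert>

enum Cond { kEqual, kNotEqual, kLess, kGreaterEqual, kGreater, kLessEqual };

// "jump if NOT cond": equal <-> not_equal, less <-> greater_equal, ...
Cond Negate(Cond c) {
  switch (c) {
    case kEqual:        return kNotEqual;
    case kNotEqual:     return kEqual;
    case kLess:         return kGreaterEqual;
    case kGreaterEqual: return kLess;
    case kGreater:      return kLessEqual;
    case kLessEqual:    return kGreater;
  }
  return c;
}

// "same comparison with operands swapped": a < b  <=>  b > a.
Cond Reverse(Cond c) {
  switch (c) {
    case kLess:         return kGreater;
    case kGreater:      return kLess;
    case kLessEqual:    return kGreaterEqual;
    case kGreaterEqual: return kLessEqual;
    default:            return c;  // equal / not_equal are symmetric
  }
}

int main() {
  assert(Negate(kLess) == kGreaterEqual);
  assert(Reverse(kLess) == kGreater);
  return 0;
}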
#define ASSERT_EQ(v1, v2)
static const int kConstructorOffset
static const uint32_t kMantissaMask
const uint32_t kOneByteStringTag
#define ASSERT_NE(v1, v2)
static const int kIsUndetectable
void X87CommitWrite(X87Register reg)
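The X87* helpers (X87Mov, X87Fxch, X87Free, X87PrepareToWrite/X87CommitWrite, X87PrepareBinaryOp, X87LoadForUsage) keep a software model of the x87 register stack so the code generator knows which virtual double register occupies which physical stack slot. Below is a minimal sketch of such a tracker, under the assumption that each write is bracketed by "prepare" (drop any stale copy) and "commit" (record the new value on top); it models the idea only, not the actual LCodeGen implementation.

#include <cassert>
#include <utility>
#include <vector>

// Minimal model: a stack of virtual register ids mirroring the physical
// x87 stack.
class X87StackModel {
 public:
  // X87PrepareToWrite: drop any stale copy of `reg` so the coming store
  // does not leave two live entries for the same virtual register.
  void PrepareToWrite(int reg) { Free(reg); }

  // X87CommitWrite: the freshly written value now occupies the top slot.
  void CommitWrite(int reg) { stack_.push_back(reg); }

  // X87Fxch: bring `reg` to the top by swapping, mirroring an fxch insn.
  void Fxch(int reg) {
    for (size_t i = 0; i < stack_.size(); ++i) {
      if (stack_[i] == reg) {
        std::swap(stack_[i], stack_.back());
        return;
      }
    }
    assert(false && "register not on the tracked stack");
  }

  // X87Free: forget `reg`; its physical slot can be reused.
  void Free(int reg) {
    for (size_t i = 0; i < stack_.size(); ++i) {
      if (stack_[i] == reg) {
        stack_.erase(stack_.begin() + i);
        return;
      }
    }
  }

  int Top() const { return stack_.back(); }

 private:
  std::vector<int> stack_;
};

int main() {
  X87StackModel model;
  model.PrepareToWrite(0);  // about to define virtual double register 0
  model.CommitWrite(0);     // value is now live on top of the stack
  model.PrepareToWrite(1);
  model.CommitWrite(1);
  model.Fxch(0);            // bring register 0 back to the top
  assert(model.Top() == 0);
  return 0;
}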
static const int kHeaderSize
Register ToRegister(LOperand *op) const
void Add(const T &element, AllocationPolicy allocator=AllocationPolicy())
void DoDeferredTaggedToI(LTaggedToI *instr)
static const int kPrototypeOffset
#define RUNTIME_ENTRY(name, nargs, ressize)
static const int kMaxLength
void DoDeferredNumberTagIU(LInstruction *instr, LOperand *value, LOperand *temp1, LOperand *temp2, IntegerSignedness signedness)
static void EmitMathExp(MacroAssembler *masm, DwVfpRegister input, DwVfpRegister result, DwVfpRegister double_scratch1, DwVfpRegister double_scratch2, Register temp1, Register temp2, Register temp3)
bool IsNextEmittedBlock(int block_id) const
LCodeGen(LChunk *chunk, MacroAssembler *assembler, CompilationInfo *info)
static const int kSharedFunctionInfoOffset
static const int kMaxValue
friend class SafepointGenerator
ElementsKind GetHoleyElementsKind(ElementsKind packed_kind)
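GetHoleyElementsKind maps a packed fast elements kind to its holey counterpart, the variant that additionally permits the hole marker for deleted or never-written entries. The sketch below uses an illustrative enum rather than V8's ElementsKind values.

#include <cassert>

// Illustrative subset of the fast kinds: each packed kind has a holey twin;
// kinds that are already holey map to themselves.
enum ToyKind { kPackedSmi, kHoleySmi, kPackedDouble, kHoleyDouble,
               kPacked, kHoley };

ToyKind ToyGetHoleyElementsKind(ToyKind packed) {
  switch (packed) {
    case kPackedSmi:    return kHoleySmi;
    case kPackedDouble: return kHoleyDouble;
    case kPacked:       return kHoley;
    default:            return packed;  // already holey
  }
}

int main() {
  assert(ToyGetHoleyElementsKind(kPackedSmi) == kHoleySmi);
  return 0;
}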
static const int32_t kMaxOneByteCharCode
static const int kExponentOffset
bool EvalComparison(Token::Value op, double op1, double op2)
static uint32_t encode(bool value)
const uint32_t kStringEncodingMask
static const int kInstanceTypeOffset
static const int kMantissaOffset
friend class LDeferredCode