#if defined(V8_TARGET_ARCH_IA32)
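
// Helper that records a safepoint right after a call has been emitted:
// the pointer map captured by the register allocator is replayed into
// the safepoint table once the assembler has generated the call.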
class SafepointGenerator : public CallWrapper {
 public:
  SafepointGenerator(LCodeGen* codegen,
                     LPointerMap* pointers,
                     Safepoint::DeoptMode mode)
      : codegen_(codegen), pointers_(pointers), deopt_mode_(mode) { }

  virtual void BeforeCall(int call_size) const {}

  virtual void AfterCall() const {
    codegen_->RecordSafepoint(pointers_, deopt_mode_);
  }

 private:
  LCodeGen* codegen_;
  LPointerMap* pointers_;
  Safepoint::DeoptMode deopt_mode_;
};

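// Entry point of the code generator: runs the prologue, instruction
// body, deferred code, and safepoint table passes in order, bailing
// out as soon as any pass aborts.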
bool LCodeGen::GenerateCode() {
  HPhase phase("Z_Code generation", chunk());
  CpuFeatures::Scope scope(SSE2);

  CodeStub::GenerateFPStubs();

  // Open a frame scope to indicate that there is a frame on the stack.
  // MANUAL indicates that the scope shouldn't actually generate code to
  // set up the frame (that is done in GeneratePrologue).
  FrameScope frame_scope(masm_, StackFrame::MANUAL);

  dynamic_frame_alignment_ = (chunk()->num_double_slots() > 2 &&
                              !chunk()->graph()->is_recursive()) ||
                             !info()->osr_ast_id().IsNone();

  return GeneratePrologue() &&
         GenerateBody() &&
         GenerateDeferredCode() &&
         GenerateSafepointTable();
}

void LCodeGen::FinishCode(Handle<Code> code) {
  code->set_stack_slots(GetStackSlotCount());
  code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
  PopulateDeoptimizationData(code);
}

void LCodeGen::Abort(const char* reason) {
  info()->set_bailout_reason(reason);
  status_ = ABORTED;
}

void LCodeGen::Comment(const char* format, ...) {
  if (!FLAG_code_comments) return;
  char buffer[4 * KB];
  StringBuilder builder(buffer, ARRAY_SIZE(buffer));
  va_list arguments;
  va_start(arguments, format);
  builder.AddFormattedList(format, arguments);
  va_end(arguments);

  // Copy the string before recording it in the assembler to avoid
  // issues when the stack allocated buffer goes out of scope.
  size_t length = builder.position();
  Vector<char> copy = Vector<char>::New(length + 1);
  memcpy(copy.start(), builder.Finalize(), copy.length());
  masm()->RecordComment(copy.start());
}

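// Emits the function prologue: the strict-mode receiver patch, optional
// dynamic frame alignment padding, stack slot reservation, and (when
// needed) allocation of the local context.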
bool LCodeGen::GeneratePrologue() {
#ifdef DEBUG
  if (strlen(FLAG_stop_at) > 0 &&
      info_->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
    __ int3();
  }
#endif

  // Strict mode functions and builtins need to replace the receiver with
  // undefined when called as functions (without an explicit receiver).
  if (!info_->is_classic_mode() || info_->is_native()) {
    Label ok;
    __ test(ecx, Operand(ecx));
    __ j(zero, &ok, Label::kNear);
    // +1 for return address.
    int receiver_offset = (scope()->num_parameters() + 1) * kPointerSize;
    __ mov(Operand(esp, receiver_offset),
           Immediate(isolate()->factory()->undefined_value()));
    __ bind(&ok);
  }

  if (dynamic_frame_alignment_) {
    Label do_not_pad, align_loop;
    // Align esp to a multiple of 2 * kPointerSize by pushing a padding
    // marker and shifting arguments, receiver, and return address down.
    __ test(esp, Immediate(kPointerSize));
    __ j(not_zero, &do_not_pad, Label::kNear);
    __ push(Immediate(0));
    __ mov(ebx, esp);
    // Copy arguments, receiver, and return address.
    __ mov(ecx, Immediate(scope()->num_parameters() + 2));

    __ bind(&align_loop);
    __ mov(eax, Operand(ebx, 1 * kPointerSize));
    __ mov(Operand(ebx, 0), eax);
    __ add(Operand(ebx), Immediate(kPointerSize));
    __ sub(Operand(ecx), Immediate(1));
    __ j(not_zero, &align_loop, Label::kNear);
    __ mov(Operand(ebx, 0),
           Immediate(isolate()->factory()->frame_alignment_marker()));

    __ bind(&do_not_pad);
  }

  __ push(ebp);  // Caller's frame pointer.
  __ mov(ebp, esp);
  __ push(esi);  // Callee's context.
  __ push(edi);  // Callee's JS function.

  if (dynamic_frame_alignment_ && FLAG_debug_code) {
    __ test(esp, Immediate(kPointerSize));
    __ Assert(zero, "frame is expected to be aligned");
  }

  // Reserve space for the stack slots needed by the code.
  int slots = GetStackSlotCount();
  if (slots > 0) {
    if (FLAG_debug_code) {
      // Zap the reserved slots to ease debugging.
      __ mov(Operand(eax), Immediate(slots));
      Label loop;
      __ bind(&loop);
      __ push(Immediate(kSlotsZapValue));
      __ dec(eax);
      __ j(not_zero, &loop);
    } else {
      __ sub(Operand(esp), Immediate(slots * kPointerSize));
#ifdef _MSC_VER
      // On Windows, the stack may not be accessed more than one page
      // below the most recently mapped page, so touch every page of the
      // newly allocated area in turn.
      const int kPageSize = 4 * KB;
      for (int offset = slots * kPointerSize - kPageSize;
           offset > 0;
           offset -= kPageSize) {
        __ mov(Operand(esp, offset), eax);
      }
#endif
    }
  }

  // Possibly allocate a local context.
  int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
  if (heap_slots > 0) {
    Comment(";;; Allocate local context");
    // Argument to NewContext is the function, which is still in edi.
    __ push(edi);
    if (heap_slots <= FastNewContextStub::kMaximumSlots) {
      FastNewContextStub stub(heap_slots);
      __ CallStub(&stub);
    } else {
      __ CallRuntime(Runtime::kNewFunctionContext, 1);
    }
    RecordSafepoint(Safepoint::kNoLazyDeopt);
    // Context is returned in both eax and esi; it is kept live in esi.
    __ mov(Operand(ebp, StandardFrameConstants::kContextOffset), esi);

    // Copy parameters into context if necessary.
    int num_parameters = scope()->num_parameters();
    for (int i = 0; i < num_parameters; i++) {
      Variable* var = scope()->parameter(i);
      if (var->IsContextSlot()) {
        int parameter_offset = StandardFrameConstants::kCallerSPOffset +
            (num_parameters - 1 - i) * kPointerSize;
        // Load parameter from stack.
        __ mov(eax, Operand(ebp, parameter_offset));
        // Store it in the context.
        int context_offset = Context::SlotOffset(var->index());
        __ mov(Operand(esi, context_offset), eax);
        // Update the write barrier. This clobbers eax and ebx.
        __ RecordWriteContextSlot(esi, context_offset, eax, ebx,
                                  kDontSaveFPRegs);
      }
    }
    Comment(";;; End allocate local context");
  }

  // Trace the call.
  if (FLAG_trace) {
    __ CallRuntime(Runtime::kTraceEnter, 0);
  }
  return !is_aborted();
}

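// Walks the instruction list and compiles each LInstruction to native
// code, skipping instructions that follow a label replaced during
// chunk building.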
bool LCodeGen::GenerateBody() {
  bool emit_instructions = true;
  for (current_instruction_ = 0;
       !is_aborted() && current_instruction_ < instructions_->length();
       current_instruction_++) {
    LInstruction* instr = instructions_->at(current_instruction_);
    if (instr->IsLabel()) {
      LLabel* label = LLabel::cast(instr);
      emit_instructions = !label->HasReplacement();
    }
    if (emit_instructions) {
      Comment(";;; @%d: %s.", current_instruction_, instr->Mnemonic());
      instr->CompileToNative(this);
    }
  }
  EnsureSpaceForLazyDeopt();
  return !is_aborted();
}

bool LCodeGen::GenerateDeferredCode() {
  if (deferred_.length() > 0) {
    for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
      LDeferredCode* code = deferred_[i];
      __ bind(code->entry());
      Comment(";;; Deferred code @%d: %s.",
              code->instruction_index(),
              code->instr()->Mnemonic());
      code->Generate();
      __ jmp(code->exit());
    }
  }

  // Deferred code is the last part of the instruction sequence. Mark
  // the generated code as done unless we bailed out.
  if (!is_aborted()) status_ = DONE;
  return !is_aborted();
}

bool LCodeGen::GenerateSafepointTable() {
  safepoints_.Emit(masm(), GetStackSlotCount());
  return !is_aborted();
}

XMMRegister LCodeGen::ToDoubleRegister(int index) const {
  return XMMRegister::FromAllocationIndex(index);
}

XMMRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
  ASSERT(op->IsDoubleRegister());
  return ToDoubleRegister(op->index());
}

int LCodeGen::ToInteger32(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  ASSERT(chunk_->LookupLiteralRepresentation(op).IsInteger32());
  ASSERT(constant->HasInteger32Value());
  return constant->Integer32Value();
}

Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  ASSERT(chunk_->LookupLiteralRepresentation(op).IsTagged());
  return constant->handle();
}

double LCodeGen::ToDouble(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  ASSERT(constant->HasDoubleValue());
  return constant->DoubleValue();
}

bool LCodeGen::IsInteger32(LConstantOperand* op) const {
  return chunk_->LookupLiteralRepresentation(op).IsInteger32();
}

Operand LCodeGen::ToOperand(LOperand* op) const {
  if (op->IsRegister()) return Operand(ToRegister(op));
  if (op->IsDoubleRegister()) return Operand(ToDoubleRegister(op));
  ASSERT(op->IsStackSlot() || op->IsDoubleStackSlot());
  int index = op->index();
  if (index >= 0) {
    // Local or spill slot. Skip the frame pointer, function, and
    // context in the fixed part of the frame.
    return Operand(ebp, -(index + 3) * kPointerSize);
  } else {
    // Incoming parameter. Skip the return address.
    return Operand(ebp, -(index - 1) * kPointerSize);
  }
}

Operand LCodeGen::HighOperand(LOperand* op) {
  ASSERT(op->IsDoubleStackSlot());
  int index = op->index();
  int offset = (index >= 0) ? index + 3 : index - 1;
  // The high word sits one pointer above the slot's base address.
  return Operand(ebp, -offset * kPointerSize + kPointerSize);
}

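// Writes one deoptimization translation command per value in the
// environment, recursing into outer environments first so inlined
// frames are described from the outermost frame inward.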
void LCodeGen::WriteTranslation(LEnvironment* environment,
                                Translation* translation,
                                int* arguments_index,
                                int* arguments_count) {
  if (environment == NULL) return;

  // The translation includes one command per value in the environment.
  int translation_size = environment->values()->length();
  // The output frame height does not include the parameters.
  int height = translation_size - environment->parameter_count();

  // Function parameters are arguments to the outermost environment.
  *arguments_index = -environment->parameter_count();
  *arguments_count = environment->parameter_count();

  WriteTranslation(environment->outer(),
                   translation,
                   arguments_index,
                   arguments_count);
  int closure_id = *info()->closure() != *environment->closure()
      ? DefineDeoptimizationLiteral(environment->closure())
      : Translation::kSelfLiteralId;
  switch (environment->frame_type()) {
    case JS_FUNCTION:
      translation->BeginJSFrame(environment->ast_id(), closure_id, height);
      break;
    case JS_CONSTRUCT:
      translation->BeginConstructStubFrame(closure_id, translation_size);
      break;
    case JS_GETTER:
      ASSERT(translation_size == 1);
      translation->BeginGetterStubFrame(closure_id);
      break;
    case JS_SETTER:
      ASSERT(translation_size == 2);
      translation->BeginSetterStubFrame(closure_id);
      break;
    case ARGUMENTS_ADAPTOR:
      translation->BeginArgumentsAdaptorFrame(closure_id, translation_size);
      break;
  }

  // Inlined frames which push their arguments cause the index to be
  // bumped and another stack area to be used for materialization.
  if (environment->entry() != NULL &&
      environment->entry()->arguments_pushed()) {
    *arguments_index = *arguments_index < 0
        ? GetStackSlotCount()
        : *arguments_index + *arguments_count;
    *arguments_count = environment->entry()->arguments_count() + 1;
  }

  for (int i = 0; i < translation_size; ++i) {
    LOperand* value = environment->values()->at(i);
    // spilled_registers_ and spilled_double_registers_ are either
    // both NULL or both set.
    if (environment->spilled_registers() != NULL && value != NULL) {
      if (value->IsRegister() &&
          environment->spilled_registers()[value->index()] != NULL) {
        translation->MarkDuplicate();
        AddToTranslation(translation,
                         environment->spilled_registers()[value->index()],
                         environment->HasTaggedValueAt(i),
                         environment->HasUint32ValueAt(i),
                         *arguments_index,
                         *arguments_count);
      } else if (
          value->IsDoubleRegister() &&
          environment->spilled_double_registers()[value->index()] != NULL) {
        translation->MarkDuplicate();
        AddToTranslation(
            translation,
            environment->spilled_double_registers()[value->index()],
            false,
            false,
            *arguments_index,
            *arguments_count);
      }
    }

    AddToTranslation(translation,
                     value,
                     environment->HasTaggedValueAt(i),
                     environment->HasUint32ValueAt(i),
                     *arguments_index,
                     *arguments_count);
  }
}

void LCodeGen::AddToTranslation(Translation* translation,
                                LOperand* op,
                                bool is_tagged,
                                bool is_uint32,
                                int arguments_index,
                                int arguments_count) {
  if (op == NULL) {
    // A NULL operand marks a value that is not present and must be
    // reconstructed from the deoptimizer; currently this is only used
    // for the arguments object.
    translation->StoreArgumentsObject(arguments_index, arguments_count);
  } else if (op->IsStackSlot()) {
    if (is_tagged) {
      translation->StoreStackSlot(op->index());
    } else if (is_uint32) {
      translation->StoreUint32StackSlot(op->index());
    } else {
      translation->StoreInt32StackSlot(op->index());
    }
  } else if (op->IsDoubleStackSlot()) {
    translation->StoreDoubleStackSlot(op->index());
  } else if (op->IsArgument()) {
    ASSERT(is_tagged);
    int src_index = GetStackSlotCount() + op->index();
    translation->StoreStackSlot(src_index);
  } else if (op->IsRegister()) {
    Register reg = ToRegister(op);
    if (is_tagged) {
      translation->StoreRegister(reg);
    } else if (is_uint32) {
      translation->StoreUint32Register(reg);
    } else {
      translation->StoreInt32Register(reg);
    }
  } else if (op->IsDoubleRegister()) {
    XMMRegister reg = ToDoubleRegister(op);
    translation->StoreDoubleRegister(reg);
  } else if (op->IsConstantOperand()) {
    HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op));
    int src_index = DefineDeoptimizationLiteral(constant->handle());
    translation->StoreLiteral(src_index);
  } else {
    UNREACHABLE();
  }
}

void LCodeGen::CallCodeGeneric(Handle<Code> code,
                               RelocInfo::Mode mode,
                               LInstruction* instr,
                               SafepointMode safepoint_mode) {
  LPointerMap* pointers = instr->pointer_map();
  RecordPosition(pointers->position());
  __ call(code, mode);
  RecordSafepointWithLazyDeopt(instr, safepoint_mode);

  // Signal that we don't inline smi code before these stubs in the
  // optimizing code generator.
  if (code->kind() == Code::BINARY_OP_IC ||
      code->kind() == Code::COMPARE_IC) {
    __ nop();
  }
}

void LCodeGen::CallCode(Handle<Code> code,
                        RelocInfo::Mode mode,
                        LInstruction* instr) {
  CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT);
}

void LCodeGen::CallRuntime(const Runtime::Function* fun,
                           int argc,
                           LInstruction* instr) {
  ASSERT(instr->HasPointerMap());
  LPointerMap* pointers = instr->pointer_map();
  RecordPosition(pointers->position());
  __ CallRuntime(fun, argc);
  RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
}

void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
                                       int argc,
                                       LInstruction* instr,
                                       LOperand* context) {
  if (context->IsRegister()) {
    if (!ToRegister(context).is(esi)) {
      __ mov(esi, ToRegister(context));
    }
  } else if (context->IsStackSlot()) {
    __ mov(esi, ToOperand(context));
  } else if (context->IsConstantOperand()) {
    HConstant* constant =
        chunk_->LookupConstant(LConstantOperand::cast(context));
    __ LoadHeapObject(esi, Handle<Context>::cast(constant->handle()));
  } else {
    UNREACHABLE();
  }

  __ CallRuntimeSaveDoubles(id);
  RecordSafepointWithRegisters(
      instr->pointer_map(), argc, Safepoint::kNoLazyDeopt);
}

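// Builds the deoptimization translation for an environment the first
// time it is registered and remembers its index in the deoptimization
// table so DeoptimizeIf can refer back to it.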
void LCodeGen::RegisterEnvironmentForDeoptimization(
    LEnvironment* environment, Safepoint::DeoptMode mode) {
  if (!environment->HasBeenRegistered()) {
    int frame_count = 0;
    int jsframe_count = 0;
    int args_index = 0;
    int args_count = 0;
    for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
      ++frame_count;
      if (e->frame_type() == JS_FUNCTION) {
        ++jsframe_count;
      }
    }
    Translation translation(&translations_, frame_count, jsframe_count, zone());
    WriteTranslation(environment, &translation, &args_index, &args_count);
    int deoptimization_index = deoptimizations_.length();
    int pc_offset = masm()->pc_offset();
    environment->Register(deoptimization_index,
                          translation.index(),
                          (mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
    deoptimizations_.Add(environment, zone());
  }
}

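// Emits a conditional jump to the eager deoptimization entry for the
// given environment; with --trap-on-deopt an int3 breakpoint is
// inserted on the deopting path.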
void LCodeGen::DeoptimizeIf(Condition cc, LEnvironment* environment) {
  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
  ASSERT(environment->HasBeenRegistered());
  int id = environment->deoptimization_index();
  Address entry = Deoptimizer::GetDeoptimizationEntry(id, Deoptimizer::EAGER);
  if (entry == NULL) {
    Abort("bailout was not prepared");
    return;
  }

  if (FLAG_deopt_every_n_times != 0) {
    Handle<SharedFunctionInfo> shared(info_->shared_info());
    // Stress-deopt counter bookkeeping on the shared function info.
  }

  if (cc == no_condition) {
    if (FLAG_trap_on_deopt) __ int3();
    __ jmp(entry, RelocInfo::RUNTIME_ENTRY);
  } else {
    if (FLAG_trap_on_deopt) {
      Label done;
      __ j(NegateCondition(cc), &done, Label::kNear);
      __ int3();
      __ jmp(entry, RelocInfo::RUNTIME_ENTRY);
      __ bind(&done);
    } else {
      __ j(cc, entry, RelocInfo::RUNTIME_ENTRY);
    }
  }
}

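// Packages the collected translations, literals, and per-environment
// entries into the DeoptimizationInputData attached to the code object.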
void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
  int length = deoptimizations_.length();
  if (length == 0) return;
  Handle<DeoptimizationInputData> data =
      factory()->NewDeoptimizationInputData(length, TENURED);

  Handle<ByteArray> translations = translations_.CreateByteArray();
  data->SetTranslationByteArray(*translations);
  data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));

  Handle<FixedArray> literals =
      factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
  for (int i = 0; i < deoptimization_literals_.length(); i++) {
    literals->set(i, *deoptimization_literals_[i]);
  }
  data->SetLiteralArray(*literals);

  data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id().ToInt()));

  // Populate the deoptimization entries.
  for (int i = 0; i < length; i++) {
    LEnvironment* env = deoptimizations_[i];
    data->SetAstId(i, env->ast_id());
    data->SetTranslationIndex(i, Smi::FromInt(env->translation_index()));
    data->SetArgumentsStackHeight(i,
                                  Smi::FromInt(env->arguments_stack_height()));
  }
  code->set_deoptimization_data(*data);
}

int LCodeGen::DefineDeoptimizationLiteral(Handle<Object> literal) {
  int result = deoptimization_literals_.length();
  for (int i = 0; i < deoptimization_literals_.length(); ++i) {
    if (deoptimization_literals_[i].is_identical_to(literal)) return i;
  }
  deoptimization_literals_.Add(literal, zone());
  return result;
}

void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
  ASSERT(deoptimization_literals_.length() == 0);

  const ZoneList<Handle<JSFunction> >* inlined_closures =
      chunk()->inlined_closures();

  for (int i = 0, length = inlined_closures->length();
       i < length;
       i++) {
    DefineDeoptimizationLiteral(inlined_closures->at(i));
  }

  inlined_function_count_ = deoptimization_literals_.length();
}

void LCodeGen::RecordSafepointWithLazyDeopt(
    LInstruction* instr, SafepointMode safepoint_mode) {
  if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
    RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt);
  } else {
    ASSERT(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
    RecordSafepointWithRegisters(
        instr->pointer_map(), 0, Safepoint::kLazyDeopt);
  }
}

void LCodeGen::RecordSafepoint(
    LPointerMap* pointers,
    Safepoint::Kind kind,
    int arguments,
    Safepoint::DeoptMode deopt_mode) {
  ASSERT(kind == expected_safepoint_kind_);
  const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();
  Safepoint safepoint =
      safepoints_.DefineSafepoint(masm(), kind, arguments, deopt_mode);
  for (int i = 0; i < operands->length(); i++) {
    LOperand* pointer = operands->at(i);
    if (pointer->IsStackSlot()) {
      safepoint.DefinePointerSlot(pointer->index(), zone());
    } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
      safepoint.DefinePointerRegister(ToRegister(pointer), zone());
    }
  }
}

void LCodeGen::RecordSafepoint(LPointerMap* pointers,
                               Safepoint::DeoptMode mode) {
  RecordSafepoint(pointers, Safepoint::kSimple, 0, mode);
}

void LCodeGen::RecordSafepoint(Safepoint::DeoptMode mode) {
  LPointerMap empty_pointers(RelocInfo::kNoPosition, zone());
  RecordSafepoint(&empty_pointers, mode);
}

void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
                                            int arguments,
                                            Safepoint::DeoptMode mode) {
  RecordSafepoint(pointers, Safepoint::kWithRegisters, arguments, mode);
}

void LCodeGen::RecordPosition(int position) {
  if (position == RelocInfo::kNoPosition) return;
  masm()->positions_recorder()->RecordPosition(position);
}

void LCodeGen::DoLabel(LLabel* label) {
  if (label->is_loop_header()) {
    Comment(";;; B%d - LOOP entry", label->block_id());
  } else {
    Comment(";;; B%d", label->block_id());
  }
  __ bind(label->label());
  current_block_ = label->block_id();
  DoGap(label);
}

void LCodeGen::DoParallelMove(LParallelMove* move) {
  resolver_.Resolve(move);
}

void LCodeGen::DoGap(LGap* gap) {
  for (int i = LGap::FIRST_INNER_POSITION;
       i <= LGap::LAST_INNER_POSITION;
       i++) {
    LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i);
    LParallelMove* move = gap->GetParallelMove(inner_pos);
    if (move != NULL) DoParallelMove(move);
  }
}

void LCodeGen::DoInstructionGap(LInstructionGap* instr) {
  DoGap(instr);
}

void LCodeGen::DoParameter(LParameter* instr) {
  // Nothing to do.
}

void LCodeGen::DoCallStub(LCallStub* instr) {
  ASSERT(ToRegister(instr->result()).is(eax));
  switch (instr->hydrogen()->major_key()) {
    case CodeStub::RegExpConstructResult: {
      RegExpConstructResultStub stub;
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
      break;
    }
    case CodeStub::RegExpExec: {
      RegExpExecStub stub;
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
      break;
    }
    case CodeStub::SubString: {
      SubStringStub stub;
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
      break;
    }
    case CodeStub::NumberToString: {
      NumberToStringStub stub;
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
      break;
    }
    case CodeStub::StringAdd: {
      StringAddStub stub(NO_STRING_ADD_FLAGS);
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
      break;
    }
    case CodeStub::StringCompare: {
      StringCompareStub stub;
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
      break;
    }
    case CodeStub::TranscendentalCache: {
      TranscendentalCacheStub stub(instr->transcendental_type(),
                                   TranscendentalCacheStub::TAGGED);
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
      break;
    }
    default:
      UNREACHABLE();
  }
}

void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
  // Record the address of the first unknown OSR value as the place to enter.
  if (osr_pc_offset_ == -1) osr_pc_offset_ = masm()->pc_offset();
}

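// Computes left % right. A power-of-two divisor reduces to a mask; for
// general divisors several fast paths (divisor greater than dividend,
// runtime power-of-two check, a few unrolled subtractions) are tried
// before falling back to the idiv instruction.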
void LCodeGen::DoModI(LModI* instr) {
  if (instr->hydrogen()->HasPowerOf2Divisor()) {
    Register dividend = ToRegister(instr->left());

    int divisor =
        HConstant::cast(instr->hydrogen()->right())->Integer32Value();
    if (divisor < 0) divisor = -divisor;

    Label positive_dividend, done;
    __ test(dividend, Operand(dividend));
    __ j(not_sign, &positive_dividend, Label::kNear);
    __ neg(dividend);
    __ and_(dividend, divisor - 1);
    __ neg(dividend);
    __ jmp(&done, Label::kNear);
    __ bind(&positive_dividend);
    __ and_(dividend, divisor - 1);
    __ bind(&done);
  } else {
    Label done, remainder_eq_dividend, slow, do_subtraction, both_positive;
    Register left_reg = ToRegister(instr->left());
    Register right_reg = ToRegister(instr->right());
    Register result_reg = ToRegister(instr->result());

    // Check for x % 0.
    if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
      __ test(right_reg, Operand(right_reg));
      DeoptimizeIf(zero, instr->environment());
    }

    __ test(left_reg, Operand(left_reg));
    __ j(zero, &remainder_eq_dividend, Label::kNear);
    __ j(sign, &slow, Label::kNear);

    __ test(right_reg, Operand(right_reg));
    __ j(not_sign, &both_positive, Label::kNear);

    __ bind(&both_positive);
    // If the divisor is greater than the dividend, the remainder is the
    // dividend itself.
    __ cmp(left_reg, Operand(right_reg));
    __ j(less, &remainder_eq_dividend, Label::kNear);

    // Check if the divisor is a power of two; if so, a mask suffices.
    Register scratch = ToRegister(instr->temp());
    __ mov(scratch, right_reg);
    __ sub(Operand(scratch), Immediate(1));
    __ test(scratch, Operand(right_reg));
    __ j(not_zero, &do_subtraction, Label::kNear);
    __ and_(left_reg, Operand(scratch));
    __ jmp(&remainder_eq_dividend, Label::kNear);

    __ bind(&do_subtraction);
    const int kUnfolds = 3;
    // Try a few subtractions of the dividend.
    __ mov(scratch, left_reg);
    for (int i = 0; i < kUnfolds; i++) {
      // Reduce the dividend by the divisor.
      __ sub(left_reg, Operand(right_reg));
      // Check if the dividend is less than the divisor.
      __ cmp(left_reg, Operand(right_reg));
      __ j(less, &remainder_eq_dividend, Label::kNear);
    }
    __ mov(left_reg, scratch);

    // Slow case, using the idiv instruction.
    __ bind(&slow);
    __ cdq();
    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
      // Check for (0 % -x) that would produce negative zero.
      Label positive_left, done;
      __ test(left_reg, Operand(left_reg));
      __ j(not_sign, &positive_left, Label::kNear);
      __ idiv(right_reg);
      // Test the remainder for 0, because then the result would be -0.
      __ test(result_reg, Operand(result_reg));
      __ j(not_zero, &done, Label::kNear);
      DeoptimizeIf(no_condition, instr->environment());
      __ bind(&positive_left);
      __ idiv(right_reg);
      __ bind(&done);
    } else {
      __ idiv(right_reg);
    }
    __ jmp(&done, Label::kNear);

    __ bind(&remainder_eq_dividend);
    __ mov(result_reg, left_reg);

    __ bind(&done);
  }
}

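// Computes left / right with idiv, deoptimizing on division by zero,
// on -0 results, on kMinInt / -1 overflow, and on a non-zero remainder
// (the instruction only supports exact integer division).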
void LCodeGen::DoDivI(LDivI* instr) {
  LOperand* right = instr->right();

  Register left_reg = eax;
  Register right_reg = ToRegister(right);

  // Check for x / 0.
  if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
    __ test(right_reg, ToOperand(right));
    DeoptimizeIf(zero, instr->environment());
  }

  // Check for (0 / -x) that will produce negative zero.
  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    Label left_not_zero;
    __ test(left_reg, Operand(left_reg));
    __ j(not_zero, &left_not_zero, Label::kNear);
    __ test(right_reg, ToOperand(right));
    DeoptimizeIf(sign, instr->environment());
    __ bind(&left_not_zero);
  }

  // Check for (-kMinInt / -1).
  if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
    Label left_not_min_int;
    __ cmp(left_reg, kMinInt);
    __ j(not_zero, &left_not_min_int, Label::kNear);
    __ cmp(right_reg, -1);
    DeoptimizeIf(zero, instr->environment());
    __ bind(&left_not_min_int);
  }

  // Sign extend to edx and divide.
  __ cdq();
  __ idiv(right_reg);

  // Deoptimize if the remainder is not 0.
  __ test(edx, Operand(edx));
  DeoptimizeIf(not_zero, instr->environment());
}

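// Computes floor(left / right) for a constant divisor. Powers of two
// are handled with shifts; other divisors use the classic fixed-point
// reciprocal trick: multiply by a rounded 2^shift / divisor constant
// and arithmetically shift the 64-bit product right by shift bits.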
void LCodeGen::DoMathFloorOfDiv(LMathFloorOfDiv* instr) {
  ASSERT(instr->right()->IsConstantOperand());

  Register dividend = ToRegister(instr->left());
  int32_t divisor = ToInteger32(LConstantOperand::cast(instr->right()));
  Register result = ToRegister(instr->result());

  switch (divisor) {
    case 0:
      DeoptimizeIf(no_condition, instr->environment());
      return;

    case 1:
      __ Move(result, dividend);
      return;

    case -1:
      __ Move(result, dividend);
      __ neg(result);
      if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
        DeoptimizeIf(zero, instr->environment());
      }
      if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
        DeoptimizeIf(overflow, instr->environment());
      }
      return;
  }

  uint32_t divisor_abs = abs(divisor);
  if (IsPowerOf2(divisor_abs)) {
    int32_t power = WhichPowerOf2(divisor_abs);
    if (divisor < 0) {
      // The input (dividend) is clobbered. The sequence is tedious
      // because neg(dividend) might overflow.
      __ mov(result, dividend);
      __ sar(dividend, 31);
      __ neg(result);
      if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
        DeoptimizeIf(zero, instr->environment());
      }
      __ shl(dividend, 32 - power);
      __ sar(result, power);
      __ not_(dividend);
      // Clear result if we need to round towards -Infinity.
      __ and_(result, dividend);
    } else {
      __ Move(result, dividend);
      __ sar(result, power);
    }
  } else {
    Register scratch = ToRegister(instr->temp());

    // Find b such that 2^b < divisor_abs < 2^(b+1).
    unsigned b = 31 - CompilerIntrinsics::CountLeadingZeros(divisor_abs);
    unsigned shift = 32 + b;  // Precision +1 bit (effectively).
    double multiplier_f =
        static_cast<double>(static_cast<uint64_t>(1) << shift) / divisor_abs;
    int64_t multiplier;
    if (multiplier_f - floor(multiplier_f) < 0.5) {
      multiplier = static_cast<int64_t>(floor(multiplier_f));
    } else {
      multiplier = static_cast<int64_t>(floor(multiplier_f)) + 1;
    }
    // The multiplier is a uint32.
    ASSERT(multiplier > 0 &&
           multiplier < (static_cast<int64_t>(1) << 32));
    __ mov(scratch, dividend);
    if (divisor < 0 &&
        instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
      __ test(dividend, dividend);
      DeoptimizeIf(zero, instr->environment());
    }
    __ mov(edx, static_cast<int32_t>(multiplier));
    __ imul(edx);
    if (static_cast<int32_t>(multiplier) < 0) {
      __ add(edx, scratch);
    }
    Register reg_lo = eax;
    Register reg_byte_scratch = scratch;
    if (!reg_byte_scratch.is_byte_register()) {
      // We need a byte register to extract the rounding carry.
      __ xchg(reg_lo, reg_byte_scratch);
      reg_lo = scratch;
      reg_byte_scratch = eax;
    }
    if (divisor < 0) {
      __ xor_(reg_byte_scratch, reg_byte_scratch);
      __ cmp(reg_lo, 0x40000000);
      __ setcc(above, reg_byte_scratch);
      __ neg(edx);
      __ sub(edx, reg_byte_scratch);
    } else {
      __ xor_(reg_byte_scratch, reg_byte_scratch);
      __ cmp(reg_lo, 0xC0000000);
      __ setcc(above_equal, reg_byte_scratch);
      __ add(edx, reg_byte_scratch);
    }
    __ sar(edx, shift - 32);
  }
}

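// Computes left * right, strength-reducing constant multiplications to
// negation, clearing, addition, or lea instructions where profitable.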
void LCodeGen::DoMulI(LMulI* instr) {
  Register left = ToRegister(instr->left());
  LOperand* right = instr->right();

  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    // Save the input before it is clobbered by the multiply.
    __ mov(ToRegister(instr->temp()), left);
  }

  if (right->IsConstantOperand()) {
    int constant = ToInteger32(LConstantOperand::cast(right));
    if (constant == -1) {
      __ neg(left);
    } else if (constant == 0) {
      __ xor_(left, Operand(left));
    } else if (constant == 2) {
      __ add(left, Operand(left));
    } else if (!instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
      switch (constant) {
        case 3:
          __ lea(left, Operand(left, left, times_2, 0));
          break;
        case 5:
          __ lea(left, Operand(left, left, times_4, 0));
          break;
        case 9:
          __ lea(left, Operand(left, left, times_8, 0));
          break;
        default:
          __ imul(left, left, constant);
          break;
      }
    } else {
      __ imul(left, left, constant);
    }
  } else {
    __ imul(left, ToOperand(right));
  }

  if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
    DeoptimizeIf(overflow, instr->environment());
  }

  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    // Bail out if the result is supposed to be negative zero.
    Label done;
    __ test(left, Operand(left));
    __ j(not_zero, &done, Label::kNear);
    if (right->IsConstantOperand()) {
      if (ToInteger32(LConstantOperand::cast(right)) <= 0) {
        __ cmp(ToRegister(instr->temp()), Immediate(0));
        DeoptimizeIf(less, instr->environment());
      }
    } else {
      // Test the saved operand for a negative sign.
      __ or_(ToRegister(instr->temp()), Operand(left));
      DeoptimizeIf(sign, instr->environment());
    }
    __ bind(&done);
  }
}

void LCodeGen::DoBitI(LBitI* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();
  ASSERT(left->Equals(instr->result()));
  ASSERT(left->IsRegister());

  if (right->IsConstantOperand()) {
    int right_operand = ToInteger32(LConstantOperand::cast(right));
    switch (instr->op()) {
      case Token::BIT_AND:
        __ and_(ToRegister(left), right_operand);
        break;
      case Token::BIT_OR:
        __ or_(ToRegister(left), right_operand);
        break;
      case Token::BIT_XOR:
        __ xor_(ToRegister(left), right_operand);
        break;
      default:
        UNREACHABLE();
        break;
    }
  } else {
    switch (instr->op()) {
      case Token::BIT_AND:
        __ and_(ToRegister(left), ToOperand(right));
        break;
      case Token::BIT_OR:
        __ or_(ToRegister(left), ToOperand(right));
        break;
      case Token::BIT_XOR:
        __ xor_(ToRegister(left), ToOperand(right));
        break;
      default:
        UNREACHABLE();
        break;
    }
  }
}

void LCodeGen::DoShiftI(LShiftI* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();
  ASSERT(left->Equals(instr->result()));
  ASSERT(left->IsRegister());
  if (right->IsRegister()) {
    ASSERT(ToRegister(right).is(ecx));
    switch (instr->op()) {
      case Token::SAR:
        __ sar_cl(ToRegister(left));
        break;
      case Token::SHR:
        __ shr_cl(ToRegister(left));
        if (instr->can_deopt()) {
          // A logical shift by a variable amount can produce a value
          // outside the int32 range; deopt if the sign bit is set.
          __ test(ToRegister(left), Immediate(0x80000000));
          DeoptimizeIf(not_zero, instr->environment());
        }
        break;
      case Token::SHL:
        __ shl_cl(ToRegister(left));
        break;
      default:
        UNREACHABLE();
        break;
    }
  } else {
    int value = ToInteger32(LConstantOperand::cast(right));
    uint8_t shift_count = static_cast<uint8_t>(value & 0x1F);
    switch (instr->op()) {
      case Token::SAR:
        if (shift_count != 0) {
          __ sar(ToRegister(left), shift_count);
        }
        break;
      case Token::SHR:
        if (shift_count == 0 && instr->can_deopt()) {
          __ test(ToRegister(left), Immediate(0x80000000));
          DeoptimizeIf(not_zero, instr->environment());
        } else {
          __ shr(ToRegister(left), shift_count);
        }
        break;
      case Token::SHL:
        if (shift_count != 0) {
          __ shl(ToRegister(left), shift_count);
        }
        break;
      default:
        UNREACHABLE();
        break;
    }
  }
}

void LCodeGen::DoSubI(LSubI* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();
  ASSERT(left->Equals(instr->result()));

  if (right->IsConstantOperand()) {
    __ sub(ToOperand(left), ToInteger32Immediate(right));
  } else {
    __ sub(ToRegister(left), ToOperand(right));
  }
  if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
    DeoptimizeIf(overflow, instr->environment());
  }
}

void LCodeGen::DoConstantI(LConstantI* instr) {
  ASSERT(instr->result()->IsRegister());
  __ Set(ToRegister(instr->result()), Immediate(instr->value()));
}

void LCodeGen::DoConstantD(LConstantD* instr) {
  ASSERT(instr->result()->IsDoubleRegister());
  XMMRegister res = ToDoubleRegister(instr->result());
  double v = instr->value();
  // Use xor to produce +0.0 in a fast and compact way, but avoid doing
  // so if the constant is -0.0.
  if (BitCast<uint64_t, double>(v) == 0) {
    __ xorps(res, res);
  } else {
    Register temp = ToRegister(instr->temp());
    uint64_t int_val = BitCast<uint64_t, double>(v);
    int32_t lower = static_cast<int32_t>(int_val);
    int32_t upper = static_cast<int32_t>(int_val >> kBitsPerInt);
    if (CpuFeatures::IsSupported(SSE4_1)) {
      CpuFeatures::Scope scope(SSE4_1);
      if (lower != 0) {
        __ Set(temp, Immediate(lower));
        __ movd(res, Operand(temp));
        __ Set(temp, Immediate(upper));
        __ pinsrd(res, Operand(temp), 1);
      } else {
        __ xorps(res, res);
        __ Set(temp, Immediate(upper));
        __ pinsrd(res, Operand(temp), 1);
      }
    } else {
      __ Set(temp, Immediate(upper));
      __ movd(res, Operand(temp));
      __ psllq(res, 32);
      if (lower != 0) {
        __ Set(temp, Immediate(lower));
        __ movd(xmm0, Operand(temp));
        __ por(res, xmm0);
      }
    }
  }
}

void LCodeGen::DoConstantT(LConstantT* instr) {
  Register reg = ToRegister(instr->result());
  Handle<Object> handle = instr->value();
  if (handle->IsHeapObject()) {
    __ LoadHeapObject(reg, Handle<HeapObject>::cast(handle));
  } else {
    __ Set(reg, Immediate(handle));
  }
}

void LCodeGen::DoJSArrayLength(LJSArrayLength* instr) {
  Register result = ToRegister(instr->result());
  Register array = ToRegister(instr->value());
  __ mov(result, FieldOperand(array, JSArray::kLengthOffset));
}

void LCodeGen::DoFixedArrayBaseLength(
    LFixedArrayBaseLength* instr) {
  Register result = ToRegister(instr->result());
  Register array = ToRegister(instr->value());
  __ mov(result, FieldOperand(array, FixedArrayBase::kLengthOffset));
}

void LCodeGen::DoMapEnumLength(LMapEnumLength* instr) {
  Register result = ToRegister(instr->result());
  Register map = ToRegister(instr->value());
  __ EnumLength(result, map);
}

void LCodeGen::DoElementsKind(LElementsKind* instr) {
  Register result = ToRegister(instr->result());
  Register input = ToRegister(instr->value());
  // Load map into result, then extract the elements kind from bit field 2.
  __ mov(result, FieldOperand(input, HeapObject::kMapOffset));
  __ mov(result, FieldOperand(result, Map::kBitField2Offset));
  __ and_(result, Map::kElementsKindMask);
  __ shr(result, Map::kElementsKindShift);
}

void LCodeGen::DoValueOf(LValueOf* instr) {
  Register input = ToRegister(instr->value());
  Register result = ToRegister(instr->result());
  ASSERT(input.is(result));
  Label done;
  // If the object is a smi, return the object.
  __ JumpIfSmi(input, &done, Label::kNear);
  // If the object is not a value type, return the object.
  __ CmpObjectType(input, JS_VALUE_TYPE, ToRegister(instr->temp()));
  __ j(not_equal, &done, Label::kNear);
  __ mov(result, FieldOperand(input, JSValue::kValueOffset));
  __ bind(&done);
}

void LCodeGen::DoDateField(LDateField* instr) {
  Register object = ToRegister(instr->date());
  Register result = ToRegister(instr->result());
  Register scratch = ToRegister(instr->temp());
  Smi* index = instr->index();
  Label runtime, done;
  ASSERT(object.is(result));

  __ test(object, Immediate(kSmiTagMask));
  DeoptimizeIf(zero, instr->environment());
  __ CmpObjectType(object, JS_DATE_TYPE, scratch);
  DeoptimizeIf(not_equal, instr->environment());

  if (index->value() == 0) {
    __ mov(result, FieldOperand(object, JSDate::kValueOffset));
  } else {
    if (index->value() < JSDate::kFirstUncachedField) {
      ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
      __ mov(scratch, Operand::StaticVariable(stamp));
      __ cmp(scratch, FieldOperand(object, JSDate::kCacheStampOffset));
      __ j(not_equal, &runtime, Label::kNear);
      __ mov(result, FieldOperand(object, JSDate::kValueOffset +
                                          kPointerSize * index->value()));
      __ jmp(&done);
    }
    __ bind(&runtime);
    __ PrepareCallCFunction(2, scratch);
    __ mov(Operand(esp, 0), object);
    __ mov(Operand(esp, 1 * kPointerSize), Immediate(index));
    __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
    __ bind(&done);
  }
}

void LCodeGen::DoBitNotI(LBitNotI* instr) {
  LOperand* input = instr->value();
  ASSERT(input->Equals(instr->result()));
  __ not_(ToRegister(input));
}

void LCodeGen::DoThrow(LThrow* instr) {
  __ push(ToOperand(instr->value()));
  CallRuntime(Runtime::kThrow, 1, instr);

  if (FLAG_debug_code) {
    Comment("Unreachable code.");
    __ int3();
  }
}

void LCodeGen::DoAddI(LAddI* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();
  ASSERT(left->Equals(instr->result()));

  if (right->IsConstantOperand()) {
    __ add(ToOperand(left), ToInteger32Immediate(right));
  } else {
    __ add(ToRegister(left), ToOperand(right));
  }

  if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
    DeoptimizeIf(overflow, instr->environment());
  }
}

void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();
  ASSERT(left->Equals(instr->result()));
  HMathMinMax::Operation operation = instr->hydrogen()->operation();
  if (instr->hydrogen()->representation().IsInteger32()) {
    Label return_left;
    Condition condition = (operation == HMathMinMax::kMathMin)
        ? less_equal
        : greater_equal;
    Register left_reg = ToRegister(left);
    if (right->IsConstantOperand()) {
      Operand left_op = ToOperand(left);
      Immediate right_imm = ToInteger32Immediate(right);
      __ cmp(left_op, right_imm);
      __ j(condition, &return_left, Label::kNear);
      __ mov(left_op, right_imm);
    } else {
      Operand right_op = ToOperand(right);
      __ cmp(left_reg, right_op);
      __ j(condition, &return_left, Label::kNear);
      __ mov(left_reg, right_op);
    }
    __ bind(&return_left);
  } else {
    ASSERT(instr->hydrogen()->representation().IsDouble());
    Label check_nan_left, check_zero, return_left, return_right;
    Condition condition = (operation == HMathMinMax::kMathMin) ? below : above;
    XMMRegister left_reg = ToDoubleRegister(left);
    XMMRegister right_reg = ToDoubleRegister(right);
    __ ucomisd(left_reg, right_reg);
    __ j(parity_even, &check_nan_left, Label::kNear);  // At least one NaN.
    __ j(equal, &check_zero, Label::kNear);            // left == right.
    __ j(condition, &return_left, Label::kNear);
    __ jmp(&return_right, Label::kNear);

    __ bind(&check_zero);
    XMMRegister xmm_scratch = xmm0;
    __ xorps(xmm_scratch, xmm_scratch);
    __ ucomisd(left_reg, xmm_scratch);
    __ j(not_equal, &return_left, Label::kNear);  // left == right != 0.
    // At this point, both left and right are either 0 or -0.
    if (operation == HMathMinMax::kMathMin) {
      __ orpd(left_reg, right_reg);
    } else {
      // Since we operate on +0 and/or -0, addsd has the same effect.
      __ addsd(left_reg, right_reg);
    }
    __ jmp(&return_left, Label::kNear);

    __ bind(&check_nan_left);
    __ ucomisd(left_reg, left_reg);  // NaN check.
    __ j(parity_even, &return_left, Label::kNear);  // left == NaN.
    __ bind(&return_right);
    __ movsd(left_reg, right_reg);

    __ bind(&return_left);
  }
}

void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
  XMMRegister left = ToDoubleRegister(instr->left());
  XMMRegister right = ToDoubleRegister(instr->right());
  XMMRegister result = ToDoubleRegister(instr->result());
  // Modulo uses a fixed result register.
  ASSERT(instr->op() == Token::MOD || left.is(result));
  switch (instr->op()) {
    case Token::ADD:
      __ addsd(left, right);
      break;
    case Token::SUB:
      __ subsd(left, right);
      break;
    case Token::MUL:
      __ mulsd(left, right);
      break;
    case Token::DIV:
      __ divsd(left, right);
      break;
    case Token::MOD: {
      // Pass the two doubles as arguments on the stack.
      __ PrepareCallCFunction(4, eax);
      __ movdbl(Operand(esp, 0 * kDoubleSize), left);
      __ movdbl(Operand(esp, 1 * kDoubleSize), right);
      __ CallCFunction(
          ExternalReference::double_fp_operation(Token::MOD, isolate()),
          4);
      // The return value is in st(0) on ia32; store it into the result
      // register through the stack.
      __ sub(Operand(esp), Immediate(kDoubleSize));
      __ fstp_d(Operand(esp, 0));
      __ movdbl(result, Operand(esp, 0));
      __ add(Operand(esp), Immediate(kDoubleSize));
      break;
    }
    default:
      UNREACHABLE();
      break;
  }
}

void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
  BinaryOpStub stub(instr->op(), NO_OVERWRITE);
  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
  __ nop();  // Signals no inlined smi code.
}

int LCodeGen::GetNextEmittedBlock(int block) {
  for (int i = block + 1; i < graph()->blocks()->length(); ++i) {
    LLabel* label = chunk_->GetLabel(i);
    if (!label->HasReplacement()) return i;
  }
  return -1;
}

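// Emits a conditional branch to the two target blocks, falling through
// when one of them is the next block in emission order.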
void LCodeGen::EmitBranch(int left_block, int right_block, Condition cc) {
  int next_block = GetNextEmittedBlock(current_block_);
  right_block = chunk_->LookupDestination(right_block);
  left_block = chunk_->LookupDestination(left_block);

  if (right_block == left_block) {
    EmitGoto(left_block);
  } else if (left_block == next_block) {
    __ j(NegateCondition(cc), chunk_->GetAssemblyLabel(right_block));
  } else if (right_block == next_block) {
    __ j(cc, chunk_->GetAssemblyLabel(left_block));
  } else {
    __ j(cc, chunk_->GetAssemblyLabel(left_block));
    __ jmp(chunk_->GetAssemblyLabel(right_block));
  }
}

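// Branches on the JavaScript truth value of the input, dispatching on
// the statically known representation and, for tagged values, on the
// set of input types recorded by the ToBoolean type feedback.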
void LCodeGen::DoBranch(LBranch* instr) {
  int true_block = chunk_->LookupDestination(instr->true_block_id());
  int false_block = chunk_->LookupDestination(instr->false_block_id());

  Representation r = instr->hydrogen()->value()->representation();
  if (r.IsInteger32()) {
    Register reg = ToRegister(instr->value());
    __ test(reg, Operand(reg));
    EmitBranch(true_block, false_block, not_zero);
  } else if (r.IsDouble()) {
    XMMRegister reg = ToDoubleRegister(instr->value());
    __ xorps(xmm0, xmm0);
    __ ucomisd(reg, xmm0);
    EmitBranch(true_block, false_block, not_equal);
  } else {
    ASSERT(r.IsTagged());
    Register reg = ToRegister(instr->value());
    HType type = instr->hydrogen()->value()->type();
    if (type.IsBoolean()) {
      __ cmp(reg, factory()->true_value());
      EmitBranch(true_block, false_block, equal);
    } else if (type.IsSmi()) {
      __ test(reg, Operand(reg));
      EmitBranch(true_block, false_block, not_equal);
    } else {
      Label* true_label = chunk_->GetAssemblyLabel(true_block);
      Label* false_label = chunk_->GetAssemblyLabel(false_block);

      ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types();

      if (expected.Contains(ToBooleanStub::UNDEFINED)) {
        // undefined -> false.
        __ cmp(reg, factory()->undefined_value());
        __ j(equal, false_label);
      }
      if (expected.Contains(ToBooleanStub::BOOLEAN)) {
        // true -> true, false -> false.
        __ cmp(reg, factory()->true_value());
        __ j(equal, true_label);
        __ cmp(reg, factory()->false_value());
        __ j(equal, false_label);
      }
      if (expected.Contains(ToBooleanStub::NULL_TYPE)) {
        // 'null' -> false.
        __ cmp(reg, factory()->null_value());
        __ j(equal, false_label);
      }

      if (expected.Contains(ToBooleanStub::SMI)) {
        // Smis: 0 -> false, all other -> true.
        __ test(reg, Operand(reg));
        __ j(equal, false_label);
        __ JumpIfSmi(reg, true_label);
      } else if (expected.NeedsMap()) {
        // If we need a map later and have a smi, deopt.
        __ test(reg, Immediate(kSmiTagMask));
        DeoptimizeIf(zero, instr->environment());
      }

      Register map = no_reg;  // Keep the compiler happy.
      if (expected.NeedsMap()) {
        map = ToRegister(instr->temp());
        ASSERT(!map.is(reg));
        __ mov(map, FieldOperand(reg, HeapObject::kMapOffset));
        if (expected.CanBeUndetectable()) {
          // Undetectable -> false.
          __ test_b(FieldOperand(map, Map::kBitFieldOffset),
                    1 << Map::kIsUndetectable);
          __ j(not_zero, false_label);
        }
      }

      if (expected.Contains(ToBooleanStub::STRING)) {
        // String value -> false iff empty.
        Label not_string;
        __ CmpInstanceType(map, FIRST_NONSTRING_TYPE);
        __ j(above_equal, &not_string, Label::kNear);
        __ cmp(FieldOperand(reg, String::kLengthOffset), Immediate(0));
        __ j(not_zero, true_label);
        __ jmp(false_label);
        __ bind(&not_string);
      }

      if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
        // Heap number -> false iff +0, -0, or NaN.
        Label not_heap_number;
        __ cmp(FieldOperand(reg, HeapObject::kMapOffset),
               factory()->heap_number_map());
        __ j(not_equal, &not_heap_number, Label::kNear);
        __ fldz();
        __ fld_d(FieldOperand(reg, HeapNumber::kValueOffset));
        __ FCmp();
        __ j(zero, false_label);
        __ jmp(true_label);
        __ bind(&not_heap_number);
      }

      // We've seen something for the first time -> deopt.
      DeoptimizeIf(no_condition, instr->environment());
    }
  }
}

void LCodeGen::EmitGoto(int block) {
  block = chunk_->LookupDestination(block);
  int next_block = GetNextEmittedBlock(current_block_);
  if (block != next_block) {
    __ jmp(chunk_->GetAssemblyLabel(block));
  }
}

void LCodeGen::DoGoto(LGoto* instr) {
  EmitGoto(instr->block_id());
}

    case Token::EQ_STRICT:
    case Token::INSTANCEOF:
void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();
  int false_block = chunk_->LookupDestination(instr->false_block_id());
  int true_block = chunk_->LookupDestination(instr->true_block_id());
  Condition cc = TokenToCondition(instr->op(), instr->is_double());

  if (left->IsConstantOperand() && right->IsConstantOperand()) {
    // We can statically evaluate the comparison.
    double left_val = ToDouble(LConstantOperand::cast(left));
    double right_val = ToDouble(LConstantOperand::cast(right));
    int next_block =
        EvalComparison(instr->op(), left_val, right_val) ? true_block
                                                         : false_block;
    EmitGoto(next_block);
  } else {
    if (instr->is_double()) {
      // Don't base the result on EFLAGS when a NaN is involved; jump to
      // the false block instead.
      __ ucomisd(ToDoubleRegister(left), ToDoubleRegister(right));
      __ j(parity_even, chunk_->GetAssemblyLabel(false_block));
    } else {
      if (right->IsConstantOperand()) {
        __ cmp(ToRegister(left), ToInteger32Immediate(right));
      } else if (left->IsConstantOperand()) {
        __ cmp(ToOperand(right), ToInteger32Immediate(left));
        // We transposed the operands, so reverse the condition.
        cc = ReverseCondition(cc);
      } else {
        __ cmp(ToRegister(left), ToOperand(right));
      }
    }
    EmitBranch(true_block, false_block, cc);
  }
}

void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
  Register left = ToRegister(instr->left());
  Operand right = ToOperand(instr->right());
  int false_block = chunk_->LookupDestination(instr->false_block_id());
  int true_block = chunk_->LookupDestination(instr->true_block_id());

  __ cmp(left, Operand(right));
  EmitBranch(true_block, false_block, equal);
}

void LCodeGen::DoCmpConstantEqAndBranch(LCmpConstantEqAndBranch* instr) {
  Register left = ToRegister(instr->left());
  int true_block = chunk_->LookupDestination(instr->true_block_id());
  int false_block = chunk_->LookupDestination(instr->false_block_id());

  __ cmp(left, instr->hydrogen()->right());
  EmitBranch(true_block, false_block, equal);
}

void LCodeGen::DoIsNilAndBranch(LIsNilAndBranch* instr) {
  Register reg = ToRegister(instr->value());
  int false_block = chunk_->LookupDestination(instr->false_block_id());

  // If the expression is known to be untagged or a smi, then it's
  // definitely not null, and it can't be an undetectable object.
  if (instr->hydrogen()->representation().IsSpecialization() ||
      instr->hydrogen()->type().IsSmi()) {
    EmitGoto(false_block);
    return;
  }

  int true_block = chunk_->LookupDestination(instr->true_block_id());
  Handle<Object> nil_value = instr->nil() == kNullValue ?
      factory()->null_value() :
      factory()->undefined_value();
  __ cmp(reg, nil_value);
  if (instr->kind() == kStrictEquality) {
    EmitBranch(true_block, false_block, equal);
  } else {
    Handle<Object> other_nil_value = instr->nil() == kNullValue ?
        factory()->undefined_value() :
        factory()->null_value();
    Label* true_label = chunk_->GetAssemblyLabel(true_block);
    Label* false_label = chunk_->GetAssemblyLabel(false_block);
    __ j(equal, true_label);
    __ cmp(reg, other_nil_value);
    __ j(equal, true_label);
    __ JumpIfSmi(reg, false_label);
    // Check for undetectable objects by looking in the bit field of
    // the map. The object has already been smi checked.
    Register scratch = ToRegister(instr->temp());
    __ mov(scratch, FieldOperand(reg, HeapObject::kMapOffset));
    __ movzx_b(scratch, FieldOperand(scratch, Map::kBitFieldOffset));
    __ test(scratch, Immediate(1 << Map::kIsUndetectable));
    EmitBranch(true_block, false_block, not_zero);
  }
}

Condition LCodeGen::EmitIsObject(Register input,
                                 Register temp1,
                                 Label* is_not_object,
                                 Label* is_object) {
  __ JumpIfSmi(input, is_not_object);

  __ cmp(input, isolate()->factory()->null_value());
  __ j(equal, is_object);

  __ mov(temp1, FieldOperand(input, HeapObject::kMapOffset));
  // Undetectable objects behave like undefined.
  __ test_b(FieldOperand(temp1, Map::kBitFieldOffset),
            1 << Map::kIsUndetectable);
  __ j(not_zero, is_not_object);

  __ movzx_b(temp1, FieldOperand(temp1, Map::kInstanceTypeOffset));
  __ cmp(temp1, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE);
  __ j(below, is_not_object);
  __ cmp(temp1, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
  return below_equal;
}

void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
  Register reg = ToRegister(instr->value());
  Register temp = ToRegister(instr->temp());

  int true_block = chunk_->LookupDestination(instr->true_block_id());
  int false_block = chunk_->LookupDestination(instr->false_block_id());
  Label* true_label = chunk_->GetAssemblyLabel(true_block);
  Label* false_label = chunk_->GetAssemblyLabel(false_block);

  Condition true_cond = EmitIsObject(reg, temp, false_label, true_label);

  EmitBranch(true_block, false_block, true_cond);
}

Condition LCodeGen::EmitIsString(Register input,
                                 Register temp1,
                                 Label* is_not_string) {
  __ JumpIfSmi(input, is_not_string);

  Condition cond = masm_->IsObjectStringType(input, temp1, temp1);
  return cond;
}

void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
  Register reg = ToRegister(instr->value());
  Register temp = ToRegister(instr->temp());

  int true_block = chunk_->LookupDestination(instr->true_block_id());
  int false_block = chunk_->LookupDestination(instr->false_block_id());
  Label* false_label = chunk_->GetAssemblyLabel(false_block);

  Condition true_cond = EmitIsString(reg, temp, false_label);

  EmitBranch(true_block, false_block, true_cond);
}

void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
  Operand input = ToOperand(instr->value());

  int true_block = chunk_->LookupDestination(instr->true_block_id());
  int false_block = chunk_->LookupDestination(instr->false_block_id());

  __ test(input, Immediate(kSmiTagMask));
  EmitBranch(true_block, false_block, zero);
}

void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
  Register input = ToRegister(instr->value());
  Register temp = ToRegister(instr->temp());

  int true_block = chunk_->LookupDestination(instr->true_block_id());
  int false_block = chunk_->LookupDestination(instr->false_block_id());

  STATIC_ASSERT(kSmiTag == 0);
  __ JumpIfSmi(input, chunk_->GetAssemblyLabel(false_block));
  __ mov(temp, FieldOperand(input, HeapObject::kMapOffset));
  __ test_b(FieldOperand(temp, Map::kBitFieldOffset),
            1 << Map::kIsUndetectable);
  EmitBranch(true_block, false_block, not_zero);
}

    case Token::EQ_STRICT:
void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
  Token::Value op = instr->op();
  int true_block = chunk_->LookupDestination(instr->true_block_id());
  int false_block = chunk_->LookupDestination(instr->false_block_id());

  Handle<Code> ic = CompareIC::GetUninitialized(op);
  CallCode(ic, RelocInfo::CODE_TARGET, instr);

  Condition condition = ComputeCompareCondition(op);
  __ test(eax, Operand(eax));

  EmitBranch(true_block, false_block, condition);
}

static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
  InstanceType from = instr->from();
  InstanceType to = instr->to();
  if (from == FIRST_TYPE) return to;
  ASSERT(from == to || to == LAST_TYPE);
  return from;
}

static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
  InstanceType from = instr->from();
  InstanceType to = instr->to();
  if (from == to) return equal;
  if (to == LAST_TYPE) return above_equal;
  if (from == FIRST_TYPE) return below_equal;
  UNREACHABLE();
  return equal;
}

void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
  Register input = ToRegister(instr->value());
  Register temp = ToRegister(instr->temp());

  int true_block = chunk_->LookupDestination(instr->true_block_id());
  int false_block = chunk_->LookupDestination(instr->false_block_id());

  Label* false_label = chunk_->GetAssemblyLabel(false_block);

  __ JumpIfSmi(input, false_label);

  __ CmpObjectType(input, TestType(instr->hydrogen()), temp);
  EmitBranch(true_block, false_block, BranchCondition(instr->hydrogen()));
}

void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
  Register input = ToRegister(instr->value());
  Register result = ToRegister(instr->result());

  __ AssertString(input);

  __ mov(result, FieldOperand(input, String::kHashFieldOffset));
  __ IndexFromHash(result, result);
}

void LCodeGen::DoHasCachedArrayIndexAndBranch(
    LHasCachedArrayIndexAndBranch* instr) {
  Register input = ToRegister(instr->value());

  int true_block = chunk_->LookupDestination(instr->true_block_id());
  int false_block = chunk_->LookupDestination(instr->false_block_id());

  __ test(FieldOperand(input, String::kHashFieldOffset),
          Immediate(String::kContainsCachedArrayIndexMask));
  EmitBranch(true_block, false_block, equal);
}

void LCodeGen::EmitClassOfTest(Label* is_true,
                               Label* is_false,
                               Handle<String> class_name,
                               Register input,
                               Register temp,
                               Register temp2) {
  ASSERT(!input.is(temp));
  ASSERT(!input.is(temp2));
  __ JumpIfSmi(input, is_false);

  if (class_name->IsEqualTo(CStrVector("Function"))) {
    // Callable spec objects have class 'Function'.
    __ CmpObjectType(input, FIRST_CALLABLE_SPEC_OBJECT_TYPE, temp);
    __ j(above_equal, is_true);
  }

  // Objects with a non-function constructor have class 'Object'.
  __ CmpObjectType(temp, JS_FUNCTION_TYPE, temp2);
  if (class_name->IsEqualTo(CStrVector("Object"))) {
    __ j(not_equal, is_true);
  } else {
    __ j(not_equal, is_false);
  }

  // temp now contains the constructor function. Grab the instance class
  // name from there and compare it against the expected class name.
  __ mov(temp, FieldOperand(temp, JSFunction::kSharedFunctionInfoOffset));
  __ mov(temp,
         FieldOperand(temp, SharedFunctionInfo::kInstanceClassNameOffset));
  __ cmp(temp, class_name);
}

void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
  Register input = ToRegister(instr->value());
  Register temp = ToRegister(instr->temp());
  Register temp2 = ToRegister(instr->temp2());

  Handle<String> class_name = instr->hydrogen()->class_name();

  int true_block = chunk_->LookupDestination(instr->true_block_id());
  int false_block = chunk_->LookupDestination(instr->false_block_id());
  Label* true_label = chunk_->GetAssemblyLabel(true_block);
  Label* false_label = chunk_->GetAssemblyLabel(false_block);

  EmitClassOfTest(true_label, false_label, class_name, input, temp, temp2);

  EmitBranch(true_block, false_block, equal);
}

void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
  Register reg = ToRegister(instr->value());
  int true_block = instr->true_block_id();
  int false_block = instr->false_block_id();

  __ cmp(FieldOperand(reg, HeapObject::kMapOffset), instr->map());
  EmitBranch(true_block, false_block, equal);
}

void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
  // Object and function are in fixed registers defined by the stub.
  InstanceofStub stub(InstanceofStub::kArgsInRegisters);
  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);

  Label true_value, done;
  __ test(eax, Operand(eax));
  __ j(zero, &true_value, Label::kNear);
  __ mov(ToRegister(instr->result()), factory()->false_value());
  __ jmp(&done, Label::kNear);
  __ bind(&true_value);
  __ mov(ToRegister(instr->result()), factory()->true_value());
  __ bind(&done);
}

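// instanceof against a known global function uses an inline map-check
// cache: the left-hand side's map is compared against a patchable
// global-property cell, and the deferred code only calls the
// InstanceofStub on a cache miss.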
void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
  class DeferredInstanceOfKnownGlobal: public LDeferredCode {
   public:
    DeferredInstanceOfKnownGlobal(LCodeGen* codegen,
                                  LInstanceOfKnownGlobal* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() {
      codegen()->DoDeferredInstanceOfKnownGlobal(instr_, &map_check_);
    }
    virtual LInstruction* instr() { return instr_; }
    Label* map_check() { return &map_check_; }
   private:
    LInstanceOfKnownGlobal* instr_;
    Label map_check_;
  };

  DeferredInstanceOfKnownGlobal* deferred;
  deferred = new(zone()) DeferredInstanceOfKnownGlobal(this, instr);

  Label done, false_result;
  Register object = ToRegister(instr->value());
  Register temp = ToRegister(instr->temp());

  // A smi is not an instance of anything.
  __ JumpIfSmi(object, &false_result);

  // This is the inlined call site instanceof cache. The two occurrences
  // of the hole value will be patched to the last map/result pair
  // generated by the instanceof stub.
  Label cache_miss;
  Register map = ToRegister(instr->temp());
  __ mov(map, FieldOperand(object, HeapObject::kMapOffset));
  __ bind(deferred->map_check());  // Label for calculating code patching.
  Handle<JSGlobalPropertyCell> cache_cell =
      factory()->NewJSGlobalPropertyCell(factory()->the_hole_value());
  __ cmp(map, Operand::Cell(cache_cell));  // Patched to cached map.
  __ j(not_equal, &cache_miss, Label::kNear);
  __ mov(eax, factory()->the_hole_value());  // Patched to true or false.
  __ jmp(&done);

  // The inlined call site cache did not match. Check null and string
  // before calling the deferred code.
  __ bind(&cache_miss);
  // Null is not an instance of anything.
  __ cmp(object, factory()->null_value());
  __ j(equal, &false_result);

  // String values are not instances of anything.
  Condition is_string = masm_->IsObjectStringType(object, temp, temp);
  __ j(is_string, &false_result);

  // Go to the deferred code.
  __ jmp(deferred->entry());

  __ bind(&false_result);
  __ mov(ToRegister(instr->result()), factory()->false_value());

  // Here result has either true or false. The deferred code also
  // produces a true or false object.
  __ bind(deferred->exit());
  __ bind(&done);
}

void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
                                               Label* map_check) {
  PushSafepointRegistersScope scope(this);

  InstanceofStub::Flags flags = static_cast<InstanceofStub::Flags>(
      InstanceofStub::kArgsInRegisters |
      InstanceofStub::kCallSiteInlineCheck |
      InstanceofStub::kReturnTrueFalseObject);
  InstanceofStub stub(flags);

  // The temp register needs to be the register pushed last by
  // PushSafepointRegisters, because the top of the stack is used to
  // pass the offset of the map check to the stub.
  Register temp = ToRegister(instr->temp());
  ASSERT(MacroAssembler::SafepointRegisterStackIndex(temp) == 0);
  static const int kAdditionalDelta = 13;
  int delta = masm_->SizeOfCodeGeneratedSince(map_check) + kAdditionalDelta;
  __ mov(temp, Immediate(delta));
  __ StoreToSafepointRegisterSlot(temp, temp);
  CallCodeGeneric(stub.GetCode(),
                  RelocInfo::CODE_TARGET,
                  instr,
                  RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
  // Get the deoptimization index of the LLazyBailout-environment that
  // corresponds to this instruction.
  LEnvironment* env = instr->GetDeferredLazyDeoptimizationEnvironment();
  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());

  // Put the result value into the eax slot and restore all registers.
  __ StoreToSafepointRegisterSlot(eax, eax);
}

void LCodeGen::DoCmpT(LCmpT* instr) {
  Token::Value op = instr->op();

  Handle<Code> ic = CompareIC::GetUninitialized(op);
  CallCode(ic, RelocInfo::CODE_TARGET, instr);

  Condition condition = ComputeCompareCondition(op);
  Label true_value, done;
  __ test(eax, Operand(eax));
  __ j(condition, &true_value, Label::kNear);
  __ mov(ToRegister(instr->result()), factory()->false_value());
  __ jmp(&done, Label::kNear);
  __ bind(&true_value);
  __ mov(ToRegister(instr->result()), factory()->true_value());
  __ bind(&done);
}

void LCodeGen::DoReturn(LReturn* instr) {
  if (FLAG_trace) {
    // Preserve the return value on the stack and rely on the runtime
    // call to return the value in the same register.
    __ push(eax);
    __ CallRuntime(Runtime::kTraceExit, 1);
  }
  __ mov(esp, ebp);
  __ pop(ebp);
  if (dynamic_frame_alignment_) {
    // Remove the padding word pushed in the prologue, if present.
    Label no_padding;
    __ cmp(Operand(esp, (GetParameterCount() + 2) * kPointerSize),
           Immediate(isolate()->factory()->frame_alignment_marker()));
    __ j(not_equal, &no_padding);
    if (FLAG_debug_code) {
      __ Assert(equal, "expected alignment marker");
    }
    __ Ret((GetParameterCount() + 2) * kPointerSize, ecx);
    __ bind(&no_padding);
  }
  __ Ret((GetParameterCount() + 1) * kPointerSize, ecx);
}

void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
  Register result = ToRegister(instr->result());
  __ mov(result, Operand::Cell(instr->hydrogen()->cell()));
  if (instr->hydrogen()->RequiresHoleCheck()) {
    __ cmp(result, factory()->the_hole_value());
    DeoptimizeIf(equal, instr->environment());
  }
}

void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
  ASSERT(ToRegister(instr->context()).is(esi));
  ASSERT(ToRegister(instr->result()).is(eax));

  __ mov(ecx, instr->name());
  RelocInfo::Mode mode = instr->for_typeof() ? RelocInfo::CODE_TARGET :
                                               RelocInfo::CODE_TARGET_CONTEXT;
  Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
  CallCode(ic, mode, instr);
}

void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
  Register value = ToRegister(instr->value());
  Handle<JSGlobalPropertyCell> cell_handle = instr->hydrogen()->cell();

  // If the cell we are storing to contains the hole, it could have been
  // deleted from the property dictionary; deoptimize in that case.
  if (instr->hydrogen()->RequiresHoleCheck()) {
    __ cmp(Operand::Cell(cell_handle), factory()->the_hole_value());
    DeoptimizeIf(equal, instr->environment());
  }

  // Store the value. Cells are always rescanned, so no write barrier.
  __ mov(Operand::Cell(cell_handle), value);
}

void LCodeGen::DoStoreGlobalGeneric(LStoreGlobalGeneric* instr) {
  ASSERT(ToRegister(instr->context()).is(esi));
  ASSERT(ToRegister(instr->value()).is(eax));

  __ mov(ecx, instr->name());
  Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
      ? isolate()->builtins()->StoreIC_Initialize_Strict()
      : isolate()->builtins()->StoreIC_Initialize();
  CallCode(ic, RelocInfo::CODE_TARGET_CONTEXT, instr);
}

void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
  Register context = ToRegister(instr->context());
  Register result = ToRegister(instr->result());
  __ mov(result, ContextOperand(context, instr->slot_index()));

  if (instr->hydrogen()->RequiresHoleCheck()) {
    __ cmp(result, factory()->the_hole_value());
    if (instr->hydrogen()->DeoptimizesOnHole()) {
      DeoptimizeIf(equal, instr->environment());
    } else {
      Label is_not_hole;
      __ j(not_equal, &is_not_hole, Label::kNear);
      __ mov(result, factory()->undefined_value());
      __ bind(&is_not_hole);
    }
  }
}

void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
  Register context = ToRegister(instr->context());
  Register value = ToRegister(instr->value());

  Label skip_assignment;

  Operand target = ContextOperand(context, instr->slot_index());
  if (instr->hydrogen()->RequiresHoleCheck()) {
    __ cmp(target, factory()->the_hole_value());
    if (instr->hydrogen()->DeoptimizesOnHole()) {
      DeoptimizeIf(equal, instr->environment());
    } else {
      __ j(not_equal, &skip_assignment, Label::kNear);
    }
  }

  __ mov(target, value);
  if (instr->hydrogen()->NeedsWriteBarrier()) {
    HType type = instr->hydrogen()->value()->type();
    SmiCheck check_needed =
        type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
    Register temp = ToRegister(instr->temp());
    int offset = Context::SlotOffset(instr->slot_index());
    __ RecordWriteContextSlot(context, offset, value, temp,
                              kSaveFPRegs, EMIT_REMEMBERED_SET,
                              check_needed);
  }

  __ bind(&skip_assignment);
}

void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
  Register object = ToRegister(instr->object());
  Register result = ToRegister(instr->result());
  if (instr->hydrogen()->is_in_object()) {
    __ mov(result, FieldOperand(object, instr->hydrogen()->offset()));
  } else {
    __ mov(result, FieldOperand(object, JSObject::kPropertiesOffset));
    __ mov(result, FieldOperand(result, instr->hydrogen()->offset()));
  }
}

void LCodeGen::EmitLoadFieldOrConstantFunction(Register result,
                                               Register object,
                                               Handle<Map> type,
                                               Handle<String> name,
                                               LEnvironment* env) {
  LookupResult lookup(isolate());
  type->LookupDescriptor(NULL, *name, &lookup);
  ASSERT(lookup.IsFound() || lookup.IsCacheable());
  if (lookup.IsField()) {
    int index = lookup.GetLocalFieldIndexFromMap(*type);
    int offset = index * kPointerSize;
    if (index < 0) {
      // Negative property indices are in-object properties, indexed
      // from the end of the fixed part of the object.
      __ mov(result, FieldOperand(object, offset + type->instance_size()));
    } else {
      // Non-negative property indices are in the properties array.
      __ mov(result, FieldOperand(object, JSObject::kPropertiesOffset));
      __ mov(result, FieldOperand(result, offset + FixedArray::kHeaderSize));
    }
  } else if (lookup.IsConstantFunction()) {
    Handle<JSFunction> function(lookup.GetConstantFunctionFromMap(*type));
    __ LoadHeapObject(result, function);
  } else {
    // Negative lookup: check the prototype chain and bail out if any
    // map in it has changed.
    Handle<HeapObject> current(HeapObject::cast((*type)->prototype()));
    Heap* heap = type->GetHeap();
    while (*current != heap->null_value()) {
      __ LoadHeapObject(result, current);
      __ cmp(FieldOperand(result, HeapObject::kMapOffset),
             Handle<Map>(current->map()));
      DeoptimizeIf(not_equal, env);
      current =
          Handle<HeapObject>(HeapObject::cast(current->map()->prototype()));
    }
    __ mov(result, factory()->undefined_value());
  }
}

void LCodeGen::EmitPushTaggedOperand(LOperand* operand) {
  ASSERT(!operand->IsDoubleRegister());
  if (operand->IsConstantOperand()) {
    Handle<Object> object = ToHandle(LConstantOperand::cast(operand));
    if (object->IsSmi()) {
      __ Push(Handle<Smi>::cast(object));
    } else {
      __ PushHeapObject(Handle<HeapObject>::cast(object));
    }
  } else if (operand->IsRegister()) {
    __ push(ToRegister(operand));
  } else {
    __ push(ToOperand(operand));
  }
}

static bool CompactEmit(SmallMapList* list,
                        Handle<String> name,
                        int i,
                        Isolate* isolate) {
  Handle<Map> map = list->at(i);
  // If the map has elements transitions, we might need to emit code to
  // follow them, so use the far variant.
  if (map->HasElementsTransition()) return false;
  LookupResult lookup(isolate);
  map->LookupDescriptor(NULL, *name, &lookup);
  return lookup.IsField() || lookup.IsConstantFunction();
}

void LCodeGen::DoLoadNamedFieldPolymorphic(LLoadNamedFieldPolymorphic* instr) {
  Register object = ToRegister(instr->object());
  Register result = ToRegister(instr->result());

  int map_count = instr->hydrogen()->types()->length();
  bool need_generic = instr->hydrogen()->need_generic();

  if (map_count == 0 && !need_generic) {
    DeoptimizeIf(no_condition, instr->environment());
    return;
  }

  Handle<String> name = instr->hydrogen()->name();
  Label done;
  bool all_are_compact = true;
  for (int i = 0; i < map_count; ++i) {
    if (!CompactEmit(instr->hydrogen()->types(), name, i, isolate())) {
      all_are_compact = false;
      break;
    }
  }
  for (int i = 0; i < map_count; ++i) {
    bool last = (i == map_count - 1);
    Handle<Map> map = instr->hydrogen()->types()->at(i);
    Label check_passed;
    __ CompareMap(object, map, &check_passed, ALLOW_ELEMENT_TRANSITION_MAPS);
    if (last && !need_generic) {
      DeoptimizeIf(not_equal, instr->environment());
      __ bind(&check_passed);
      EmitLoadFieldOrConstantFunction(
          result, object, map, name, instr->environment());
    } else {
      Label next;
      bool compact = all_are_compact ? true :
          CompactEmit(instr->hydrogen()->types(), name, i, isolate());
      __ j(not_equal, &next, compact ? Label::kNear : Label::kFar);
      __ bind(&check_passed);
      EmitLoadFieldOrConstantFunction(
          result, object, map, name, instr->environment());
      __ jmp(&done, all_are_compact ? Label::kNear : Label::kFar);
      __ bind(&next);
    }
  }
  if (need_generic) {
    __ mov(ecx, name);
    Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
    CallCode(ic, RelocInfo::CODE_TARGET, instr);
  }
  __ bind(&done);
}

void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
  ASSERT(ToRegister(instr->context()).is(esi));
  ASSERT(ToRegister(instr->result()).is(eax));

  __ mov(ecx, instr->name());
  Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
}

void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
  Register function = ToRegister(instr->function());
  Register temp = ToRegister(instr->temp());
  Register result = ToRegister(instr->result());

  // Check that the function really is a function.
  __ CmpObjectType(function, JS_FUNCTION_TYPE, result);
  DeoptimizeIf(not_equal, instr->environment());

  // Check whether the function has an instance prototype.
  Label non_instance;
  __ test_b(FieldOperand(result, Map::kBitFieldOffset),
            1 << Map::kHasNonInstancePrototype);
  __ j(not_zero, &non_instance, Label::kNear);

  // Get the prototype or initial map from the function.
  __ mov(result,
         FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));

  // Check that the function has a prototype or an initial map.
  __ cmp(Operand(result), Immediate(factory()->the_hole_value()));
  DeoptimizeIf(equal, instr->environment());

  // If the function does not have an initial map, we're done.
  Label done;
  __ CmpObjectType(result, MAP_TYPE, temp);
  __ j(not_equal, &done, Label::kNear);

  // Get the prototype from the initial map.
  __ mov(result, FieldOperand(result, Map::kPrototypeOffset));
  __ jmp(&done, Label::kNear);

  // Non-instance prototype: fetch the prototype from the constructor
  // field in the function's map.
  __ bind(&non_instance);
  __ mov(result, FieldOperand(result, Map::kConstructorOffset));

  __ bind(&done);
}

void LCodeGen::DoLoadElements(LLoadElements* instr) {
  Register result = ToRegister(instr->result());
  Register input = ToRegister(instr->object());
  __ mov(result, FieldOperand(input, JSObject::kElementsOffset));
  if (FLAG_debug_code) {
    Label done, ok, fail;
    __ cmp(FieldOperand(result, HeapObject::kMapOffset),
           Immediate(factory()->fixed_array_map()));
    __ j(equal, &done, Label::kNear);
    __ cmp(FieldOperand(result, HeapObject::kMapOffset),
           Immediate(factory()->fixed_cow_array_map()));
    __ j(equal, &done, Label::kNear);
    Register temp((result.is(eax)) ? ebx : eax);
    __ push(temp);
    __ mov(temp, FieldOperand(result, HeapObject::kMapOffset));
    __ movzx_b(temp, FieldOperand(temp, Map::kBitField2Offset));
    __ and_(temp, Map::kElementsKindMask);
    __ shr(temp, Map::kElementsKindShift);
    __ cmp(temp, GetInitialFastElementsKind());
    __ j(less, &fail, Label::kNear);
    __ cmp(temp, TERMINAL_FAST_ELEMENTS_KIND);
    __ j(less_equal, &ok, Label::kNear);
    __ cmp(temp, FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND);
    __ j(less, &fail, Label::kNear);
    __ cmp(temp, LAST_EXTERNAL_ARRAY_ELEMENTS_KIND);
    __ j(less_equal, &ok, Label::kNear);
    __ bind(&fail);
    __ Abort("Check for fast or external elements failed.");
    __ bind(&ok);
    __ pop(temp);
    __ bind(&done);
  }
}

void LCodeGen::DoLoadExternalArrayPointer(
    LLoadExternalArrayPointer* instr) {
  Register result = ToRegister(instr->result());
  Register input = ToRegister(instr->object());
  __ mov(result, FieldOperand(input,
                              ExternalArray::kExternalPointerOffset));
}

void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
  Register arguments = ToRegister(instr->arguments());
  Register length = ToRegister(instr->length());
  Operand index = ToOperand(instr->index());
  Register result = ToRegister(instr->result());

  __ sub(length, index);
  // There are two words between the frame pointer and the last argument.
  // Subtracting from length accounts for one of them; add one more.
  __ mov(result, Operand(arguments, length, times_4, kPointerSize));
}

void LCodeGen::DoLoadKeyedFastElement(LLoadKeyedFastElement* instr) {
  Register result = ToRegister(instr->result());

  // Load the result.
  __ mov(result,
         BuildFastArrayOperand(instr->elements(),
                               instr->key(),
                               instr->hydrogen()->key()->representation(),
                               FAST_ELEMENTS,
                               FixedArray::kHeaderSize - kHeapObjectTag,
                               instr->additional_index()));

  // Check for the hole value.
  if (instr->hydrogen()->RequiresHoleCheck()) {
    if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
      __ test(result, Immediate(kSmiTagMask));
      DeoptimizeIf(not_equal, instr->environment());
    } else {
      __ cmp(result, factory()->the_hole_value());
      DeoptimizeIf(equal, instr->environment());
    }
  }
}

void LCodeGen::DoLoadKeyedFastDoubleElement(
    LLoadKeyedFastDoubleElement* instr) {
  XMMRegister result = ToDoubleRegister(instr->result());

  if (instr->hydrogen()->RequiresHoleCheck()) {
    int offset = FixedDoubleArray::kHeaderSize - kHeapObjectTag +
        sizeof(kHoleNanLower32);
    Operand hole_check_operand = BuildFastArrayOperand(
        instr->elements(), instr->key(),
        instr->hydrogen()->key()->representation(),
        FAST_DOUBLE_ELEMENTS,
        offset,
        instr->additional_index());
    __ cmp(hole_check_operand, Immediate(kHoleNanUpper32));
    DeoptimizeIf(equal, instr->environment());
  }

  Operand double_load_operand = BuildFastArrayOperand(
      instr->elements(),
      instr->key(),
      instr->hydrogen()->key()->representation(),
      FAST_DOUBLE_ELEMENTS,
      FixedDoubleArray::kHeaderSize - kHeapObjectTag,
      instr->additional_index());
  __ movdbl(result, double_load_operand);
}

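// Builds the x86 addressing mode for an element access: constant keys
// fold into the displacement, while variable keys use scaled indexing
// (with one shift less when the key is still a tagged smi).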
Operand LCodeGen::BuildFastArrayOperand(
    LOperand* elements_pointer,
    LOperand* key,
    Representation key_representation,
    ElementsKind elements_kind,
    uint32_t offset,
    uint32_t additional_index) {
  Register elements_pointer_reg = ToRegister(elements_pointer);
  int shift_size = ElementsKindToShiftSize(elements_kind);
  // The key may have been replaced during bounds check elimination with
  // a tagged index, so compensate for the smi tag here.
  if (key_representation.IsTagged() && (shift_size >= 1)) {
    shift_size -= kSmiTagSize;
  }
  if (key->IsConstantOperand()) {
    int constant_value = ToInteger32(LConstantOperand::cast(key));
    if (constant_value & 0xF0000000) {
      Abort("array index constant value too big");
    }
    return Operand(elements_pointer_reg,
                   ((constant_value + additional_index) << shift_size)
                       + offset);
  } else {
    ScaleFactor scale_factor = static_cast<ScaleFactor>(shift_size);
    return Operand(elements_pointer_reg,
                   ToRegister(key),
                   scale_factor,
                   offset + (additional_index << shift_size));
  }
}

void LCodeGen::DoLoadKeyedSpecializedArrayElement(
    LLoadKeyedSpecializedArrayElement* instr) {
  ElementsKind elements_kind = instr->elements_kind();
  LOperand* key = instr->key();
  if (!key->IsConstantOperand() &&
      ExternalArrayOpRequiresTemp(instr->hydrogen()->key()->representation(),
                                  elements_kind)) {
    __ SmiUntag(ToRegister(key));
  }
  Operand operand(BuildFastArrayOperand(
      instr->external_pointer(),
      key,
      instr->hydrogen()->key()->representation(),
      elements_kind,
      0,
      instr->additional_index()));
  if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
    XMMRegister result(ToDoubleRegister(instr->result()));
    __ movss(result, operand);
    __ cvtss2sd(result, result);
  } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
    __ movdbl(ToDoubleRegister(instr->result()), operand);
  } else {
    Register result(ToRegister(instr->result()));
    switch (elements_kind) {
      case EXTERNAL_BYTE_ELEMENTS:
        __ movsx_b(result, operand);
        break;
      case EXTERNAL_PIXEL_ELEMENTS:
      case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
        __ movzx_b(result, operand);
        break;
      case EXTERNAL_SHORT_ELEMENTS:
        __ movsx_w(result, operand);
        break;
      case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
        __ movzx_w(result, operand);
        break;
      case EXTERNAL_INT_ELEMENTS:
        __ mov(result, operand);
        break;
      case EXTERNAL_UNSIGNED_INT_ELEMENTS:
        __ mov(result, operand);
        if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
          __ test(result, Operand(result));
          DeoptimizeIf(negative, instr->environment());
        }
        break;
      default:
        UNREACHABLE();
        break;
    }
  }
}

void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
  ASSERT(ToRegister(instr->context()).is(esi));
  Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
}

void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
  Register result = ToRegister(instr->result());

  if (instr->hydrogen()->from_inlined()) {
    __ lea(result, Operand(esp, -2 * kPointerSize));
  } else {
    // Check for arguments adaptor frame.
    Label done, adapted;
    __ mov(result, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
    __ cmp(Operand(result, StandardFrameConstants::kContextOffset),
           Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
    __ j(equal, &adapted, Label::kNear);

    // No arguments adaptor frame.
    __ mov(result, Operand(ebp));
    __ jmp(&done, Label::kNear);

    // Arguments adaptor frame present.
    __ bind(&adapted);
    __ mov(result, Operand(ebp, StandardFrameConstants::kCallerFPOffset));

    // Result is the frame pointer for the frame if not adapted, and for
    // the real frame below the adaptor frame if adapted.
    __ bind(&done);
  }
}

void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
  Operand elem = ToOperand(instr->elements());
  Register result = ToRegister(instr->result());

  Label done;

  // If no arguments adaptor frame, the number of arguments is fixed.
  __ cmp(ebp, elem);
  __ mov(result, Immediate(scope()->num_parameters()));
  __ j(equal, &done, Label::kNear);

  // Arguments adaptor frame present. Get the argument length from there.
  __ mov(result, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
  __ mov(result, Operand(result,
                         ArgumentsAdaptorFrameConstants::kLengthOffset));
  __ SmiUntag(result);

  __ bind(&done);
}

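// Rewrites the receiver for a non-strict, non-native callee: null and
// undefined become the global receiver object, and values below the
// spec-object range trigger a deoptimization.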
void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
  Register receiver = ToRegister(instr->receiver());
  Register function = ToRegister(instr->function());
  Register scratch = ToRegister(instr->temp());

  // If the receiver is null or undefined, we have to pass the global
  // object as a receiver to normal functions. Values have to be passed
  // unchanged to builtins and strict-mode functions.
  Label global_object, receiver_ok;

  // Do not transform the receiver for strict mode functions.
  __ mov(scratch,
         FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
  __ test_b(FieldOperand(scratch, SharedFunctionInfo::kStrictModeByteOffset),
            1 << SharedFunctionInfo::kStrictModeBitWithinByte);
  __ j(not_equal, &receiver_ok, Label::kNear);

  // Do not transform the receiver for builtins.
  __ test_b(FieldOperand(scratch, SharedFunctionInfo::kNativeByteOffset),
            1 << SharedFunctionInfo::kNativeBitWithinByte);
  __ j(not_equal, &receiver_ok, Label::kNear);

  // Normal function. Replace undefined or null with the global receiver.
  __ cmp(receiver, factory()->null_value());
  __ j(equal, &global_object, Label::kNear);
  __ cmp(receiver, factory()->undefined_value());
  __ j(equal, &global_object, Label::kNear);

  // The receiver should be a JS object.
  __ test(receiver, Immediate(kSmiTagMask));
  DeoptimizeIf(equal, instr->environment());
  __ CmpObjectType(receiver, FIRST_SPEC_OBJECT_TYPE, scratch);
  DeoptimizeIf(below, instr->environment());
  __ jmp(&receiver_ok, Label::kNear);

  __ bind(&global_object);
  __ mov(receiver, Operand(ebp, StandardFrameConstants::kContextOffset));
  __ mov(receiver, ContextOperand(receiver, Context::GLOBAL_OBJECT_INDEX));
  __ mov(receiver,
         FieldOperand(receiver, GlobalObject::kGlobalReceiverOffset));
  __ bind(&receiver_ok);
}

void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
  Register receiver = ToRegister(instr->receiver());
  Register function = ToRegister(instr->function());
  Register length = ToRegister(instr->length());
  Register elements = ToRegister(instr->elements());

  // Copy the arguments to this function, possibly from the adaptor
  // frame below it.
  const uint32_t kArgumentsLimit = 1 * KB;
  __ cmp(length, kArgumentsLimit);
  DeoptimizeIf(above, instr->environment());

  __ push(receiver);
  __ mov(receiver, length);

  // Loop through the arguments pushing them onto the execution stack.
  Label invoke, loop;
  // length is a small non-negative integer, due to the test above.
  __ test(length, Operand(length));
  __ j(zero, &invoke, Label::kNear);
  __ bind(&loop);
  __ push(Operand(elements, length, times_pointer_size, 1 * kPointerSize));
  __ dec(length);
  __ j(not_zero, &loop);

  // Invoke the function.
  __ bind(&invoke);
  ASSERT(instr->HasPointerMap());
  LPointerMap* pointers = instr->pointer_map();
  RecordPosition(pointers->position());
  SafepointGenerator safepoint_generator(
      this, pointers, Safepoint::kLazyDeopt);
  ParameterCount actual(eax);
  __ InvokeFunction(function, actual, CALL_FUNCTION,
                    safepoint_generator, CALL_AS_METHOD);
}

void LCodeGen::DoPushArgument(LPushArgument* instr) {
  LOperand* argument = instr->value();
  EmitPushTaggedOperand(argument);
}

void LCodeGen::DoDrop(LDrop* instr) {
  __ Drop(instr->count());
}

void LCodeGen::DoThisFunction(LThisFunction* instr) {
  Register result = ToRegister(instr->result());
  __ mov(result, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
}

void LCodeGen::DoContext(LContext* instr) {
  Register result = ToRegister(instr->result());
  __ mov(result, Operand(ebp, StandardFrameConstants::kContextOffset));
}

void LCodeGen::DoOuterContext(LOuterContext* instr) {
  Register context = ToRegister(instr->context());
  Register result = ToRegister(instr->result());
  __ mov(result,
         Operand(context, Context::SlotOffset(Context::PREVIOUS_INDEX)));
}

void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
  __ push(esi);  // The context is the first argument.
  __ push(Immediate(instr->hydrogen()->pairs()));
  __ push(Immediate(Smi::FromInt(instr->hydrogen()->flags())));
  CallRuntime(Runtime::kDeclareGlobals, 3, instr);
}

void LCodeGen::DoGlobalObject(LGlobalObject* instr) {
  Register context = ToRegister(instr->context());
  Register result = ToRegister(instr->result());
  __ mov(result,
         Operand(context, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
}

void LCodeGen::DoGlobalReceiver(LGlobalReceiver* instr) {
  Register global = ToRegister(instr->global());
  Register result = ToRegister(instr->result());
  __ mov(result, FieldOperand(global, GlobalObject::kGlobalReceiverOffset));
}

void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
                                 int arity,
                                 LInstruction* instr,
                                 CallKind call_kind,
                                 EDIState edi_state) {
  bool can_invoke_directly = !function->NeedsArgumentsAdaption() ||
      function->shared()->formal_parameter_count() == arity;

  LPointerMap* pointers = instr->pointer_map();
  RecordPosition(pointers->position());

  if (can_invoke_directly) {
    if (edi_state == EDI_UNINITIALIZED) {
      __ LoadHeapObject(edi, function);
    }

    // Change context.
    __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));

    // Set eax to the arguments count if adaption is not needed. Assumes
    // that eax is available to write to at this point.
    if (!function->NeedsArgumentsAdaption()) {
      __ mov(eax, arity);
    }

    // Invoke function directly.
    __ SetCallKind(ecx, call_kind);
    if (*function == *info()->closure()) {
      __ CallSelf();
    } else {
      __ call(FieldOperand(edi, JSFunction::kCodeEntryOffset));
    }
    RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
  } else {
    // We need to adapt arguments.
    SafepointGenerator generator(
        this, pointers, Safepoint::kLazyDeopt);
    ParameterCount count(arity);
    __ InvokeFunction(function, count, CALL_FUNCTION, generator, call_kind);
  }
}

void LCodeGen::DoCallConstantFunction(LCallConstantFunction* instr) {
  CallKnownFunction(instr->function(),
                    instr->arity(),
                    instr,
                    CALL_AS_METHOD,
                    EDI_UNINITIALIZED);
}

void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr) {
  Register input_reg = ToRegister(instr->value());
  __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
         factory()->heap_number_map());
  DeoptimizeIf(not_equal, instr->environment());

  Label done;
  Register tmp = input_reg.is(eax) ? ecx : eax;
  Register tmp2 = tmp.is(ecx) ? edx : input_reg.is(ecx) ? edx : ecx;

  // Preserve the value of all registers.
  PushSafepointRegistersScope scope(this);

  // If the argument is positive, just return it: |input| and |result|
  // are the same register, and |input| will be restored unchanged by
  // popping the safepoint registers.
  Label negative;
  __ mov(tmp, FieldOperand(input_reg, HeapNumber::kExponentOffset));
  __ test(tmp, Immediate(HeapNumber::kSignMask));
  __ j(not_zero, &negative);
  __ jmp(&done);

  __ bind(&negative);

  Label allocated, slow;
  __ AllocateHeapNumber(tmp, tmp2, no_reg, &slow);
  __ jmp(&allocated);

  // Slow case: call the runtime system to allocate the heap number.
  __ bind(&slow);
  CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0,
                          instr, instr->context());
  // Set the pointer to the new heap number in tmp.
  if (!tmp.is(eax)) __ mov(tmp, eax);
  // Restore input_reg after the call to the runtime.
  __ LoadFromSafepointRegisterSlot(input_reg, input_reg);

  __ bind(&allocated);
  // Copy the number with the sign bit cleared into the new heap number.
  __ mov(tmp2, FieldOperand(input_reg, HeapNumber::kExponentOffset));
  __ and_(tmp2, ~HeapNumber::kSignMask);
  __ mov(FieldOperand(tmp, HeapNumber::kExponentOffset), tmp2);
  __ mov(tmp2, FieldOperand(input_reg, HeapNumber::kMantissaOffset));
  __ mov(FieldOperand(tmp, HeapNumber::kMantissaOffset), tmp2);
  __ StoreToSafepointRegisterSlot(input_reg, tmp);

  __ bind(&done);
}

void LCodeGen::EmitIntegerMathAbs(LUnaryMathOperation* instr) {
  Register input_reg = ToRegister(instr->value());
  __ test(input_reg, Operand(input_reg));
  Label is_positive;
  __ j(not_sign, &is_positive);
  __ neg(input_reg);
  // Deoptimize on kMinInt, whose negation is still negative.
  __ test(input_reg, Operand(input_reg));
  DeoptimizeIf(negative, instr->environment());
  __ bind(&is_positive);
}

void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) {
  // Class for deferred case.
  class DeferredMathAbsTaggedHeapNumber: public LDeferredCode {
   public:
    DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen,
                                    LUnaryMathOperation* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() {
      codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
    }
    virtual LInstruction* instr() { return instr_; }
   private:
    LUnaryMathOperation* instr_;
  };

  ASSERT(instr->value()->Equals(instr->result()));
  Representation r = instr->hydrogen()->value()->representation();

  if (r.IsDouble()) {
    XMMRegister scratch = xmm0;
    XMMRegister input_reg = ToDoubleRegister(instr->value());
    __ xorps(scratch, scratch);
    __ subsd(scratch, input_reg);
    __ pand(input_reg, scratch);
  } else if (r.IsInteger32()) {
    EmitIntegerMathAbs(instr);
  } else {  // Tagged case.
    DeferredMathAbsTaggedHeapNumber* deferred =
        new(zone()) DeferredMathAbsTaggedHeapNumber(this, instr);
    Register input_reg = ToRegister(instr->value());
    // Smi check.
    __ JumpIfNotSmi(input_reg, deferred->entry());
    EmitIntegerMathAbs(instr);
    __ bind(deferred->exit());
  }
}

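// The double case above is branch-free: after subsd, scratch holds -x,
// and x and -x differ only in the IEEE-754 sign bit, so pand of the two
// bit patterns clears the sign and leaves |x|. A host-side sketch of the
// same idea (illustration only, not part of the generated code; assumes
// IEEE-754 doubles plus <cstdint>/<cstring>):
//
//   double AbsViaSignMask(double x) {
//     uint64_t bits;
//     memcpy(&bits, &x, sizeof bits);
//     bits &= ~(uint64_t{1} << 63);   // clear the sign bit
//     memcpy(&x, &bits, sizeof bits);
//     return x;                       // AbsViaSignMask(-3.5) == 3.5
//   }
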
void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) {
  XMMRegister xmm_scratch = xmm0;
  Register output_reg = ToRegister(instr->result());
  XMMRegister input_reg = ToDoubleRegister(instr->value());

  if (CpuFeatures::IsSupported(SSE4_1)) {
    CpuFeatures::Scope scope(SSE4_1);
    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
      // Deoptimize on negative zero.
      Label non_zero;
      __ xorps(xmm_scratch, xmm_scratch);  // Zero the register.
      __ ucomisd(input_reg, xmm_scratch);
      __ j(not_equal, &non_zero, Label::kNear);
      __ movmskpd(output_reg, input_reg);
      __ test(output_reg, Immediate(1));
      DeoptimizeIf(not_zero, instr->environment());
      __ bind(&non_zero);
    }
    __ roundsd(xmm_scratch, input_reg, Assembler::kRoundDown);
    __ cvttsd2si(output_reg, Operand(xmm_scratch));
    // Overflow is signalled with minint.
    __ cmp(output_reg, 0x80000000u);
    DeoptimizeIf(equal, instr->environment());
  } else {
    Label negative_sign, done;
    // Deoptimize on unordered.
    __ xorps(xmm_scratch, xmm_scratch);  // Zero the register.
    __ ucomisd(input_reg, xmm_scratch);
    DeoptimizeIf(parity_even, instr->environment());
    __ j(below, &negative_sign, Label::kNear);

    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
      // Check for negative zero.
      Label positive_sign;
      __ j(above, &positive_sign, Label::kNear);
      __ movmskpd(output_reg, input_reg);
      __ test(output_reg, Immediate(1));
      DeoptimizeIf(not_zero, instr->environment());
      __ Set(output_reg, Immediate(0));
      __ jmp(&done, Label::kNear);
      __ bind(&positive_sign);
    }

    // Use a truncating instruction (OK because the input is positive).
    __ cvttsd2si(output_reg, Operand(input_reg));
    // Overflow is signalled with minint.
    __ cmp(output_reg, 0x80000000u);
    DeoptimizeIf(equal, instr->environment());
    __ jmp(&done, Label::kNear);

    // Non-zero negative values reach here.
    __ bind(&negative_sign);
    // Truncate, then compare and compensate.
    __ cvttsd2si(output_reg, Operand(input_reg));
    __ cvtsi2sd(xmm_scratch, output_reg);
    __ ucomisd(input_reg, xmm_scratch);
    __ j(equal, &done, Label::kNear);
    __ sub(output_reg, Immediate(1));
    DeoptimizeIf(overflow, instr->environment());

    __ bind(&done);
  }
}

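// Without SSE4.1's roundsd, floor is assembled from truncation: positive
// inputs truncate directly (cvttsd2si already rounds toward zero), while
// negative inputs truncate and then subtract one whenever truncation
// changed the value. Example: floor(-2.5) truncates to -2, the compare
// against the reconverted -2.0 fails, so -2 - 1 = -3. Negative zero has
// no integer sign, so it is detected separately via the sign bit that
// movmskpd extracts.
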
void LCodeGen::DoMathRound(LUnaryMathOperation* instr) {
  XMMRegister xmm_scratch = xmm0;
  Register output_reg = ToRegister(instr->result());
  XMMRegister input_reg = ToDoubleRegister(instr->value());

  Label below_half, done;
  // xmm_scratch = 0.5
  ExternalReference one_half = ExternalReference::address_of_one_half();
  __ movdbl(xmm_scratch, Operand::StaticVariable(one_half));
  __ ucomisd(xmm_scratch, input_reg);
  __ j(above, &below_half);
  // xmm_scratch = input + 0.5
  __ addsd(xmm_scratch, input_reg);

  // Compute Math.floor(value + 0.5) with a truncating instruction
  // (OK because the input is positive here).
  __ cvttsd2si(output_reg, Operand(xmm_scratch));

  // Overflow is signalled with minint.
  __ cmp(output_reg, 0x80000000u);
  DeoptimizeIf(equal, instr->environment());
  __ jmp(&done);

  __ bind(&below_half);
  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    // Bail out if the result would be -0.
    __ movmskpd(output_reg, input_reg);
    __ test(output_reg, Immediate(1));
    DeoptimizeIf(not_zero, instr->environment());
  }
  // If the input is >= -0.5, we return +0; anything smaller deoptimizes.
  __ mov(output_reg, Immediate(0xBF000000));
  __ movd(xmm_scratch, Operand(output_reg));
  __ cvtss2sd(xmm_scratch, xmm_scratch);
  __ ucomisd(input_reg, xmm_scratch);
  DeoptimizeIf(below, instr->environment());
  __ Set(output_reg, Immediate(0));
  __ bind(&done);
}

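// Math.round here is truncate(x + 0.5), which is only valid for
// x >= -0.5; inputs below -0.5 deoptimize out of the fast path (the
// DeoptimizeIf(below) after the compare against -0.5). The immediate
// 0xBF000000 is -0.5f in single precision, widened with cvtss2sd to
// avoid loading an 8-byte constant; results in [-0.5, 0.5) collapse to
// +0, with a prior deopt when the caller must distinguish -0.
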
void LCodeGen::DoMathSqrt(LUnaryMathOperation* instr) {
  XMMRegister input_reg = ToDoubleRegister(instr->value());
  ASSERT(ToDoubleRegister(instr->result()).is(input_reg));
  __ sqrtsd(input_reg, input_reg);
}

void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
  XMMRegister xmm_scratch = xmm0;
  XMMRegister input_reg = ToDoubleRegister(instr->value());
  Register scratch = ToRegister(instr->temp());
  ASSERT(ToDoubleRegister(instr->result()).is(input_reg));

  // Note that according to ECMA-262 15.8.2.13:
  //   Math.pow(-Infinity, 0.5) == Infinity
  //   Math.sqrt(-Infinity)     == NaN
  Label done, sqrt;
  // Check the base for -Infinity. 0xFF800000 is -Infinity as a
  // single-precision float.
  __ mov(scratch, 0xFF800000);
  __ movd(xmm_scratch, scratch);
  __ cvtss2sd(xmm_scratch, xmm_scratch);
  __ ucomisd(input_reg, xmm_scratch);
  // Comparing -Infinity with NaN results in "unordered", which sets the
  // zero flag as if both were equal. However, it also sets the carry flag.
  __ j(not_equal, &sqrt, Label::kNear);
  __ j(carry, &sqrt, Label::kNear);
  // If the input is -Infinity, return Infinity.
  __ xorps(input_reg, input_reg);
  __ subsd(input_reg, xmm_scratch);
  __ jmp(&done, Label::kNear);

  // Square root.
  __ bind(&sqrt);
  __ xorps(xmm_scratch, xmm_scratch);
  __ addsd(input_reg, xmm_scratch);  // Convert -0 to +0.
  __ sqrtsd(input_reg, input_reg);
  __ bind(&done);
}

void LCodeGen::DoPower(LPower* instr) {
  Representation exponent_type = instr->hydrogen()->right()->representation();
  // Having marked this as a call, we can use any registers.
  // Just make sure that the input/output registers are the expected ones.
  ASSERT(!instr->right()->IsDoubleRegister() ||
         ToDoubleRegister(instr->right()).is(xmm1));
  ASSERT(!instr->right()->IsRegister() ||
         ToRegister(instr->right()).is(eax));
  ASSERT(ToDoubleRegister(instr->left()).is(xmm2));
  ASSERT(ToDoubleRegister(instr->result()).is(xmm3));

  if (exponent_type.IsTagged()) {
    Label no_deopt;
    __ JumpIfSmi(eax, &no_deopt);
    __ CmpObjectType(eax, HEAP_NUMBER_TYPE, ecx);
    DeoptimizeIf(not_equal, instr->environment());
    __ bind(&no_deopt);
    MathPowStub stub(MathPowStub::TAGGED);
    __ CallStub(&stub);
  } else if (exponent_type.IsInteger32()) {
    MathPowStub stub(MathPowStub::INTEGER);
    __ CallStub(&stub);
  } else {
    ASSERT(exponent_type.IsDouble());
    MathPowStub stub(MathPowStub::DOUBLE);
    __ CallStub(&stub);
  }
}

void LCodeGen::DoRandom(LRandom* instr) {
  class DeferredDoRandom: public LDeferredCode {
   public:
    DeferredDoRandom(LCodeGen* codegen, LRandom* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() { codegen()->DoDeferredRandom(instr_); }
    virtual LInstruction* instr() { return instr_; }
   private:
    LRandom* instr_;
  };

  DeferredDoRandom* deferred = new(zone()) DeferredDoRandom(this, instr);

  // Having marked this instruction as a call we can use any registers.
  ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
  ASSERT(ToRegister(instr->global_object()).is(eax));
  // Assert that the register size is indeed the size of each seed.
  static const int kSeedSize = sizeof(uint32_t);
  STATIC_ASSERT(kPointerSize == kSeedSize);

  __ mov(eax, FieldOperand(eax, GlobalObject::kNativeContextOffset));
  static const int kRandomSeedOffset =
      FixedArray::kHeaderSize + Context::RANDOM_SEED_INDEX * kPointerSize;
  __ mov(ebx, FieldOperand(eax, kRandomSeedOffset));
  // ebx: FixedArray holding the native context's random seeds.

  // Load state[0]; if it is zero, call the runtime to initialize seeds.
  __ mov(ecx, FieldOperand(ebx, ByteArray::kHeaderSize));
  __ test(ecx, ecx);
  __ j(zero, deferred->entry());
  // Load state[1].
  __ mov(eax, FieldOperand(ebx, ByteArray::kHeaderSize + kSeedSize));

  // state[0] = 18273 * (state[0] & 0xFFFF) + (state[0] >> 16)
  __ movzx_w(edx, ecx);
  __ imul(edx, edx, 18273);
  __ shr(ecx, 16);
  __ add(ecx, edx);
  __ mov(FieldOperand(ebx, ByteArray::kHeaderSize), ecx);

  // state[1] = 36969 * (state[1] & 0xFFFF) + (state[1] >> 16)
  __ movzx_w(edx, eax);
  __ imul(edx, edx, 36969);
  __ shr(eax, 16);
  __ add(eax, edx);
  __ mov(FieldOperand(ebx, ByteArray::kHeaderSize + kSeedSize), eax);

  // Random bit pattern = (state[0] << 14) + (state[1] & 0x3FFFF)
  __ shl(ecx, 14);
  __ and_(eax, Immediate(0x3FFFF));
  __ add(eax, ecx);

  __ bind(deferred->exit());
  // Convert the 32 random bits in eax to a double in [0, 1) by computing
  // ( 1.(20 zeros)(32 random bits) x 2^20 ) - (1.0 x 2^20).
  __ mov(ebx, Immediate(0x49800000));  // 1.0 x 2^20 as a single.
  __ movd(xmm2, ebx);
  __ movd(xmm1, eax);
  __ cvtss2sd(xmm2, xmm2);
  __ por(xmm1, xmm2);
  __ subsd(xmm1, xmm2);
}

void LCodeGen::DoDeferredRandom(LRandom* instr) {
  __ PrepareCallCFunction(1, ebx);
  __ mov(Operand(esp, 0), eax);
  __ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1);
  // Return value is in eax.
}

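// The deferred path seeds the generator through the C function behind
// ExternalReference::random_uint32_function(). The main path then turns
// 32 random bits into a double in [0, 1) without a division: 0x49800000
// is 1.0 * 2^20 as a single-precision float; widened to double, its
// 52-bit mantissa has 32 free low bits, so OR-ing the random bits in
// yields 2^20 + r * 2^-32, and subtracting 2^20 leaves r * 2^-32,
// uniformly distributed in [0, 1).
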
void LCodeGen::DoMathLog(LUnaryMathOperation* instr) {
  ASSERT(instr->value()->Equals(instr->result()));
  XMMRegister input_reg = ToDoubleRegister(instr->value());
  Label positive, done, zero;
  __ xorps(xmm0, xmm0);
  __ ucomisd(input_reg, xmm0);
  __ j(above, &positive, Label::kNear);
  __ j(equal, &zero, Label::kNear);
  // Negative input (or NaN): return the canonical NaN.
  ExternalReference nan =
      ExternalReference::address_of_canonical_non_hole_nan();
  __ movdbl(input_reg, Operand::StaticVariable(nan));
  __ jmp(&done, Label::kNear);
  // Zero input: return -Infinity.
  __ bind(&zero);
  __ push(Immediate(0xFFF00000));
  __ push(Immediate(0));
  __ movdbl(input_reg, Operand(esp, 0));
  __ add(Operand(esp), Immediate(kDoubleSize));
  __ jmp(&done, Label::kNear);
  // Positive input: compute ln(x) on the x87 stack.
  __ bind(&positive);
  __ fldln2();
  __ sub(Operand(esp), Immediate(kDoubleSize));
  __ movdbl(Operand(esp, 0), input_reg);
  __ fld_d(Operand(esp, 0));
  __ fyl2x();
  __ fstp_d(Operand(esp, 0));
  __ movdbl(input_reg, Operand(esp, 0));
  __ add(Operand(esp), Immediate(kDoubleSize));
  __ bind(&done);
}

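// Math.log special-cases its edge values inline: log(0) is -Infinity,
// materialized by pushing the raw IEEE-754 pattern 0xFFF00000:00000000
// and reloading it as a double; negative inputs (and NaN) produce the
// canonical NaN. The positive path uses the x87 identity
// ln(x) = ln(2) * log2(x): fldln2 pushes ln(2) and fyl2x computes
// st(1) * log2(st(0)) in a single instruction.
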
void LCodeGen::DoMathTan(LUnaryMathOperation* instr) {
  ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
  TranscendentalCacheStub stub(TranscendentalCache::TAN,
                               TranscendentalCacheStub::UNTAGGED);
  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}


void LCodeGen::DoMathCos(LUnaryMathOperation* instr) {
  ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
  TranscendentalCacheStub stub(TranscendentalCache::COS,
                               TranscendentalCacheStub::UNTAGGED);
  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}


void LCodeGen::DoMathSin(LUnaryMathOperation* instr) {
  ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
  TranscendentalCacheStub stub(TranscendentalCache::SIN,
                               TranscendentalCacheStub::UNTAGGED);
  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}

void LCodeGen::DoUnaryMathOperation(LUnaryMathOperation* instr) {
  switch (instr->op()) {
    case kMathAbs:
      DoMathAbs(instr);
      break;
    case kMathFloor:
      DoMathFloor(instr);
      break;
    case kMathRound:
      DoMathRound(instr);
      break;
    case kMathSqrt:
      DoMathSqrt(instr);
      break;
    case kMathCos:
      DoMathCos(instr);
      break;
    case kMathSin:
      DoMathSin(instr);
      break;
    case kMathTan:
      DoMathTan(instr);
      break;
    case kMathLog:
      DoMathLog(instr);
      break;
    default:
      UNREACHABLE();
  }
}

void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
  ASSERT(ToRegister(instr->context()).is(esi));
  ASSERT(ToRegister(instr->function()).is(edi));
  ASSERT(instr->HasPointerMap());

  if (instr->known_function().is_null()) {
    LPointerMap* pointers = instr->pointer_map();
    RecordPosition(pointers->position());
    SafepointGenerator generator(
        this, pointers, Safepoint::kLazyDeopt);
    ParameterCount count(instr->arity());
    __ InvokeFunction(edi, count, CALL_FUNCTION, generator, CALL_AS_METHOD);
  } else {
    CallKnownFunction(instr->known_function(),
                      instr->arity(),
                      instr,
                      CALL_AS_METHOD,
                      EDI_CONTAINS_TARGET);
  }
}

void LCodeGen::DoCallKeyed(LCallKeyed* instr) {
  ASSERT(ToRegister(instr->context()).is(esi));
  ASSERT(ToRegister(instr->key()).is(ecx));
  ASSERT(ToRegister(instr->result()).is(eax));

  int arity = instr->arity();
  Handle<Code> ic =
      isolate()->stub_cache()->ComputeKeyedCallInitialize(arity);
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
}

void LCodeGen::DoCallNamed(LCallNamed* instr) {
  ASSERT(ToRegister(instr->context()).is(esi));
  ASSERT(ToRegister(instr->result()).is(eax));

  int arity = instr->arity();
  RelocInfo::Mode mode = RelocInfo::CODE_TARGET;
  Handle<Code> ic =
      isolate()->stub_cache()->ComputeCallInitialize(arity, mode);
  __ mov(ecx, instr->name());
  CallCode(ic, mode, instr);
}

void LCodeGen::DoCallFunction(LCallFunction* instr) {
  ASSERT(ToRegister(instr->context()).is(esi));
  ASSERT(ToRegister(instr->function()).is(edi));
  ASSERT(ToRegister(instr->result()).is(eax));

  int arity = instr->arity();
  CallFunctionStub stub(arity, NO_CALL_FUNCTION_FLAGS);
  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}

void LCodeGen::DoCallGlobal(LCallGlobal* instr) {
  ASSERT(ToRegister(instr->context()).is(esi));
  ASSERT(ToRegister(instr->result()).is(eax));

  int arity = instr->arity();
  RelocInfo::Mode mode = RelocInfo::CODE_TARGET_CONTEXT;
  Handle<Code> ic =
      isolate()->stub_cache()->ComputeCallInitialize(arity, mode);
  __ mov(ecx, instr->name());
  CallCode(ic, mode, instr);
}

void LCodeGen::DoCallKnownGlobal(LCallKnownGlobal* instr) {
  ASSERT(ToRegister(instr->result()).is(eax));
  CallKnownFunction(instr->target(),
                    instr->arity(),
                    instr,
                    CALL_AS_FUNCTION,
                    EDI_UNINITIALIZED);
}

void LCodeGen::DoCallNew(LCallNew* instr) {
  ASSERT(ToRegister(instr->context()).is(esi));
  ASSERT(ToRegister(instr->constructor()).is(edi));
  ASSERT(ToRegister(instr->result()).is(eax));

  CallConstructStub stub(NO_CALL_FUNCTION_FLAGS);
  __ Set(eax, Immediate(instr->arity()));
  CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
}

void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
  CallRuntime(instr->function(), instr->arity(), instr);
}

void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
  Register object = ToRegister(instr->object());
  Register value = ToRegister(instr->value());
  int offset = instr->offset();

  if (!instr->transition().is_null()) {
    if (!instr->hydrogen()->NeedsWriteBarrierForMap()) {
      __ mov(FieldOperand(object, HeapObject::kMapOffset),
             instr->transition());
    } else {
      Register temp = ToRegister(instr->temp());
      Register temp_map = ToRegister(instr->temp_map());
      __ mov(temp_map, instr->transition());
      __ mov(FieldOperand(object, HeapObject::kMapOffset), temp_map);
      // Update the write barrier for the map field.
      __ RecordWriteField(object,
                          HeapObject::kMapOffset,
                          temp_map,
                          temp,
                          kSaveFPRegs,
                          OMIT_REMEMBERED_SET,
                          OMIT_SMI_CHECK);
    }
  }

  // Do the store.
  HType type = instr->hydrogen()->value()->type();
  SmiCheck check_needed =
      type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
  if (instr->is_in_object()) {
    __ mov(FieldOperand(object, offset), value);
    if (instr->hydrogen()->NeedsWriteBarrier()) {
      Register temp = ToRegister(instr->temp());
      // Update the write barrier for the in-object property.
      __ RecordWriteField(object,
                          offset,
                          value,
                          temp,
                          kSaveFPRegs,
                          EMIT_REMEMBERED_SET,
                          check_needed);
    }
  } else {
    Register temp = ToRegister(instr->temp());
    __ mov(temp, FieldOperand(object, JSObject::kPropertiesOffset));
    __ mov(FieldOperand(temp, offset), value);
    if (instr->hydrogen()->NeedsWriteBarrier()) {
      // Update the write barrier for the properties array; object is
      // used as a scratch register.
      __ RecordWriteField(temp,
                          offset,
                          value,
                          object,
                          kSaveFPRegs,
                          EMIT_REMEMBERED_SET,
                          check_needed);
    }
  }
}

void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
  ASSERT(ToRegister(instr->context()).is(esi));
  ASSERT(ToRegister(instr->object()).is(edx));
  ASSERT(ToRegister(instr->value()).is(eax));

  __ mov(ecx, instr->name());
  Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
      ? isolate()->builtins()->StoreIC_Initialize_Strict()
      : isolate()->builtins()->StoreIC_Initialize();
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
}

void LCodeGen::DeoptIfTaggedButNotSmi(LEnvironment* environment,
                                      HValue* value,
                                      LOperand* operand) {
  if (value->representation().IsTagged() && !value->type().IsSmi()) {
    if (operand->IsRegister()) {
      __ test(ToRegister(operand), Immediate(kSmiTagMask));
    } else {
      __ test(ToOperand(operand), Immediate(kSmiTagMask));
    }
    DeoptimizeIf(not_zero, environment);
  }
}

void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
  DeoptIfTaggedButNotSmi(instr->environment(),
                         instr->hydrogen()->length(),
                         instr->length());
  DeoptIfTaggedButNotSmi(instr->environment(),
                         instr->hydrogen()->index(),
                         instr->index());
  if (instr->index()->IsConstantOperand()) {
    int constant_index =
        ToInteger32(LConstantOperand::cast(instr->index()));
    if (instr->hydrogen()->length()->representation().IsTagged()) {
      __ cmp(ToOperand(instr->length()),
             Immediate(Smi::FromInt(constant_index)));
    } else {
      __ cmp(ToOperand(instr->length()), Immediate(constant_index));
    }
    DeoptimizeIf(below_equal, instr->environment());
  } else {
    __ cmp(ToRegister(instr->index()), ToOperand(instr->length()));
    DeoptimizeIf(above_equal, instr->environment());
  }
}

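// Both compares above rely on smi tagging being order-preserving: a smi
// is the integer shifted left by one (tag bit 0 clear), so comparing two
// tagged non-negative smis orders the same as comparing the untagged
// values, and a constant index can be compared as
// Smi::FromInt(constant_index) against a tagged length. The unsigned
// conditions (below_equal/above_equal) also reject negative indices for
// free, since they wrap around to large unsigned values.
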
void LCodeGen::DoStoreKeyedSpecializedArrayElement(
    LStoreKeyedSpecializedArrayElement* instr) {
  ElementsKind elements_kind = instr->elements_kind();
  LOperand* key = instr->key();
  if (!key->IsConstantOperand() &&
      ExternalArrayOpRequiresTemp(instr->hydrogen()->key()->representation(),
                                  elements_kind)) {
    __ SmiUntag(ToRegister(key));
  }
  Operand operand(BuildFastArrayOperand(
      instr->external_pointer(),
      key,
      instr->hydrogen()->key()->representation(),
      elements_kind,
      0,
      instr->additional_index()));
  if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
    __ cvtsd2ss(xmm0, ToDoubleRegister(instr->value()));
    __ movss(operand, xmm0);
  } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
    __ movdbl(operand, ToDoubleRegister(instr->value()));
  } else {
    Register value = ToRegister(instr->value());
    switch (elements_kind) {
      case EXTERNAL_PIXEL_ELEMENTS:
      case EXTERNAL_BYTE_ELEMENTS:
      case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
        __ mov_b(operand, value);
        break;
      case EXTERNAL_SHORT_ELEMENTS:
      case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
        __ mov_w(operand, value);
        break;
      case EXTERNAL_INT_ELEMENTS:
      case EXTERNAL_UNSIGNED_INT_ELEMENTS:
        __ mov(operand, value);
        break;
      default:
        UNREACHABLE();
        break;
    }
  }
}

void LCodeGen::DoStoreKeyedFastElement(LStoreKeyedFastElement* instr) {
  Register value = ToRegister(instr->value());
  Register elements = ToRegister(instr->object());
  Register key = instr->key()->IsRegister() ? ToRegister(instr->key())
                                            : no_reg;

  Operand operand = BuildFastArrayOperand(
      instr->object(),
      instr->key(),
      instr->hydrogen()->key()->representation(),
      FAST_ELEMENTS,
      FixedArray::kHeaderSize - kHeapObjectTag,
      instr->additional_index());
  __ mov(operand, value);

  if (instr->hydrogen()->NeedsWriteBarrier()) {
    ASSERT(!instr->key()->IsConstantOperand());
    HType type = instr->hydrogen()->value()->type();
    SmiCheck check_needed =
        type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
    // Compute the address of the modified element into the key register.
    __ lea(key, operand);
    __ RecordWrite(elements,
                   key,
                   value,
                   kSaveFPRegs,
                   EMIT_REMEMBERED_SET,
                   check_needed);
  }
}

void LCodeGen::DoStoreKeyedFastDoubleElement(
    LStoreKeyedFastDoubleElement* instr) {
  XMMRegister value = ToDoubleRegister(instr->value());

  if (instr->NeedsCanonicalization()) {
    Label have_value;
    __ ucomisd(value, value);
    __ j(parity_odd, &have_value);  // NaN compares unordered with itself.
    // All NaNs are stored as one canonical non-hole NaN.
    ExternalReference canonical_nan_reference =
        ExternalReference::address_of_canonical_non_hole_nan();
    __ movdbl(value, Operand::StaticVariable(canonical_nan_reference));
    __ bind(&have_value);
  }

  Operand double_store_operand = BuildFastArrayOperand(
      instr->elements(),
      instr->key(),
      instr->hydrogen()->key()->representation(),
      FAST_DOUBLE_ELEMENTS,
      FixedDoubleArray::kHeaderSize - kHeapObjectTag,
      instr->additional_index());
  __ movdbl(double_store_operand, value);
}

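// The canonicalization above exploits the IEEE-754 rule that NaN is
// unordered with everything, including itself: ucomisd(value, value)
// sets the parity flag only for NaN. Every NaN written into a fast
// double array is replaced by one canonical non-hole NaN bit pattern so
// that arbitrary NaN payloads can never alias the hole sentinel that
// marks missing elements.
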
void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
  ASSERT(ToRegister(instr->context()).is(esi));
  ASSERT(ToRegister(instr->object()).is(edx));
  ASSERT(ToRegister(instr->key()).is(ecx));
  ASSERT(ToRegister(instr->value()).is(eax));

  Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
      ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
      : isolate()->builtins()->KeyedStoreIC_Initialize();
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
}

void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
  Register object_reg = ToRegister(instr->object());
  Register new_map_reg = ToRegister(instr->new_map_temp());

  Handle<Map> from_map = instr->original_map();
  Handle<Map> to_map = instr->transitioned_map();
  ElementsKind from_kind = from_map->elements_kind();
  ElementsKind to_kind = to_map->elements_kind();

  Label not_applicable;
  bool is_simple_map_transition =
      IsSimpleMapChangeTransition(from_kind, to_kind);
  Label::Distance branch_distance =
      is_simple_map_transition ? Label::kNear : Label::kFar;
  __ cmp(FieldOperand(object_reg, HeapObject::kMapOffset), from_map);
  __ j(not_equal, &not_applicable, branch_distance);
  if (is_simple_map_transition) {
    Register object_reg = ToRegister(instr->object());
    Handle<Map> map = instr->hydrogen()->transitioned_map();
    __ mov(FieldOperand(object_reg, HeapObject::kMapOffset),
           Immediate(map));
    // Write barrier for the map word.
    __ RecordWriteForMap(object_reg, to_map, new_map_reg,
                         ToRegister(instr->temp()),
                         kDontSaveFPRegs);
  } else if (IsFastSmiElementsKind(from_kind) &&
             IsFastDoubleElementsKind(to_kind)) {
    __ mov(new_map_reg, to_map);
    Register fixed_object_reg = ToRegister(instr->temp());
    ASSERT(fixed_object_reg.is(edx));
    ASSERT(new_map_reg.is(ebx));
    __ mov(fixed_object_reg, object_reg);
    CallCode(isolate()->builtins()->TransitionElementsSmiToDouble(),
             RelocInfo::CODE_TARGET, instr);
  } else if (IsFastDoubleElementsKind(from_kind) &&
             IsFastObjectElementsKind(to_kind)) {
    __ mov(new_map_reg, to_map);
    Register fixed_object_reg = ToRegister(instr->temp());
    ASSERT(fixed_object_reg.is(edx));
    ASSERT(new_map_reg.is(ebx));
    __ mov(fixed_object_reg, object_reg);
    CallCode(isolate()->builtins()->TransitionElementsDoubleToObject(),
             RelocInfo::CODE_TARGET, instr);
  } else {
    UNREACHABLE();
  }
  __ bind(&not_applicable);
}

void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
  class DeferredStringCharCodeAt: public LDeferredCode {
   public:
    DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() { codegen()->DoDeferredStringCharCodeAt(instr_); }
    virtual LInstruction* instr() { return instr_; }
   private:
    LStringCharCodeAt* instr_;
  };

  DeferredStringCharCodeAt* deferred =
      new(zone()) DeferredStringCharCodeAt(this, instr);

  StringCharLoadGenerator::Generate(masm(),
                                    ToRegister(instr->string()),
                                    ToRegister(instr->index()),
                                    ToRegister(instr->result()),
                                    deferred->entry());
  __ bind(deferred->exit());
}

void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
  Register string = ToRegister(instr->string());
  Register result = ToRegister(instr->result());

  // Give the result register a valid pointer value, because it is
  // already contained in the register pointer map.
  __ Set(result, Immediate(0));

  PushSafepointRegistersScope scope(this);
  __ push(string);
  // Push the index as a smi. This is safe because of the checks in
  // DoStringCharCodeAt above.
  STATIC_ASSERT(String::kMaxLength <= Smi::kMaxValue);
  if (instr->index()->IsConstantOperand()) {
    int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
    __ push(Immediate(Smi::FromInt(const_index)));
  } else {
    Register index = ToRegister(instr->index());
    __ SmiTag(index);
    __ push(index);
  }
  CallRuntimeFromDeferred(Runtime::kStringCharCodeAt, 2,
                          instr, instr->context());
  __ SmiUntag(eax);
  __ StoreToSafepointRegisterSlot(result, eax);
}

void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
  class DeferredStringCharFromCode: public LDeferredCode {
   public:
    DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() { codegen()->DoDeferredStringCharFromCode(instr_); }
    virtual LInstruction* instr() { return instr_; }
   private:
    LStringCharFromCode* instr_;
  };

  DeferredStringCharFromCode* deferred =
      new(zone()) DeferredStringCharFromCode(this, instr);

  ASSERT(instr->hydrogen()->value()->representation().IsInteger32());
  Register char_code = ToRegister(instr->char_code());
  Register result = ToRegister(instr->result());
  ASSERT(!char_code.is(result));

  __ cmp(char_code, String::kMaxAsciiCharCode);
  __ j(above, deferred->entry());
  __ Set(result, Immediate(factory()->single_character_string_cache()));
  __ mov(result, FieldOperand(result,
                              char_code, times_pointer_size,
                              FixedArray::kHeaderSize));
  __ cmp(result, factory()->undefined_value());
  __ j(equal, deferred->entry());
  __ bind(deferred->exit());
}

void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
  Register char_code = ToRegister(instr->char_code());
  Register result = ToRegister(instr->result());

  // Give the result register a valid pointer value, because it is
  // already contained in the register pointer map.
  __ Set(result, Immediate(0));

  PushSafepointRegistersScope scope(this);
  __ SmiTag(char_code);
  __ push(char_code);
  CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr, instr->context());
  __ StoreToSafepointRegisterSlot(result, eax);
}

void LCodeGen::DoStringLength(LStringLength* instr) {
  Register string = ToRegister(instr->string());
  Register result = ToRegister(instr->result());
  __ mov(result, FieldOperand(string, String::kLengthOffset));
}

void LCodeGen::DoStringAdd(LStringAdd* instr) {
  EmitPushTaggedOperand(instr->left());
  EmitPushTaggedOperand(instr->right());
  StringAddStub stub(NO_STRING_CHECK_IN_STUB);
  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}

void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
  LOperand* input = instr->value();
  ASSERT(input->IsRegister() || input->IsStackSlot());
  LOperand* output = instr->result();
  ASSERT(output->IsDoubleRegister());
  __ cvtsi2sd(ToDoubleRegister(output), ToOperand(input));
}

void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
  LOperand* input = instr->value();
  LOperand* output = instr->result();
  LOperand* temp = instr->temp();

  __ LoadUint32(ToDoubleRegister(output),
                ToRegister(input),
                ToDoubleRegister(temp));
}

void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
  class DeferredNumberTagI: public LDeferredCode {
   public:
    DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() {
      codegen()->DoDeferredNumberTagI(instr_, instr_->value(), SIGNED_INT32);
    }
    virtual LInstruction* instr() { return instr_; }
   private:
    LNumberTagI* instr_;
  };

  LOperand* input = instr->value();
  ASSERT(input->IsRegister() && input->Equals(instr->result()));
  Register reg = ToRegister(input);

  DeferredNumberTagI* deferred = new(zone()) DeferredNumberTagI(this, instr);
  __ SmiTag(reg);
  __ j(overflow, deferred->entry());
  __ bind(deferred->exit());
}

void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
  class DeferredNumberTagU: public LDeferredCode {
   public:
    DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() {
      codegen()->DoDeferredNumberTagI(instr_, instr_->value(), UNSIGNED_INT32);
    }
    virtual LInstruction* instr() { return instr_; }
   private:
    LNumberTagU* instr_;
  };

  LOperand* input = instr->value();
  ASSERT(input->IsRegister() && input->Equals(instr->result()));
  Register reg = ToRegister(input);

  DeferredNumberTagU* deferred = new(zone()) DeferredNumberTagU(this, instr);
  __ cmp(reg, Immediate(Smi::kMaxValue));
  __ j(above, deferred->entry());
  __ SmiTag(reg);
  __ bind(deferred->exit());
}

void LCodeGen::DoDeferredNumberTagI(LInstruction* instr,
                                    LOperand* value,
                                    IntegerSignedness signedness) {
  Label done, slow;
  Register reg = ToRegister(value);
  Register tmp = reg.is(eax) ? ecx : eax;

  // Preserve the value of all registers.
  PushSafepointRegistersScope scope(this);

  if (signedness == SIGNED_INT32) {
    // There was overflow, so bits 30 and 31 of the original integer
    // disagree. Recover the input, then convert it to a double.
    __ SmiUntag(reg);
    __ xor_(reg, 0x80000000);
    __ cvtsi2sd(xmm0, Operand(reg));
  } else {
    __ LoadUint32(xmm0, reg, xmm1);
  }

  if (FLAG_inline_new) {
    __ AllocateHeapNumber(reg, tmp, no_reg, &slow);
    __ jmp(&done, Label::kNear);
  }

  // Slow case: call the runtime system to do the number allocation.
  __ bind(&slow);

  // Put a valid pointer value in the stack slot where the result
  // register is stored, as this register is in the pointer map, but
  // currently contains an integer value.
  __ StoreToSafepointRegisterSlot(reg, Immediate(0));
  // NumberTagI and NumberTagD use the context from the frame, rather
  // than the environment's HContext or HInlinedContext value; they only
  // call Runtime::kAllocateHeapNumber.
  __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
  __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
  RecordSafepointWithRegisters(
      instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
  if (!reg.is(eax)) __ mov(reg, eax);

  // Done. Put the value in xmm0 into the allocated heap number.
  __ bind(&done);
  __ movdbl(FieldOperand(reg, HeapNumber::kValueOffset), xmm0);
  __ StoreToSafepointRegisterSlot(reg, reg);
}

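// The SIGNED_INT32 recovery above works because smi tagging is a left
// shift by one: if SmiTag overflowed, bits 30 and 31 of the original
// integer disagreed, so after SmiUntag (an arithmetic shift right) the
// result is correct except for bit 31, which xor 0x80000000 flips back.
// Example with v = 0x40000000 (2^30): SmiTag -> 0x80000000 (overflow),
// SmiUntag -> 0xC0000000, xor 0x80000000 -> 0x40000000 = v again.
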
void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
  class DeferredNumberTagD: public LDeferredCode {
   public:
    DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() { codegen()->DoDeferredNumberTagD(instr_); }
    virtual LInstruction* instr() { return instr_; }
   private:
    LNumberTagD* instr_;
  };

  XMMRegister input_reg = ToDoubleRegister(instr->value());
  Register reg = ToRegister(instr->result());
  Register tmp = ToRegister(instr->temp());

  DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr);
  if (FLAG_inline_new) {
    __ AllocateHeapNumber(reg, tmp, no_reg, deferred->entry());
  } else {
    __ jmp(deferred->entry());
  }
  __ bind(deferred->exit());
  __ movdbl(FieldOperand(reg, HeapNumber::kValueOffset), input_reg);
}

void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
  // Give the result register a valid pointer value, because it is
  // already contained in the register pointer map.
  Register reg = ToRegister(instr->result());
  __ Set(reg, Immediate(0));

  PushSafepointRegistersScope scope(this);
  // NumberTagD uses the context from the frame, rather than the
  // environment's HContext or HInlinedContext value.
  __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
  __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
  RecordSafepointWithRegisters(
      instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
  __ StoreToSafepointRegisterSlot(reg, eax);
}

void LCodeGen::DoSmiTag(LSmiTag* instr) {
  LOperand* input = instr->value();
  ASSERT(input->IsRegister() && input->Equals(instr->result()));
  ASSERT(!instr->hydrogen_value()->CheckFlag(HValue::kCanOverflow));
  __ SmiTag(ToRegister(input));
}

void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
  LOperand* input = instr->value();
  ASSERT(input->IsRegister() && input->Equals(instr->result()));
  if (instr->needs_check()) {
    __ test(ToRegister(input), Immediate(kSmiTagMask));
    DeoptimizeIf(not_zero, instr->environment());
  }
  __ SmiUntag(ToRegister(input));
}

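// On ia32 a smi stores a 31-bit integer in the upper bits of a word with
// tag bit 0 clear, so tagging is "value << 1" and untagging is an
// arithmetic "value >> 1" (5 <-> 10, -3 <-> -6). This is also why the
// needs_check path can simply test against kSmiTagMask: any word with
// bit 0 set is a heap-object pointer, not a smi.
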
void LCodeGen::EmitNumberUntagD(Register input_reg,
                                Register temp_reg,
                                XMMRegister result_reg,
                                bool deoptimize_on_undefined,
                                bool deoptimize_on_minus_zero,
                                LEnvironment* env) {
  Label load_smi, done;

  // Smi check.
  __ JumpIfSmi(input_reg, &load_smi, Label::kNear);

  // Heap number map check.
  __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
         factory()->heap_number_map());
  if (deoptimize_on_undefined) {
    DeoptimizeIf(not_equal, env);
  } else {
    Label heap_number;
    __ j(equal, &heap_number, Label::kNear);
    __ cmp(input_reg, factory()->undefined_value());
    DeoptimizeIf(not_equal, env);
    // Convert undefined to NaN.
    ExternalReference nan =
        ExternalReference::address_of_canonical_non_hole_nan();
    __ movdbl(result_reg, Operand::StaticVariable(nan));
    __ jmp(&done, Label::kNear);
    __ bind(&heap_number);
  }
  // Heap number to XMM conversion.
  __ movdbl(result_reg, FieldOperand(input_reg, HeapNumber::kValueOffset));
  if (deoptimize_on_minus_zero) {
    XMMRegister xmm_scratch = xmm0;
    __ xorps(xmm_scratch, xmm_scratch);
    __ ucomisd(result_reg, xmm_scratch);
    __ j(not_zero, &done, Label::kNear);
    __ movmskpd(temp_reg, result_reg);
    __ test_b(temp_reg, 1);
    DeoptimizeIf(not_zero, env);
  }
  __ jmp(&done, Label::kNear);

  // Smi to XMM conversion.
  __ bind(&load_smi);
  __ SmiUntag(input_reg);  // Untag smi before converting to float.
  __ cvtsi2sd(result_reg, Operand(input_reg));
  __ SmiTag(input_reg);  // Retag smi.
  __ bind(&done);
}

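// The minus-zero check above cannot use an arithmetic compare: under
// ucomisd, -0.0 == +0.0, so after the equality test the sign must be
// read directly from the bit pattern. movmskpd copies the sign bits of
// the two double lanes into the low bits of a general register, so
// "test_b temp_reg, 1" isolates the sign of the value in lane 0.
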
void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
  Label done, heap_number;
  Register input_reg = ToRegister(instr->value());

  // Heap number map check.
  __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
         factory()->heap_number_map());

  if (instr->truncating()) {
    __ j(equal, &heap_number, Label::kNear);
    // Check for undefined, which converts to zero for truncating
    // conversions.
    __ cmp(input_reg, factory()->undefined_value());
    DeoptimizeIf(not_equal, instr->environment());
    __ mov(input_reg, 0);
    __ jmp(&done, Label::kNear);

    __ bind(&heap_number);
    if (CpuFeatures::IsSupported(SSE3)) {
      CpuFeatures::Scope scope(SSE3);
      Label convert;
      // Use a more powerful conversion when SSE3 is available: load the
      // value onto the x87 stack and check that its exponent fits.
      __ fld_d(FieldOperand(input_reg, HeapNumber::kValueOffset));
      __ mov(input_reg, FieldOperand(input_reg, HeapNumber::kExponentOffset));
      __ and_(input_reg, HeapNumber::kExponentMask);
      const uint32_t kTooBigExponent =
          (HeapNumber::kExponentBias + 63) << HeapNumber::kExponentShift;
      __ cmp(Operand(input_reg), Immediate(kTooBigExponent));
      __ j(less, &convert, Label::kNear);
      // Pop the value from the FPU stack before deoptimizing.
      __ fstp(0);
      DeoptimizeIf(no_condition, instr->environment());

      __ bind(&convert);
      // Reserve space for the 64-bit answer; the conversion cannot fail
      // because the exponent was checked above.
      __ sub(Operand(esp), Immediate(kDoubleSize));
      __ fisttp_d(Operand(esp, 0));
      __ mov(input_reg, Operand(esp, 0));  // The low word is the answer.
      __ add(Operand(esp), Immediate(kDoubleSize));
    } else {
      XMMRegister xmm_temp = ToDoubleRegister(instr->temp());
      __ movdbl(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
      __ cvttsd2si(input_reg, Operand(xmm0));
      __ cmp(input_reg, 0x80000000u);
      __ j(not_equal, &done, Label::kNear);
      // The sentinel could also mean the input really was kMinInt;
      // compare against it and deoptimize on any other value.
      ExternalReference min_int = ExternalReference::address_of_min_int();
      __ movdbl(xmm_temp, Operand::StaticVariable(min_int));
      __ ucomisd(xmm_temp, xmm0);
      DeoptimizeIf(not_equal, instr->environment());
      DeoptimizeIf(parity_even, instr->environment());  // NaN.
    }
  } else {
    // Deoptimize if we don't have a heap number.
    DeoptimizeIf(not_equal, instr->environment());

    XMMRegister xmm_temp = ToDoubleRegister(instr->temp());
    __ movdbl(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
    __ cvttsd2si(input_reg, Operand(xmm0));
    __ cvtsi2sd(xmm_temp, Operand(input_reg));
    __ ucomisd(xmm0, xmm_temp);
    DeoptimizeIf(not_equal, instr->environment());
    DeoptimizeIf(parity_even, instr->environment());  // NaN.
    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
      __ test(input_reg, Operand(input_reg));
      __ j(not_zero, &done);
      __ movmskpd(input_reg, xmm0);
      __ and_(input_reg, 1);
      DeoptimizeIf(not_zero, instr->environment());
    }
  }
  __ bind(&done);
}

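// Truncating tagged-to-int32 has two hardware-assisted strategies:
// cvttsd2si reports overflow or NaN by returning the sentinel
// 0x80000000 (which must then be disambiguated against a genuine
// kMinInt input), while the SSE3 path uses the x87 fisttp instruction
// with a 64-bit destination, guarded by an exponent check so that only
// values whose magnitude stays below 2^63 reach the conversion.
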
void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
  class DeferredTaggedToI: public LDeferredCode {
   public:
    DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() { codegen()->DoDeferredTaggedToI(instr_); }
    virtual LInstruction* instr() { return instr_; }
   private:
    LTaggedToI* instr_;
  };

  LOperand* input = instr->value();
  ASSERT(input->IsRegister());
  ASSERT(input->Equals(instr->result()));

  Register input_reg = ToRegister(input);

  DeferredTaggedToI* deferred = new(zone()) DeferredTaggedToI(this, instr);

  // Smi check.
  __ JumpIfNotSmi(input_reg, deferred->entry());

  // Smi to int32 conversion.
  __ SmiUntag(input_reg);

  __ bind(deferred->exit());
}

void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
  LOperand* input = instr->value();
  ASSERT(input->IsRegister());
  LOperand* temp = instr->temp();
  ASSERT(temp == NULL || temp->IsRegister());
  LOperand* result = instr->result();
  ASSERT(result->IsDoubleRegister());

  Register input_reg = ToRegister(input);
  XMMRegister result_reg = ToDoubleRegister(result);

  bool deoptimize_on_minus_zero =
      instr->hydrogen()->deoptimize_on_minus_zero();
  Register temp_reg = deoptimize_on_minus_zero ? ToRegister(temp) : no_reg;

  EmitNumberUntagD(input_reg,
                   temp_reg,
                   result_reg,
                   instr->hydrogen()->deoptimize_on_undefined(),
                   deoptimize_on_minus_zero,
                   instr->environment());
}

void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
  LOperand* input = instr->value();
  ASSERT(input->IsDoubleRegister());
  LOperand* result = instr->result();
  ASSERT(result->IsRegister());
  Register result_reg = ToRegister(result);

  XMMRegister input_reg = ToDoubleRegister(input);

  if (instr->truncating()) {
    // Truncating conversion as used by the JS bitwise operations.
    // cvttsd2si signals failure (overflow or NaN) with 0x80000000.
    __ cvttsd2si(result_reg, Operand(input_reg));
    __ cmp(result_reg, 0x80000000u);
    if (CpuFeatures::IsSupported(SSE3)) {
      // Retry with the more powerful x87 fisttp, deoptimizing only if
      // the exponent is genuinely out of range.
      CpuFeatures::Scope scope(SSE3);
      Label convert, done;
      __ j(not_equal, &done, Label::kNear);
      __ sub(Operand(esp), Immediate(kDoubleSize));
      __ movdbl(Operand(esp, 0), input_reg);
      // Check the exponent in the high word.
      __ mov(result_reg, Operand(esp, sizeof(int32_t)));
      __ and_(result_reg, HeapNumber::kExponentMask);
      const uint32_t kTooBigExponent =
          (HeapNumber::kExponentBias + 63) << HeapNumber::kExponentShift;
      __ cmp(Operand(result_reg), Immediate(kTooBigExponent));
      __ j(less, &convert, Label::kNear);
      __ add(Operand(esp), Immediate(kDoubleSize));
      DeoptimizeIf(no_condition, instr->environment());

      __ bind(&convert);
      // The conversion cannot fail: the exponent was checked above.
      __ fld_d(Operand(esp, 0));
      __ fisttp_d(Operand(esp, 0));
      __ mov(result_reg, Operand(esp, 0));  // The low word is the answer.
      __ add(Operand(esp), Immediate(kDoubleSize));
      __ bind(&done);
    } else {
      Label done;
      Register temp_reg = ToRegister(instr->temp());
      XMMRegister xmm_scratch = xmm0;
      // If cvttsd2si succeeded, we're done; otherwise convert manually.
      __ j(not_equal, &done, Label::kNear);

      // Move the high 32 bits of the input into result_reg and temp_reg.
      __ pshufd(xmm_scratch, input_reg, 1);
      __ movd(Operand(temp_reg), xmm_scratch);
      __ mov(result_reg, temp_reg);

      // Compute the shift count from the exponent; deoptimize if the
      // value cannot be represented in the available bits.
      __ and_(result_reg, HeapNumber::kExponentMask);
      __ shr(result_reg, HeapNumber::kExponentShift);
      __ sub(Operand(result_reg),
             Immediate(HeapNumber::kExponentBias + HeapNumber::kMantissaBits));
      DeoptimizeIf(greater, instr->environment());
      __ neg(result_reg);

      // Set bit 63 of the input (the only bit set in -0.0), then shift
      // the 64-bit pattern right by the computed count and move the low
      // 32 bits into result_reg.
      ExternalReference minus_zero = ExternalReference::address_of_minus_zero();
      __ movdbl(xmm_scratch, Operand::StaticVariable(minus_zero));
      __ por(input_reg, xmm_scratch);
      __ movd(xmm_scratch, Operand(result_reg));
      __ psrlq(input_reg, xmm_scratch);
      __ movd(Operand(result_reg), input_reg);

      // Negate the result if the input's sign (saved in temp_reg) was set.
      __ sar(temp_reg, 31);
      __ xor_(result_reg, Operand(temp_reg));
      __ sub(result_reg, Operand(temp_reg));
      __ bind(&done);
    }
  } else {
    Label done;
    __ cvttsd2si(result_reg, Operand(input_reg));
    __ cvtsi2sd(xmm0, Operand(result_reg));
    __ ucomisd(xmm0, input_reg);
    DeoptimizeIf(not_equal, instr->environment());
    DeoptimizeIf(parity_even, instr->environment());  // NaN.
    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
      // The conversion round-tripped, so only -0 remains to be ruled
      // out; read its sign from the bit pattern with movmskpd.
      __ test(result_reg, Operand(result_reg));
      __ j(not_zero, &done, Label::kNear);
      __ movmskpd(result_reg, input_reg);
      __ and_(result_reg, 1);
      DeoptimizeIf(not_zero, instr->environment());
    }
    __ bind(&done);
  }
}

void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
  LOperand* input = instr->value();
  __ test(ToOperand(input), Immediate(kSmiTagMask));
  DeoptimizeIf(not_zero, instr->environment());
}


void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
  LOperand* input = instr->value();
  __ test(ToOperand(input), Immediate(kSmiTagMask));
  DeoptimizeIf(zero, instr->environment());
}

void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
  Register input = ToRegister(instr->value());
  Register temp = ToRegister(instr->temp());

  __ mov(temp, FieldOperand(input, HeapObject::kMapOffset));

  if (instr->hydrogen()->is_interval_check()) {
    InstanceType first;
    InstanceType last;
    instr->hydrogen()->GetCheckInterval(&first, &last);

    __ cmpb(FieldOperand(temp, Map::kInstanceTypeOffset),
            static_cast<int8_t>(first));

    // If there is only one type in the interval, check for equality.
    if (first == last) {
      DeoptimizeIf(not_equal, instr->environment());
    } else {
      DeoptimizeIf(below, instr->environment());
      // Omit the check if the last type is the last possible type.
      if (last != LAST_TYPE) {
        __ cmpb(FieldOperand(temp, Map::kInstanceTypeOffset),
                static_cast<int8_t>(last));
        DeoptimizeIf(above, instr->environment());
      }
    }
  } else {
    uint8_t mask;
    uint8_t tag;
    instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);

    if (IsPowerOf2(mask)) {
      ASSERT(tag == 0 || IsPowerOf2(tag));
      __ test_b(FieldOperand(temp, Map::kInstanceTypeOffset), mask);
      DeoptimizeIf(tag == 0 ? not_zero : zero, instr->environment());
    } else {
      __ movzx_b(temp, FieldOperand(temp, Map::kInstanceTypeOffset));
      __ and_(temp, mask);
      __ cmp(temp, tag);
      DeoptimizeIf(not_equal, instr->environment());
    }
  }
}

void LCodeGen::DoCheckFunction(LCheckFunction* instr) {
  Handle<JSFunction> target = instr->hydrogen()->target();
  if (isolate()->heap()->InNewSpace(*target)) {
    Register reg = ToRegister(instr->value());
    Handle<JSGlobalPropertyCell> cell =
        isolate()->factory()->NewJSGlobalPropertyCell(target);
    __ cmp(reg, Operand::Cell(cell));
  } else {
    Operand operand = ToOperand(instr->value());
    __ cmp(operand, target);
  }
  DeoptimizeIf(not_equal, instr->environment());
}

void LCodeGen::DoCheckMapCommon(Register reg,
                                Handle<Map> map,
                                CompareMapMode mode,
                                LEnvironment* env) {
  Label success;
  __ CompareMap(reg, map, &success, mode);
  DeoptimizeIf(not_equal, env);
  __ bind(&success);
}

void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
  LOperand* input = instr->value();
  ASSERT(input->IsRegister());
  Register reg = ToRegister(input);

  Label success;
  SmallMapList* map_set = instr->hydrogen()->map_set();
  for (int i = 0; i < map_set->length() - 1; i++) {
    Handle<Map> map = map_set->at(i);
    __ CompareMap(reg, map, &success, REQUIRE_EXACT_MAP);
    __ j(equal, &success);
  }
  Handle<Map> map = map_set->last();
  DoCheckMapCommon(reg, map, REQUIRE_EXACT_MAP, instr->environment());
  __ bind(&success);
}

void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
  XMMRegister value_reg = ToDoubleRegister(instr->unclamped());
  Register result_reg = ToRegister(instr->result());
  __ ClampDoubleToUint8(value_reg, xmm0, result_reg);
}


void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
  ASSERT(instr->unclamped()->Equals(instr->result()));
  Register value_reg = ToRegister(instr->result());
  __ ClampUint8(value_reg);
}

void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
  ASSERT(instr->unclamped()->Equals(instr->result()));
  Register input_reg = ToRegister(instr->unclamped());
  Label is_smi, done, heap_number;

  __ JumpIfSmi(input_reg, &is_smi);

  // Check for heap number.
  __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
         factory()->heap_number_map());
  __ j(equal, &heap_number, Label::kNear);

  // Check for undefined, which converts to zero for clamping
  // conversions.
  __ cmp(input_reg, factory()->undefined_value());
  DeoptimizeIf(not_equal, instr->environment());
  __ mov(input_reg, 0);
  __ jmp(&done, Label::kNear);

  // Heap number.
  __ bind(&heap_number);
  __ movdbl(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
  __ ClampDoubleToUint8(xmm0, xmm1, input_reg);
  __ jmp(&done, Label::kNear);

  // Smi.
  __ bind(&is_smi);
  __ SmiUntag(input_reg);
  __ ClampUint8(input_reg);
  __ bind(&done);
}

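// ClampDoubleToUint8/ClampUint8 implement the typed-array/pixel store
// contract: values are clamped to [0, 255] (with rounding handled
// inside the macro-assembler helper), undefined is treated as 0, and
// any other non-number deoptimizes. This mirrors the CanvasPixelArray
// semantics, which is presumably why clamping rather than wrapping is
// generated here.
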
void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
  Register reg = ToRegister(instr->temp());

  Handle<JSObject> holder = instr->holder();
  Handle<JSObject> current_prototype = instr->prototype();

  // Load the prototype object.
  __ LoadHeapObject(reg, current_prototype);

  // Check the prototype maps up to the holder.
  while (!current_prototype.is_identical_to(holder)) {
    DoCheckMapCommon(reg, Handle<Map>(current_prototype->map()),
                     ALLOW_ELEMENT_TRANSITION_MAPS, instr->environment());

    current_prototype =
        Handle<JSObject>(JSObject::cast(current_prototype->GetPrototype()));
    // Load the next prototype object.
    __ LoadHeapObject(reg, current_prototype);
  }

  // Check the holder map.
  DoCheckMapCommon(reg, Handle<Map>(current_prototype->map()),
                   ALLOW_ELEMENT_TRANSITION_MAPS, instr->environment());
}

void LCodeGen::DoAllocateObject(LAllocateObject* instr) {
  class DeferredAllocateObject: public LDeferredCode {
   public:
    DeferredAllocateObject(LCodeGen* codegen, LAllocateObject* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() { codegen()->DoDeferredAllocateObject(instr_); }
    virtual LInstruction* instr() { return instr_; }
   private:
    LAllocateObject* instr_;
  };

  DeferredAllocateObject* deferred =
      new(zone()) DeferredAllocateObject(this, instr);

  Register result = ToRegister(instr->result());
  Register scratch = ToRegister(instr->temp());
  Handle<JSFunction> constructor = instr->hydrogen()->constructor();
  Handle<Map> initial_map(constructor->initial_map());
  int instance_size = initial_map->instance_size();
  ASSERT(initial_map->pre_allocated_property_fields() +
         initial_map->unused_property_fields() -
         initial_map->inobject_properties() == 0);

  // Allocate memory for the object. The initial map might change when
  // the constructor's prototype changes, but instance size and the
  // property counts remain unchanged (if slack tracking finished).
  ASSERT(!constructor->shared()->IsInobjectSlackTrackingInProgress());
  __ AllocateInNewSpace(instance_size,
                        result,
                        no_reg,
                        scratch,
                        deferred->entry(),
                        TAG_OBJECT);

  __ bind(deferred->exit());
  if (FLAG_debug_code) {
    Label is_in_new_space;
    __ JumpIfInNewSpace(result, scratch, &is_in_new_space);
    __ Abort("Allocated object is not in new-space");
    __ bind(&is_in_new_space);
  }

  // Load the initial map.
  Register map = scratch;
  __ LoadHeapObject(scratch, constructor);
  __ mov(map, FieldOperand(scratch, JSFunction::kPrototypeOrInitialMapOffset));

  if (FLAG_debug_code) {
    __ AssertNotSmi(map);
    __ cmpb(FieldOperand(map, Map::kInstanceSizeOffset),
            instance_size >> kPointerSizeLog2);
    __ Assert(equal, "Unexpected instance size");
    __ cmpb(FieldOperand(map, Map::kPreAllocatedPropertyFieldsOffset),
            initial_map->pre_allocated_property_fields());
    __ Assert(equal, "Unexpected pre-allocated property fields count");
    __ cmpb(FieldOperand(map, Map::kUnusedPropertyFieldsOffset),
            initial_map->unused_property_fields());
    __ Assert(equal, "Unexpected unused property fields count");
    __ cmpb(FieldOperand(map, Map::kInObjectPropertiesOffset),
            initial_map->inobject_properties());
    __ Assert(equal, "Unexpected in-object property fields count");
  }

  // Initialize the map and fields of the newly allocated object.
  ASSERT(initial_map->instance_type() == JS_OBJECT_TYPE);
  __ mov(FieldOperand(result, JSObject::kMapOffset), map);
  __ mov(scratch, factory()->empty_fixed_array());
  __ mov(FieldOperand(result, JSObject::kElementsOffset), scratch);
  __ mov(FieldOperand(result, JSObject::kPropertiesOffset), scratch);
  if (initial_map->inobject_properties() != 0) {
    __ mov(scratch, factory()->undefined_value());
    for (int i = 0; i < initial_map->inobject_properties(); i++) {
      int property_offset = JSObject::kHeaderSize + i * kPointerSize;
      __ mov(FieldOperand(result, property_offset), scratch);
    }
  }
}

void LCodeGen::DoDeferredAllocateObject(LAllocateObject* instr) {
  Register result = ToRegister(instr->result());
  Handle<JSFunction> constructor = instr->hydrogen()->constructor();
  Handle<Map> initial_map(constructor->initial_map());
  int instance_size = initial_map->instance_size();

  // Give the result register a valid pointer value, because it is
  // already contained in the register pointer map.
  __ Set(result, Immediate(0));

  PushSafepointRegistersScope scope(this);
  __ push(Immediate(Smi::FromInt(instance_size)));
  CallRuntimeFromDeferred(
      Runtime::kAllocateInNewSpace, 1, instr, instr->context());
  __ StoreToSafepointRegisterSlot(result, eax);
}

void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
  ASSERT(ToRegister(instr->context()).is(esi));
  Handle<FixedArray> literals(instr->environment()->closure()->literals());
  ElementsKind boilerplate_elements_kind =
      instr->hydrogen()->boilerplate_elements_kind();

  // Deopt if the array literal boilerplate ElementsKind is of a type
  // different from the expected one. The check isn't necessary if the
  // boilerplate has already been converted to TERMINAL_FAST_ELEMENTS_KIND.
  if (CanTransitionToMoreGeneralFastElementsKind(
          boilerplate_elements_kind, true)) {
    __ LoadHeapObject(eax, instr->hydrogen()->boilerplate_object());
    __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
    // Retrieve elements_kind from the map's bit field 2.
    __ mov(ebx, FieldOperand(ebx, Map::kBitField2Offset));
    __ and_(ebx, Map::kElementsKindMask);
    __ cmp(ebx, boilerplate_elements_kind << Map::kElementsKindShift);
    DeoptimizeIf(not_equal, instr->environment());
  }

  // Set up the parameters to the stub/runtime call.
  __ PushHeapObject(literals);
  __ push(Immediate(Smi::FromInt(instr->hydrogen()->literal_index())));
  // Boilerplate already exists, constant elements are never accessed;
  // pass an empty fixed array.
  __ push(Immediate(isolate()->factory()->empty_fixed_array()));

  // Pick the right runtime function or stub to call.
  int length = instr->hydrogen()->length();
  if (instr->hydrogen()->IsCopyOnWrite()) {
    ASSERT(instr->hydrogen()->depth() == 1);
    FastCloneShallowArrayStub::Mode mode =
        FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS;
    FastCloneShallowArrayStub stub(mode, length);
    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
  } else if (instr->hydrogen()->depth() > 1) {
    CallRuntime(Runtime::kCreateArrayLiteral, 3, instr);
  } else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) {
    CallRuntime(Runtime::kCreateArrayLiteralShallow, 3, instr);
  } else {
    FastCloneShallowArrayStub::Mode mode =
        boilerplate_elements_kind == FAST_DOUBLE_ELEMENTS
            ? FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS
            : FastCloneShallowArrayStub::CLONE_ELEMENTS;
    FastCloneShallowArrayStub stub(mode, length);
    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
  }
}

void LCodeGen::EmitDeepCopy(Handle<JSObject> object,
                            Register result,
                            Register source,
                            int* offset) {
  if (FLAG_debug_code) {
    __ LoadHeapObject(ecx, object);
    __ cmp(source, ecx);
    __ Assert(equal, "Unexpected object literal boilerplate");
    __ mov(ecx, FieldOperand(source, HeapObject::kMapOffset));
    __ cmp(ecx, Handle<Map>(object->map()));
    __ Assert(equal, "Unexpected boilerplate map");
    __ mov(ecx, FieldOperand(ecx, Map::kBitField2Offset));
    __ and_(ecx, Map::kElementsKindMask);
    __ cmp(ecx, object->GetElementsKind() << Map::kElementsKindShift);
    __ Assert(equal, "Unexpected boilerplate elements kind");
  }

  // Only elements backing stores for non-COW arrays need to be copied.
  Handle<FixedArrayBase> elements(object->elements());
  bool has_elements = elements->length() > 0 &&
      elements->map() != isolate()->heap()->fixed_cow_array_map();

  // Increase the offset so that subsequent objects end up right after
  // this object and its backing store.
  int object_offset = *offset;
  int object_size = object->map()->instance_size();
  int elements_offset = *offset + object_size;
  int elements_size = has_elements ? elements->Size() : 0;
  *offset += object_size + elements_size;

  // Copy the object header.
  ASSERT(object->properties()->length() == 0);
  int inobject_properties = object->map()->inobject_properties();
  int header_size = object_size - inobject_properties * kPointerSize;
  for (int i = 0; i < header_size; i += kPointerSize) {
    if (has_elements && i == JSObject::kElementsOffset) {
      __ lea(ecx, Operand(result, elements_offset));
    } else {
      __ mov(ecx, FieldOperand(source, i));
    }
    __ mov(FieldOperand(result, object_offset + i), ecx);
  }

  // Copy the in-object properties.
  for (int i = 0; i < inobject_properties; i++) {
    int total_offset = object_offset + object->GetInObjectPropertyOffset(i);
    Handle<Object> value = Handle<Object>(object->InObjectPropertyAt(i));
    if (value->IsJSObject()) {
      Handle<JSObject> value_object = Handle<JSObject>::cast(value);
      __ lea(ecx, Operand(result, *offset));
      __ mov(FieldOperand(result, total_offset), ecx);
      __ LoadHeapObject(source, value_object);
      EmitDeepCopy(value_object, result, source, offset);
    } else if (value->IsHeapObject()) {
      __ LoadHeapObject(ecx, Handle<HeapObject>::cast(value));
      __ mov(FieldOperand(result, total_offset), ecx);
    } else {
      __ mov(FieldOperand(result, total_offset), Immediate(value));
    }
  }

  if (has_elements) {
    // Copy the elements backing store header.
    __ LoadHeapObject(source, elements);
    for (int i = 0; i < FixedArray::kHeaderSize; i += kPointerSize) {
      __ mov(ecx, FieldOperand(source, i));
      __ mov(FieldOperand(result, elements_offset + i), ecx);
    }

    // Copy the elements backing store content.
    int elements_length = elements->length();
    if (elements->IsFixedDoubleArray()) {
      Handle<FixedDoubleArray> double_array =
          Handle<FixedDoubleArray>::cast(elements);
      for (int i = 0; i < elements_length; i++) {
        int64_t value = double_array->get_representation(i);
        int32_t value_low = static_cast<int32_t>(value & 0xFFFFFFFF);
        int32_t value_high = static_cast<int32_t>(value >> 32);
        int total_offset =
            elements_offset + FixedDoubleArray::OffsetOfElementAt(i);
        __ mov(FieldOperand(result, total_offset), Immediate(value_low));
        __ mov(FieldOperand(result, total_offset + 4), Immediate(value_high));
      }
    } else if (elements->IsFixedArray()) {
      Handle<FixedArray> fast_elements = Handle<FixedArray>::cast(elements);
      for (int i = 0; i < elements_length; i++) {
        int total_offset = elements_offset + FixedArray::OffsetOfElementAt(i);
        Handle<Object> value(fast_elements->get(i));
        if (value->IsJSObject()) {
          Handle<JSObject> value_object = Handle<JSObject>::cast(value);
          __ lea(ecx, Operand(result, *offset));
          __ mov(FieldOperand(result, total_offset), ecx);
          __ LoadHeapObject(source, value_object);
          EmitDeepCopy(value_object, result, source, offset);
        } else if (value->IsHeapObject()) {
          __ LoadHeapObject(ecx, Handle<HeapObject>::cast(value));
          __ mov(FieldOperand(result, total_offset), ecx);
        } else {
          __ mov(FieldOperand(result, total_offset), Immediate(value));
        }
      }
    } else {
      UNREACHABLE();
    }
  }
}

void LCodeGen::DoFastLiteral(LFastLiteral* instr) {
  ASSERT(ToRegister(instr->context()).is(esi));
  int size = instr->hydrogen()->total_size();
  ElementsKind boilerplate_elements_kind =
      instr->hydrogen()->boilerplate()->GetElementsKind();

  // Deopt if the literal boilerplate ElementsKind is of a type different
  // from the expected one. The check isn't necessary if the boilerplate
  // has already been converted to TERMINAL_FAST_ELEMENTS_KIND.
  if (CanTransitionToMoreGeneralFastElementsKind(
          boilerplate_elements_kind, true)) {
    __ LoadHeapObject(ebx, instr->hydrogen()->boilerplate());
    __ mov(ecx, FieldOperand(ebx, HeapObject::kMapOffset));
    // Retrieve elements_kind from the map's bit field 2.
    __ mov(ecx, FieldOperand(ecx, Map::kBitField2Offset));
    __ and_(ecx, Map::kElementsKindMask);
    __ cmp(ecx, boilerplate_elements_kind << Map::kElementsKindShift);
    DeoptimizeIf(not_equal, instr->environment());
  }

  // Allocate all objects that are part of the literal in one big
  // allocation. This avoids multiple limit checks.
  Label allocated, runtime_allocate;
  __ AllocateInNewSpace(size, eax, ecx, edx, &runtime_allocate, TAG_OBJECT);
  __ jmp(&allocated);

  __ bind(&runtime_allocate);
  __ push(Immediate(Smi::FromInt(size)));
  CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);

  __ bind(&allocated);
  int offset = 0;
  __ LoadHeapObject(ebx, instr->hydrogen()->boilerplate());
  EmitDeepCopy(instr->hydrogen()->boilerplate(), eax, ebx, &offset);
  ASSERT_EQ(size, offset);
}

void LCodeGen::DoObjectLiteral(LObjectLiteral* instr) {
  ASSERT(ToRegister(instr->context()).is(esi));
  Handle<FixedArray> literals(instr->environment()->closure()->literals());
  Handle<FixedArray> constant_properties =
      instr->hydrogen()->constant_properties();

  // Set up the parameters to the stub/runtime call.
  __ PushHeapObject(literals);
  __ push(Immediate(Smi::FromInt(instr->hydrogen()->literal_index())));
  __ push(Immediate(constant_properties));
  int flags = instr->hydrogen()->fast_elements()
      ? ObjectLiteral::kFastElements
      : ObjectLiteral::kNoFlags;
  flags |= instr->hydrogen()->has_function()
      ? ObjectLiteral::kHasFunction
      : ObjectLiteral::kNoFlags;
  __ push(Immediate(Smi::FromInt(flags)));

  // Pick the right runtime function or stub to call.
  int properties_count = constant_properties->length() / 2;
  if (instr->hydrogen()->depth() > 1) {
    CallRuntime(Runtime::kCreateObjectLiteral, 4, instr);
  } else if (flags != ObjectLiteral::kFastElements ||
      properties_count >
          FastCloneShallowObjectStub::kMaximumClonedProperties) {
    CallRuntime(Runtime::kCreateObjectLiteralShallow, 4, instr);
  } else {
    FastCloneShallowObjectStub stub(properties_count);
    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
  }
}

void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
  ASSERT(ToRegister(instr->value()).is(eax));
  __ push(eax);
  CallRuntime(Runtime::kToFastProperties, 1, instr);
}

void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
  ASSERT(ToRegister(instr->context()).is(esi));
  Label materialized;
  // Registers will be used as follows:
  //   ecx = literals array, ebx = regexp literal,
  //   eax = regexp literal clone, esi = context.
  int literal_offset =
      FixedArray::OffsetOfElementAt(instr->hydrogen()->literal_index());
  __ LoadHeapObject(ecx, instr->hydrogen()->literals());
  __ mov(ebx, FieldOperand(ecx, literal_offset));
  __ cmp(ebx, factory()->undefined_value());
  __ j(not_equal, &materialized, Label::kNear);

  // Create the regexp literal using a runtime function; the result will
  // be in eax.
  __ push(ecx);
  __ push(Immediate(Smi::FromInt(instr->hydrogen()->literal_index())));
  __ push(Immediate(instr->hydrogen()->pattern()));
  __ push(Immediate(instr->hydrogen()->flags()));
  CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr);
  __ mov(ebx, eax);

  __ bind(&materialized);
  int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
  Label allocated, runtime_allocate;
  __ AllocateInNewSpace(size, eax, ecx, edx, &runtime_allocate, TAG_OBJECT);
  __ jmp(&allocated);

  __ bind(&runtime_allocate);
  __ push(ebx);
  __ push(Immediate(Smi::FromInt(size)));
  CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
  __ pop(ebx);

  __ bind(&allocated);
  // Copy the content into the newly allocated memory, unrolling the
  // copy loop once for better throughput.
  for (int i = 0; i < size - kPointerSize; i += 2 * kPointerSize) {
    __ mov(edx, FieldOperand(ebx, i));
    __ mov(ecx, FieldOperand(ebx, i + kPointerSize));
    __ mov(FieldOperand(eax, i), edx);
    __ mov(FieldOperand(eax, i + kPointerSize), ecx);
  }
  if ((size % (2 * kPointerSize)) != 0) {
    __ mov(edx, FieldOperand(ebx, size - kPointerSize));
    __ mov(FieldOperand(eax, size - kPointerSize), edx);
  }
}

void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
  ASSERT(ToRegister(instr->context()).is(esi));
  // Use the fast-case closure allocation code that allocates in new
  // space for nested functions that don't need literals cloning.
  Handle<SharedFunctionInfo> shared_info = instr->shared_info();
  bool pretenure = instr->hydrogen()->pretenure();
  if (!pretenure && shared_info->num_literals() == 0) {
    FastNewClosureStub stub(shared_info->language_mode());
    __ push(Immediate(shared_info));
    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
  } else {
    __ push(esi);
    __ push(Immediate(shared_info));
    __ push(Immediate(pretenure
                      ? factory()->true_value()
                      : factory()->false_value()));
    CallRuntime(Runtime::kNewClosure, 3, instr);
  }
}

void LCodeGen::DoTypeof(LTypeof* instr) {
  LOperand* input = instr->value();
  EmitPushTaggedOperand(input);
  CallRuntime(Runtime::kTypeof, 1, instr);
}

void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
  Register input = ToRegister(instr->value());
  int true_block = chunk_->LookupDestination(instr->true_block_id());
  int false_block = chunk_->LookupDestination(instr->false_block_id());
  Label* true_label = chunk_->GetAssemblyLabel(true_block);
  Label* false_label = chunk_->GetAssemblyLabel(false_block);

  Condition final_branch_condition =
      EmitTypeofIs(true_label, false_label, input, instr->type_literal());
  if (final_branch_condition != no_condition) {
    EmitBranch(true_block, false_block, final_branch_condition);
  }
}

Condition LCodeGen::EmitTypeofIs(Label* true_label,
                                 Label* false_label,
                                 Register input,
                                 Handle<String> type_name) {
  Condition final_branch_condition = no_condition;
  if (type_name->Equals(heap()->number_symbol())) {
    __ JumpIfSmi(input, true_label);
    __ cmp(FieldOperand(input, HeapObject::kMapOffset),
           factory()->heap_number_map());
    final_branch_condition = equal;

  } else if (type_name->Equals(heap()->string_symbol())) {
    __ JumpIfSmi(input, false_label);
    __ CmpObjectType(input, FIRST_NONSTRING_TYPE, input);
    __ j(above_equal, false_label);
    __ test_b(FieldOperand(input, Map::kBitFieldOffset),
              1 << Map::kIsUndetectable);
    final_branch_condition = zero;

  } else if (type_name->Equals(heap()->boolean_symbol())) {
    __ cmp(input, factory()->true_value());
    __ j(equal, true_label);
    __ cmp(input, factory()->false_value());
    final_branch_condition = equal;

  } else if (FLAG_harmony_typeof && type_name->Equals(heap()->null_symbol())) {
    __ cmp(input, factory()->null_value());
    final_branch_condition = equal;

  } else if (type_name->Equals(heap()->undefined_symbol())) {
    __ cmp(input, factory()->undefined_value());
    __ j(equal, true_label);
    __ JumpIfSmi(input, false_label);
    // Check for undetectable objects => true.
    __ mov(input, FieldOperand(input, HeapObject::kMapOffset));
    __ test_b(FieldOperand(input, Map::kBitFieldOffset),
              1 << Map::kIsUndetectable);
    final_branch_condition = not_zero;

  } else if (type_name->Equals(heap()->function_symbol())) {
    __ JumpIfSmi(input, false_label);
    __ CmpObjectType(input, JS_FUNCTION_TYPE, input);
    __ j(equal, true_label);
    __ CmpInstanceType(input, JS_FUNCTION_PROXY_TYPE);
    final_branch_condition = equal;

  } else if (type_name->Equals(heap()->object_symbol())) {
    __ JumpIfSmi(input, false_label);
    if (!FLAG_harmony_typeof) {
      __ cmp(input, factory()->null_value());
      __ j(equal, true_label);
    }
    __ CmpObjectType(input, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE, input);
    __ j(below, false_label);
    __ CmpInstanceType(input, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
    __ j(above, false_label);
    // Check for undetectable objects => false.
    __ test_b(FieldOperand(input, Map::kBitFieldOffset),
              1 << Map::kIsUndetectable);
    final_branch_condition = zero;

  } else {
    __ jmp(false_label);
  }
  return final_branch_condition;
}

void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
  Register temp = ToRegister(instr->temp());
  int true_block = chunk_->LookupDestination(instr->true_block_id());
  int false_block = chunk_->LookupDestination(instr->false_block_id());

  EmitIsConstructCall(temp);
  EmitBranch(true_block, false_block, equal);
}


void LCodeGen::EmitIsConstructCall(Register temp) {
  // Get the frame pointer for the calling frame.
  __ mov(temp, Operand(ebp, StandardFrameConstants::kCallerFPOffset));

  // Skip the arguments adaptor frame if it exists.
  Label check_frame_marker;
  __ cmp(Operand(temp, StandardFrameConstants::kContextOffset),
         Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
  __ j(not_equal, &check_frame_marker, Label::kNear);
  __ mov(temp, Operand(temp, StandardFrameConstants::kCallerFPOffset));

  // Check the marker in the calling frame.
  __ bind(&check_frame_marker);
  __ cmp(Operand(temp, StandardFrameConstants::kMarkerOffset),
         Immediate(Smi::FromInt(StackFrame::CONSTRUCT)));
}

void LCodeGen::EnsureSpaceForLazyDeopt() {
  // Ensure that we have enough space after the previous lazy-bailout
  // instruction for patching the code here.
  int current_pc = masm()->pc_offset();
  int patch_size = Deoptimizer::patch_size();
  if (current_pc < last_lazy_deopt_pc_ + patch_size) {
    int padding_size = last_lazy_deopt_pc_ + patch_size - current_pc;
    __ Nop(padding_size);
  }
  last_lazy_deopt_pc_ = masm()->pc_offset();
}

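// Lazy deoptimization later patches a call sequence over the code that
// follows a safepoint, so consecutive safepoints must be at least
// Deoptimizer::patch_size() bytes apart. Padding with Nop(padding_size)
// guarantees that patching one lazy-deopt site can never overwrite the
// instructions belonging to the next one.
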
void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
  EnsureSpaceForLazyDeopt();
  ASSERT(instr->HasEnvironment());
  LEnvironment* env = instr->environment();
  RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
}


void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
  DeoptimizeIf(no_condition, instr->environment());
}

void LCodeGen::DoDeleteProperty(LDeleteProperty* instr) {
  LOperand* obj = instr->object();
  LOperand* key = instr->key();
  __ push(ToOperand(obj));
  EmitPushTaggedOperand(key);
  ASSERT(instr->HasPointerMap());
  LPointerMap* pointers = instr->pointer_map();
  RecordPosition(pointers->position());
  // Create a safepoint generator that will also ensure enough space in
  // the reloc info for patching in deoptimization, since this invokes a
  // builtin.
  SafepointGenerator safepoint_generator(
      this, pointers, Safepoint::kLazyDeopt);
  __ push(Immediate(Smi::FromInt(strict_mode_flag())));
  __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION, safepoint_generator);
}

void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
  PushSafepointRegistersScope scope(this);
  __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
  __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
  RecordSafepointWithLazyDeopt(
      instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
  ASSERT(instr->HasEnvironment());
  LEnvironment* env = instr->environment();
  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
}

void LCodeGen::DoStackCheck(LStackCheck* instr) {
  class DeferredStackCheck: public LDeferredCode {
   public:
    DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() { codegen()->DoDeferredStackCheck(instr_); }
    virtual LInstruction* instr() { return instr_; }
   private:
    LStackCheck* instr_;
  };

  ASSERT(instr->HasEnvironment());
  LEnvironment* env = instr->environment();
  // There is no LLazyBailout instruction for stack checks, so we have to
  // prepare for lazy deoptimization explicitly here.
  if (instr->hydrogen()->is_function_entry()) {
    // Perform stack overflow check.
    Label done;
    ExternalReference stack_limit =
        ExternalReference::address_of_stack_limit(isolate());
    __ cmp(esp, Operand::StaticVariable(stack_limit));
    __ j(above_equal, &done, Label::kNear);

    ASSERT(instr->context()->IsRegister());
    ASSERT(ToRegister(instr->context()).is(esi));
    StackCheckStub stub;
    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
    EnsureSpaceForLazyDeopt();
    __ bind(&done);
    RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
    safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
  } else {
    ASSERT(instr->hydrogen()->is_backwards_branch());
    // Perform a stack overflow check before this goto jumps backwards.
    DeferredStackCheck* deferred_stack_check =
        new(zone()) DeferredStackCheck(this, instr);
    ExternalReference stack_limit =
        ExternalReference::address_of_stack_limit(isolate());
    __ cmp(esp, Operand::StaticVariable(stack_limit));
    __ j(below, deferred_stack_check->entry());
    EnsureSpaceForLazyDeopt();
    __ bind(instr->done_label());
    deferred_stack_check->SetExit(instr->done_label());
    RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
    // Don't record a deoptimization index for the safepoint here; that
    // is done explicitly when emitting the call and the safepoint in the
    // deferred code.
  }
}

void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
  // This is a pseudo-instruction that ensures that the environment here
  // is properly registered for deoptimization and records the
  // assembler's PC offset.
  LEnvironment* environment = instr->environment();
  environment->SetSpilledRegisters(instr->SpilledRegisterArray(),
                                   instr->SpilledDoubleRegisterArray());

  // If the environment were already registered, we would have no way of
  // backpatching it with the spill slot operands.
  ASSERT(!environment->HasBeenRegistered());
  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
  ASSERT(osr_pc_offset_ == -1);
  osr_pc_offset_ = masm()->pc_offset();
}

void LCodeGen::DoIn(LIn* instr) {
  LOperand* obj = instr->object();
  LOperand* key = instr->key();
  EmitPushTaggedOperand(key);
  EmitPushTaggedOperand(obj);
  ASSERT(instr->HasPointerMap());
  LPointerMap* pointers = instr->pointer_map();
  RecordPosition(pointers->position());
  SafepointGenerator safepoint_generator(
      this, pointers, Safepoint::kLazyDeopt);
  __ InvokeBuiltin(Builtins::IN, CALL_FUNCTION, safepoint_generator);
}

void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
  __ cmp(eax, isolate()->factory()->undefined_value());
  DeoptimizeIf(equal, instr->environment());

  __ cmp(eax, isolate()->factory()->null_value());
  DeoptimizeIf(equal, instr->environment());

  __ test(eax, Immediate(kSmiTagMask));
  DeoptimizeIf(zero, instr->environment());

  STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
  __ CmpObjectType(eax, LAST_JS_PROXY_TYPE, ecx);
  DeoptimizeIf(below_equal, instr->environment());

  Label use_cache, call_runtime;
  __ CheckEnumCache(&call_runtime);

  __ mov(eax, FieldOperand(eax, HeapObject::kMapOffset));
  __ jmp(&use_cache, Label::kNear);

  // Get the set of properties to enumerate.
  __ bind(&call_runtime);
  __ push(eax);
  CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr);

  __ cmp(FieldOperand(eax, HeapObject::kMapOffset),
         isolate()->factory()->meta_map());
  DeoptimizeIf(not_equal, instr->environment());
  __ bind(&use_cache);
}

void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
  Register map = ToRegister(instr->map());
  Register result = ToRegister(instr->result());
  Label load_cache, done;
  __ EnumLength(result, map);
  __ cmp(result, Immediate(Smi::FromInt(0)));
  __ j(not_equal, &load_cache);
  __ mov(result, isolate()->factory()->empty_fixed_array());
  __ jmp(&done);

  __ bind(&load_cache);
  __ LoadInstanceDescriptors(map, result);
  __ mov(result,
         FieldOperand(result, DescriptorArray::kEnumCacheOffset));
  __ mov(result,
         FieldOperand(result, FixedArray::SizeFor(instr->idx())));
  __ bind(&done);
  __ test(result, result);
  DeoptimizeIf(equal, instr->environment());
}

void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
  Register object = ToRegister(instr->value());
  __ cmp(ToRegister(instr->map()),
         FieldOperand(object, HeapObject::kMapOffset));
  DeoptimizeIf(not_equal, instr->environment());
}

void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
  Register object = ToRegister(instr->object());
  Register index = ToRegister(instr->index());

  Label out_of_object, done;
  __ cmp(index, Immediate(0));
  __ j(less, &out_of_object);
  __ mov(object, FieldOperand(object,
                              index,
                              times_half_pointer_size,
                              JSObject::kHeaderSize));
  __ jmp(&done, Label::kNear);

  __ bind(&out_of_object);
  __ mov(object, FieldOperand(object, JSObject::kPropertiesOffset));
  __ neg(index);
  // Index is now equal to the out-of-object property index plus 1.
  __ mov(object, FieldOperand(object,
                              index,
                              times_half_pointer_size,
                              FixedArray::kHeaderSize - kPointerSize));
  __ bind(&done);
}


#undef __

} }  // namespace v8::internal

#endif  // V8_TARGET_ARCH_IA32
static const int kCallerFPOffset
static const int kElementsKindMask
static const int kLengthOffset
static const int kBitFieldOffset
static LGap * cast(LInstruction *instr)
const intptr_t kSmiTagMask
static const int kCodeEntryOffset
static const int kMaxAsciiCharCode
static const int kPrototypeOrInitialMapOffset
static int SlotOffset(int index)
virtual void AfterCall() const
static const int kEnumCacheOffset
static const uint32_t kExponentMask
static Smi * FromInt(int value)
bool IsFastObjectElementsKind(ElementsKind kind)
static HeapObject * cast(Object *obj)
static Handle< T > cast(Handle< S > that)
static const int kGlobalReceiverOffset
static const int kNativeByteOffset
static const int kExponentBias
static XMMRegister FromAllocationIndex(int index)
static bool IsSupported(CpuFeature f)
static const int kStrictModeBitWithinByte
const int kNoAlignmentPadding
static const int kExternalPointerOffset
virtual ~SafepointGenerator()
static const int kCallerSPOffset
#define ASSERT(condition)
bool CanTransitionToMoreGeneralFastElementsKind(ElementsKind elements_kind, bool allow_only_packed)
#define ASSERT_GE(v1, v2)
const int kPointerSizeLog2
static const int kInstanceSizeOffset
static const int kInObjectFieldCount
static const int kStressDeoptCounterOffset
static const int kMaximumSlots
static const int kInstanceClassNameOffset
static const int kUnusedPropertyFieldsOffset
int WhichPowerOf2(uint32_t x)
bool is_uint32(int64_t x)
bool IsSimpleMapChangeTransition(ElementsKind from_kind, ElementsKind to_kind)
MemOperand ContextOperand(Register context, int index)
static const int kContextOffset
const int kAlignmentPaddingPushed
Handle< String > SubString(Handle< String > str, int start, int end, PretenureFlag pretenure)
static const int kHashFieldOffset
Condition ReverseCondition(Condition cond)
const uint32_t kSlotsZapValue
STATIC_ASSERT((FixedDoubleArray::kHeaderSize &kDoubleAlignmentMask)==0)
static const int kLengthOffset
static const int kExponentShift
static const int kValueOffset
const uint32_t kHoleNanUpper32
static void MaybeCallEntryHook(MacroAssembler *masm)
Operand FieldOperand(Register object, int offset)
static LConstantOperand * cast(LOperand *op)
const uint32_t kHoleNanLower32
static Register FromAllocationIndex(int index)
static const int kDynamicAlignmentStateOffset
static const int kCacheStampOffset
static const int kPropertiesOffset
const int kAlignmentZapValue
activate correct semantics for inheriting readonliness enable harmony semantics for typeof enable harmony enable harmony proxies enable all harmony harmony_scoping harmony_proxies harmony_scoping tracks arrays with only smi values automatically unbox arrays of doubles use crankshaft use hydrogen range analysis use hydrogen global value numbering use function inlining maximum number of AST nodes considered for a single inlining loop invariant code motion print statistics for hydrogen trace generated IR for specified phases trace register allocator trace range analysis trace representation types environment for every instruction put a break point before deoptimizing polymorphic inlining perform array bounds checks elimination use dead code elimination trace on stack replacement optimize closures cache optimized code for closures functions with arguments object loop weight for representation inference allow uint32 values on optimize frames if they are used only in safe operations track parallel recompilation enable all profiler experiments number of stack frames inspected by the profiler call recompile stub directly when self optimizing trigger profiler ticks based on counting instead of timing weight back edges by jump distance for interrupt triggering percentage of ICs that must have type info to allow optimization watch_ic_patching retry_self_opt interrupt_at_exit extra verbose compilation tracing generate extra emit comments in code disassembly enable use of SSE3 instructions if available enable use of CMOV instruction if available enable use of SAHF instruction if enable use of VFP3 instructions if available this implies enabling ARMv7 and VFP2 enable use of VFP2 instructions if available enable use of SDIV and UDIV instructions if enable loading bit constant by means of movw movt instruction enable unaligned accesses for enable use of MIPS FPU instructions if expose natives in global object expose gc extension number of stack frames to capture disable builtin natives files print a stack trace if an assertion failure occurs use random jit cookie to mask large constants trace lazy optimization use adaptive optimizations prepare for turning on always opt minimum length for automatic enable preparsing maximum number of optimization attempts before giving up cache prototype transitions automatically set the debug break flag when debugger commands are in the queue always cause a debug break before aborting maximum length of function source code printed in a stack trace max size of the new max size of the old max size of executable always perform global GCs print one trace line following each garbage collection do not print trace line after scavenger collection print more details following each garbage collection print amount of external allocated memory after each time it is adjusted flush code that we expect not to use again before full gc do incremental marking steps track object counts and memory usage use caching Perform compaction on every full GC Never perform compaction on full GC testing only Compact code space on full incremental collections Default seed for initializing random generator(0, the default, means to use system random).") DEFINE_bool(use_verbose_printer
static const int kInObjectPropertiesOffset
bool IsFastSmiElementsKind(ElementsKind kind)
static int OffsetOfElementAt(int index)
static void Generate(MacroAssembler *masm, Register string, Register index, Register result, Label *call_runtime)
static void EnsureRelocSpaceForLazyDeoptimization(Handle< Code > code)
static const int kElementsOffset
static const int kNativeBitWithinByte
static const int kContainsCachedArrayIndexMask
static Vector< T > New(int length)
int ElementsKindToShiftSize(ElementsKind elements_kind)
Vector< const char > CStrVector(const char *data)
static int OffsetOfElementAt(int index)
static const int kLengthOffset
static int SizeFor(int length)
static const int kHeaderSize
static const int kMapOffset
static const int kValueOffset
bool is(Register reg) const
static const int kLengthOffset
static Address GetDeoptimizationEntry(int id, BailoutType type)
static const int kHasNonInstancePrototype
static const int kContextOffset
static const int kFunctionOffset
ElementsKind GetInitialFastElementsKind()
static const uint32_t kSignMask
static const int kStrictModeByteOffset
Condition NegateCondition(Condition cond)
#define ASSERT_EQ(v1, v2)
static const int kElementsKindShift
static const int kConstructorOffset
#define ASSERT_NE(v1, v2)
static const int kIsUndetectable
static const int kHeaderSize
static const int kMaximumClonedProperties
activate correct semantics for inheriting readonliness enable harmony semantics for typeof enable harmony enable harmony proxies enable all harmony harmony_scoping harmony_proxies harmony_scoping tracks arrays with only smi values automatically unbox arrays of doubles use crankshaft use hydrogen range analysis use hydrogen global value numbering use function inlining maximum number of AST nodes considered for a single inlining loop invariant code motion print statistics for hydrogen trace generated IR for specified phases trace register allocator trace range analysis trace representation types environment for every instruction put a break point before deoptimizing polymorphic inlining perform array bounds checks elimination use dead code elimination trace on stack replacement optimize closures cache optimized code for closures functions with arguments object loop weight for representation inference allow uint32 values on optimize frames if they are used only in safe operations track parallel recompilation enable all profiler experiments number of stack frames inspected by the profiler call recompile stub directly when self optimizing trigger profiler ticks based on counting instead of timing weight back edges by jump distance for interrupt triggering percentage of ICs that must have type info to allow optimization watch_ic_patching retry_self_opt interrupt_at_exit extra verbose compilation tracing generate extra emit comments in code disassembly enable use of SSE3 instructions if available enable use of CMOV instruction if available enable use of SAHF instruction if enable use of VFP3 instructions if available this implies enabling ARMv7 and VFP2 enable use of VFP2 instructions if available enable use of SDIV and UDIV instructions if enable loading bit constant by means of movw movt instruction enable unaligned accesses for enable use of MIPS FPU instructions if NULL
static const int kPrototypeOffset
#define RUNTIME_ENTRY(name, nargs, ressize)
activate correct semantics for inheriting readonliness enable harmony semantics for typeof enable harmony enable harmony proxies enable all harmony harmony_scoping harmony_proxies harmony_scoping tracks arrays with only smi values automatically unbox arrays of doubles use crankshaft use hydrogen range analysis use hydrogen global value numbering use function inlining maximum number of AST nodes considered for a single inlining loop invariant code motion print statistics for hydrogen trace generated IR for specified phases trace register allocator trace range analysis trace representation types environment for every instruction put a break point before deoptimizing polymorphic inlining perform array bounds checks elimination use dead code elimination trace on stack replacement optimize closures cache optimized code for closures functions with arguments object loop weight for representation inference allow uint32 values on optimize frames if they are used only in safe operations track parallel recompilation enable all profiler experiments number of stack frames inspected by the profiler call recompile stub directly when self optimizing trigger profiler ticks based on counting instead of timing weight back edges by jump distance for interrupt triggering percentage of ICs that must have type info to allow optimization watch_ic_patching retry_self_opt interrupt_at_exit extra verbose compilation tracing generate extra code(assertions) for debugging") DEFINE_bool(code_comments
static const int kMaxLength
static const int kValueOffset
static const int kNativeContextOffset
static const int kMarkerOffset
static const int kExponentBits
static const int kSharedFunctionInfoOffset
Register ToRegister(int num)
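A likely shape for this helper, sketched under the assumption that the allocator's numbering is decoded by the Register type itself (as Register::FromAllocationIndex does in ia32 builds of this era; not copied from this file):

Register LCodeGen::ToRegister(int index) const {
  // Map an allocation index chosen by the register allocator back to
  // the physical ia32 register it denotes.
  ASSERT(index >= 0 && index < Register::kNumAllocatableRegisters);
  return Register::FromAllocationIndex(index);
}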
static const int kMaxValue
static const int kMantissaBits
static const int kBitField2Offset
static HValue* cast(HValue* value)
static Handle<Code> GetUninitialized(Token::Value op)
static const int kMaximumClonedLength
static const int kExponentOffset
bool EvalComparison(Token::Value op, double op1, double op2)
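This helper folds a comparison of two compile-time-known doubles. A minimal sketch over the Token values used by compare operations (an assumed case set; the real switch may cover more):

static bool EvalComparison(Token::Value op, double op1, double op2) {
  switch (op) {
    case Token::EQ:
    case Token::EQ_STRICT:
      return op1 == op2;  // NaN == NaN is false under IEEE semantics.
    case Token::LT:
      return op1 < op2;
    case Token::GT:
      return op1 > op2;
    case Token::LTE:
      return op1 <= op2;
    case Token::GTE:
      return op1 >= op2;
    default:
      UNREACHABLE();
      return false;
  }
}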
static JSObject* cast(Object* obj)
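The static cast(Object*) entries are instances of V8's checked-downcast pattern: verify the instance type in debug builds, then reinterpret the pointer. A generic sketch (the real bodies are macro-generated per class):

JSObject* JSObject::cast(Object* obj) {
  ASSERT(obj->IsJSObject());  // Type check; compiled away in release builds.
  return reinterpret_cast<JSObject*>(obj);
}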
bool IsFastDoubleElementsKind(ElementsKind kind)
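A sketch of this predicate, assuming the two unboxed-double kinds in the ElementsKind enum (packed and holey):

inline bool IsFastDoubleElementsKind(ElementsKind kind) {
  // True only for backing stores that hold raw doubles rather than
  // tagged pointers.
  return kind == FAST_DOUBLE_ELEMENTS ||
         kind == FAST_HOLEY_DOUBLE_ELEMENTS;
}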
SafepointGenerator(LCodeGen* codegen, LPointerMap* pointers, Safepoint::DeoptMode mode)
static const int kInstanceTypeOffset
virtual void BeforeCall(int call_size) const
static const int kPreAllocatedPropertyFieldsOffset
static const int kMantissaOffset
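Taken together, kValueOffset, kMantissaOffset/kExponentOffset and kMantissaBits/kExponentBits describe the IEEE-754 double payload of a HeapNumber as two 32-bit words on little-endian ia32. A sketch of the assumed relationships (standard IEEE-754 layout, not quoted from the headers):

// 64-bit double = 1 sign bit + 11 exponent bits + 52 mantissa bits.
static const int kMantissaBits = 52;
static const int kExponentBits = 11;
// On little-endian ia32 the mantissa's low word comes first, so code can
// address the halves as kMantissaOffset == kValueOffset and
// kExponentOffset == kValueOffset + kPointerSize.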