#if V8_TARGET_ARCH_X64
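// SafepointGenerator is the CallWrapper handed to macro-assembler call
// helpers: it does nothing before a call and records a safepoint for the
// instruction's pointer map (with the given deopt mode) right after it.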
class SafepointGenerator V8_FINAL : public CallWrapper {
  SafepointGenerator(LCodeGen* codegen,
                     LPointerMap* pointers,
                     Safepoint::DeoptMode mode)
  virtual ~SafepointGenerator() {}

  virtual void BeforeCall(int call_size) const V8_OVERRIDE {}
    codegen_->RecordSafepoint(pointers_, deopt_mode_);

  LPointerMap* pointers_;
  Safepoint::DeoptMode deopt_mode_;
  LPhase phase("Z_Code generation", chunk());
  FrameScope frame_scope(masm_, StackFrame::MANUAL);
  return GeneratePrologue() &&
         GenerateDeferredCode() &&
         GenerateJumpTable() &&
         GenerateSafepointTable();

  code->set_stack_slots(GetStackSlotCount());
  code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
  if (code->is_optimized_code()) RegisterWeakObjectsInOptimizedCode(code);
  PopulateDeoptimizationData(code);
  info()->CommitDependencies(code);

  info()->set_bailout_reason(reason);
void LCodeGen::MakeSureStackPagesMapped(int offset) {
  const int kPageSize = 4 * KB;
  for (offset -= kPageSize; offset > 0; offset -= kPageSize) {
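// The two helpers below walk the chunk's bit vector of allocated double
// registers, spilling each live XMM register to its stack slot in the
// prologue and reloading it on the way out (return or deopt jump table).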
void LCodeGen::SaveCallerDoubles() {
  Comment(";;; Save clobbered callee double registers");
  BitVector* doubles = chunk()->allocated_double_registers();
  BitVector::Iterator save_iterator(doubles);
  while (!save_iterator.Done()) {
    save_iterator.Advance();

void LCodeGen::RestoreCallerDoubles() {
  Comment(";;; Restore clobbered callee double registers");
  BitVector* doubles = chunk()->allocated_double_registers();
  BitVector::Iterator save_iterator(doubles);
  while (!save_iterator.Done()) {
    save_iterator.Advance();
bool LCodeGen::GeneratePrologue() {
  if (info()->IsOptimizing()) {
    if (strlen(FLAG_stop_at) > 0 &&
        info_->function()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {

    if (info_->this_has_uses() &&
        info_->strict_mode() == SLOPPY &&
        !info_->is_native()) {
      StackArgumentsAccessor args(rsp, scope()->num_parameters());
      __ movp(rcx, args.GetReceiverOperand());
      __ CompareRoot(rcx, Heap::kUndefinedValueRootIndex);
      __ movp(args.GetReceiverOperand(), rcx);

  info()->set_prologue_offset(masm_->pc_offset());
    frame_is_built_ = true;
    info()->AddNoFrameRange(0, masm_->pc_offset());

  int slots = GetStackSlotCount();
    if (FLAG_debug_code) {

  if (info()->saves_caller_doubles()) {

  if (heap_slots > 0) {
    Comment(";;; Allocate local context");
    if (heap_slots <= FastNewContextStub::kMaximumSlots) {
      FastNewContextStub stub(heap_slots);
      __ CallRuntime(Runtime::kHiddenNewFunctionContext, 1);
    RecordSafepoint(Safepoint::kNoLazyDeopt);
    for (int i = 0; i < num_parameters; i++) {
      if (var->IsContextSlot()) {
        __ movp(rax, Operand(rbp, parameter_offset));
        __ movp(Operand(rsi, context_offset), rax);
    Comment(";;; End allocate local context");

  if (FLAG_trace && info()->IsOptimizing()) {
    __ CallRuntime(Runtime::kTraceEnter, 0);
  return !is_aborted();
void LCodeGen::GenerateOsrPrologue() {
  if (osr_pc_offset_ >= 0) return;
  osr_pc_offset_ = masm()->pc_offset();
  int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots();

void LCodeGen::GenerateBodyInstructionPre(LInstruction* instr) {
  if (instr->IsCall()) {
  if (!instr->IsLazyBailout() && !instr->IsGap()) {
    safepoints_.BumpLastLazySafepointIndex();

void LCodeGen::GenerateBodyInstructionPost(LInstruction* instr) {
  if (instr->HasResult() && instr->MustSignExtendResult(chunk())) {
    if (instr->result()->IsRegister()) {
      Register result_reg = ToRegister(instr->result());
      __ movsxlq(result_reg, result_reg);
      ASSERT(instr->result()->IsStackSlot());
      Operand src = ToOperand(instr->result());
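// The jump table emitted below contains one trampoline per deoptimization
// target recorded by DeoptimizeIf. Entries reached without a frame first
// build one via the shared needs_frame path, and caller-saved doubles are
// restored when the prologue spilled them.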
bool LCodeGen::GenerateJumpTable() {
  if (jump_table_.length() > 0) {
    Comment(";;; -------------------- Jump table --------------------");
  for (int i = 0; i < jump_table_.length(); i++) {
    __ bind(&jump_table_[i].label);
    Address entry = jump_table_[i].address;
      Comment(";;; jump table entry %d.", i);
      Comment(";;; jump table entry %d: deoptimization bailout %d.", i, id);
    if (jump_table_[i].needs_frame) {
      if (needs_frame.is_bound()) {
        __ jmp(&needs_frame);
        __ bind(&needs_frame);
      if (info()->saves_caller_doubles()) {
        RestoreCallerDoubles();
  return !is_aborted();
bool LCodeGen::GenerateDeferredCode() {
  if (deferred_.length() > 0) {
    for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
          instructions_->at(code->instruction_index())->hydrogen_value();
      RecordAndWritePosition(
          chunk()->graph()->SourcePositionToScriptPosition(value->position()));
      Comment(";;; <@%d,#%d> "
              "-------------------- Deferred %s --------------------",
              code->instruction_index(),
              code->instr()->hydrogen_value()->id(),
              code->instr()->Mnemonic());
      __ bind(code->entry());
        Comment(";;; Build frame");
        frame_is_built_ = true;
        Comment(";;; Deferred code");
      __ bind(code->done());
        Comment(";;; Destroy frame");
        frame_is_built_ = false;
      __ jmp(code->exit());
  if (!is_aborted()) status_ = DONE;
  return !is_aborted();
bool LCodeGen::GenerateSafepointTable() {
  safepoints_.Emit(masm(), GetStackSlotCount());
  return !is_aborted();

  ASSERT(op->IsDoubleRegister());

  return chunk_->LookupLiteralRepresentation(op).IsSmiOrInteger32();

  return op->IsConstantOperand() &&
         chunk_->IsDehoistedKey(chunk_->LookupConstant(op));

  return chunk_->LookupLiteralRepresentation(op).IsSmi();

  HConstant* constant = chunk_->LookupConstant(op);
  return constant->Integer32Value();

  HConstant* constant = chunk_->LookupConstant(op);

  HConstant* constant = chunk_->LookupConstant(op);
  ASSERT(constant->HasDoubleValue());
  return constant->DoubleValue();

ExternalReference LCodeGen::ToExternalReference(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  ASSERT(constant->HasExternalReferenceValue());
  return constant->ExternalReferenceValue();

  HConstant* constant = chunk_->LookupConstant(op);
  ASSERT(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged());
  return constant->handle(isolate());

static int ArgumentsOffsetWithoutFrame(int index) {

  ASSERT(op->IsStackSlot() || op->IsDoubleStackSlot());
    return Operand(rsp, ArgumentsOffsetWithoutFrame(op->index()));
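// WriteTranslation (below) describes how to rebuild an unoptimized frame at
// a deopt point: a frame kind chosen from the environment's frame_type(),
// followed by one AddToTranslation entry per environment value, with tagged
// and uint32 slots flagged separately.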
                               Translation* translation) {
  if (environment == NULL) return;

  int translation_size = environment->translation_size();
  int height = translation_size - environment->parameter_count();

  bool has_closure_id = !info()->closure().is_null() &&
      !info()->closure().is_identical_to(environment->closure());
  int closure_id = has_closure_id
      ? DefineDeoptimizationLiteral(environment->closure())
      : Translation::kSelfLiteralId;

  switch (environment->frame_type()) {
      translation->BeginJSFrame(environment->ast_id(), closure_id, height);
      translation->BeginConstructStubFrame(closure_id, translation_size);
      ASSERT(translation_size == 1);
      translation->BeginGetterStubFrame(closure_id);
      ASSERT(translation_size == 2);
      translation->BeginSetterStubFrame(closure_id);
      translation->BeginArgumentsAdaptorFrame(closure_id, translation_size);
      translation->BeginCompiledStubFrame();

  int object_index = 0;
  int dematerialized_index = 0;
  for (int i = 0; i < translation_size; ++i) {
    LOperand* value = environment->values()->at(i);
    AddToTranslation(environment,
                     environment->HasTaggedValueAt(i),
                     environment->HasUint32ValueAt(i),
                     &dematerialized_index);
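// AddToTranslation stores a single environment value. Materialization
// markers expand into captured-object / arguments-object descriptors whose
// fields are appended recursively; ordinary operands are recorded by
// location kind (stack slot, register, double, or constant literal).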
void LCodeGen::AddToTranslation(LEnvironment* environment,
                                Translation* translation,
                                int* object_index_pointer,
                                int* dematerialized_index_pointer) {
  if (op == LEnvironment::materialization_marker()) {
    int object_index = (*object_index_pointer)++;
    if (environment->ObjectIsDuplicateAt(object_index)) {
      int dupe_of = environment->ObjectDuplicateOfAt(object_index);
      translation->DuplicateObject(dupe_of);
    int object_length = environment->ObjectLengthAt(object_index);
    if (environment->ObjectIsArgumentsAt(object_index)) {
      translation->BeginArgumentsObject(object_length);
      translation->BeginCapturedObject(object_length);
    int dematerialized_index = *dematerialized_index_pointer;
    int env_offset = environment->translation_size() + dematerialized_index;
    *dematerialized_index_pointer += object_length;
    for (int i = 0; i < object_length; ++i) {
      LOperand* value = environment->values()->at(env_offset + i);
      AddToTranslation(environment,
                       environment->HasTaggedValueAt(env_offset + i),
                       environment->HasUint32ValueAt(env_offset + i),
                       object_index_pointer,
                       dematerialized_index_pointer);

  if (op->IsStackSlot()) {
      translation->StoreStackSlot(op->index());
    } else if (is_uint32) {
      translation->StoreUint32StackSlot(op->index());
      translation->StoreInt32StackSlot(op->index());
  } else if (op->IsDoubleStackSlot()) {
    translation->StoreDoubleStackSlot(op->index());
  } else if (op->IsRegister()) {
      translation->StoreRegister(reg);
    } else if (is_uint32) {
      translation->StoreUint32Register(reg);
      translation->StoreInt32Register(reg);
  } else if (op->IsDoubleRegister()) {
    translation->StoreDoubleRegister(reg);
  } else if (op->IsConstantOperand()) {
    HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op));
    int src_index = DefineDeoptimizationLiteral(constant->handle(isolate()));
    translation->StoreLiteral(src_index);
void LCodeGen::CallCodeGeneric(Handle<Code> code,
                               RelocInfo::Mode mode,
                               SafepointMode safepoint_mode,
  RecordSafepointWithLazyDeopt(instr, safepoint_mode, argc);
  if (code->kind() == Code::BINARY_OP_IC ||
      code->kind() == Code::COMPARE_IC) {

void LCodeGen::CallCode(Handle<Code> code,
                        RelocInfo::Mode mode,
                        LInstruction* instr) {
  CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT, 0);

void LCodeGen::CallRuntime(const Runtime::Function* function,
  ASSERT(instr->HasPointerMap());
  __ CallRuntime(function, num_arguments, save_doubles);
  RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT, 0);

void LCodeGen::LoadContextFromDeferred(LOperand* context) {
  if (context->IsRegister()) {
  } else if (context->IsStackSlot()) {
  } else if (context->IsConstantOperand()) {
    HConstant* constant =
        chunk_->LookupConstant(LConstantOperand::cast(context));
    __ Move(rsi, Handle<Object>::cast(constant->handle(isolate())));

  LoadContextFromDeferred(context);
  __ CallRuntimeSaveDoubles(id);
  RecordSafepointWithRegisters(
      instr->pointer_map(), argc, Safepoint::kNoLazyDeopt);
void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
                                                    Safepoint::DeoptMode mode) {
  if (!environment->HasBeenRegistered()) {
    int jsframe_count = 0;
    Translation translation(&translations_, frame_count, jsframe_count, zone());
    int deoptimization_index = deoptimizations_.length();
    int pc_offset = masm()->pc_offset();
    environment->Register(deoptimization_index,
                          (mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
    deoptimizations_.Add(environment, environment->zone());
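// In DeoptimizeIf below, deopt jumps share jump table entries: a new entry
// is only appended when the target address, frame requirement, or bailout
// type differs from the previous one, so repeated checks against the same
// bailout reuse a single label.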
                            LEnvironment* environment,
  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
  ASSERT(environment->HasBeenRegistered());
  int id = environment->deoptimization_index();
    Abort(kBailoutWasNotPrepared);

  if (DeoptEveryNTimes()) {
    ExternalReference count = ExternalReference::stress_deopt_count(isolate());
    __ movl(rax, count_operand);
    __ subl(rax, Immediate(1));
      if (FLAG_trap_on_deopt) __ int3();
      __ movl(rax, Immediate(FLAG_deopt_every_n_times));
      __ movl(count_operand, rax);
      __ movl(count_operand, rax);

  if (info()->ShouldTrapOnDeopt()) {

      !info()->saves_caller_doubles()) {

    if (jump_table_.is_empty() ||
        jump_table_.last().address != entry ||
        jump_table_.last().needs_frame != !frame_is_built_ ||
        jump_table_.last().bailout_type != bailout_type) {
      Deoptimizer::JumpTableEntry table_entry(entry,
      jump_table_.Add(table_entry, zone());
      __ jmp(&jump_table_.last().label);
      __ j(cc, &jump_table_.last().label);

void LCodeGen::DeoptimizeIf(Condition cc,
                            LEnvironment* environment) {
  DeoptimizeIf(cc, environment, bailout_type);
void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
  int length = deoptimizations_.length();
  if (length == 0) return;
  Handle<DeoptimizationInputData> data =
      factory()->NewDeoptimizationInputData(length, TENURED);

  Handle<ByteArray> translations =
      translations_.CreateByteArray(isolate()->factory());
  data->SetTranslationByteArray(*translations);
  data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));
  data->SetOptimizationId(Smi::FromInt(info_->optimization_id()));
  if (info_->IsOptimizing()) {
    data->SetSharedFunctionInfo(*info_->shared_info());

      factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
  for (int i = 0; i < deoptimization_literals_.length(); i++) {
    literals->set(i, *deoptimization_literals_[i]);
  data->SetLiteralArray(*literals);

  data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id().ToInt()));

  for (int i = 0; i < length; i++) {
    data->SetAstId(i, env->ast_id());
    data->SetTranslationIndex(i, Smi::FromInt(env->translation_index()));
    data->SetArgumentsStackHeight(i,
  code->set_deoptimization_data(*data);

int LCodeGen::DefineDeoptimizationLiteral(Handle<Object> literal) {
  int result = deoptimization_literals_.length();
  for (int i = 0; i < deoptimization_literals_.length(); ++i) {
    if (deoptimization_literals_[i].is_identical_to(literal)) return i;
  deoptimization_literals_.Add(literal, zone());

void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
  ASSERT(deoptimization_literals_.length() == 0);
  const ZoneList<Handle<JSFunction> >* inlined_closures =
      chunk()->inlined_closures();
  for (int i = 0, length = inlined_closures->length();
    DefineDeoptimizationLiteral(inlined_closures->at(i));
  inlined_function_count_ = deoptimization_literals_.length();
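// The RecordSafepoint overloads below funnel into the full version: simple
// safepoints use Safepoint::kSimple with no arguments, calls that save
// registers use kWithRegisters, and an empty pointer map is used when no
// values are live across the call.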
void LCodeGen::RecordSafepointWithLazyDeopt(
    LInstruction* instr, SafepointMode safepoint_mode, int argc) {
  if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
    RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt);
    ASSERT(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS);
    RecordSafepointWithRegisters(
        instr->pointer_map(), argc, Safepoint::kLazyDeopt);

void LCodeGen::RecordSafepoint(
    LPointerMap* pointers,
    Safepoint::Kind kind,
    Safepoint::DeoptMode deopt_mode) {
  ASSERT(kind == expected_safepoint_kind_);
  const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();
  Safepoint safepoint = safepoints_.DefineSafepoint(masm(),
      kind, arguments, deopt_mode);
  for (int i = 0; i < operands->length(); i++) {
    LOperand* pointer = operands->at(i);
    if (pointer->IsStackSlot()) {
      safepoint.DefinePointerSlot(pointer->index(), zone());
    } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
      safepoint.DefinePointerRegister(ToRegister(pointer), zone());

void LCodeGen::RecordSafepoint(LPointerMap* pointers,
                               Safepoint::DeoptMode deopt_mode) {
  RecordSafepoint(pointers, Safepoint::kSimple, 0, deopt_mode);

void LCodeGen::RecordSafepoint(Safepoint::DeoptMode deopt_mode) {
  LPointerMap empty_pointers(zone());
  RecordSafepoint(&empty_pointers, deopt_mode);

void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
                                            Safepoint::DeoptMode deopt_mode) {
  RecordSafepoint(pointers, Safepoint::kWithRegisters, arguments, deopt_mode);

void LCodeGen::RecordAndWritePosition(int position) {
  if (position == RelocInfo::kNoPosition) return;
  masm()->positions_recorder()->RecordPosition(position);
  masm()->positions_recorder()->WriteRecordedPositions();
static const char* LabelType(LLabel* label) {
  if (label->is_loop_header()) return " (loop header)";
  if (label->is_osr_entry()) return " (OSR entry)";

void LCodeGen::DoLabel(LLabel* label) {
  Comment(";;; <@%d,#%d> -------------------- B%d%s --------------------",
          current_instruction_,
          label->hydrogen_value()->id(),
  __ bind(label->label());
  current_block_ = label->block_id();

  resolver_.Resolve(move);

    LParallelMove* move = gap->GetParallelMove(inner_pos);

void LCodeGen::DoInstructionGap(LInstructionGap* instr) {

void LCodeGen::DoParameter(LParameter* instr) {

void LCodeGen::DoCallStub(LCallStub* instr) {
  switch (instr->hydrogen()->major_key()) {
    case CodeStub::RegExpExec: {
      CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
    case CodeStub::SubString: {
      CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
    case CodeStub::StringCompare: {
      StringCompareStub stub;
      CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);

void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
  GenerateOsrPrologue();
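// For a power-of-two divisor the remainder is dividend & (|divisor| - 1),
// which is what the mask below computes. Negative dividends take a separate
// path so that a zero result (i.e. -0) can deoptimize when required.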
void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();

  HMod* hmod = instr->hydrogen();
  int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
  Label dividend_is_not_negative, done;
    __ testl(dividend, dividend);
    __ j(not_sign, &dividend_is_not_negative, Label::kNear);
    __ andl(dividend, Immediate(mask));
      DeoptimizeIf(zero, instr->environment());
    __ jmp(&done, Label::kNear);

  __ bind(&dividend_is_not_negative);
  __ andl(dividend, Immediate(mask));

void LCodeGen::DoModByConstI(LModByConstI* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();

  __ TruncatingDiv(dividend, Abs(divisor));
  __ movl(rax, dividend);

  HMod* hmod = instr->hydrogen();
    Label remainder_not_zero;
    __ j(not_zero, &remainder_not_zero, Label::kNear);
    __ cmpl(dividend, Immediate(0));
    DeoptimizeIf(less, instr->environment());
    __ bind(&remainder_not_zero);
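// DoModI uses idivl, which leaves the remainder in rdx (the result
// register here). The explicit checks below cover division by zero, the
// kMinInt % -1 overflow case, and a -0 result for a negative left operand.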
void LCodeGen::DoModI(LModI* instr) {
  HMod* hmod = instr->hydrogen();

  Register left_reg = ToRegister(instr->left());
  Register right_reg = ToRegister(instr->right());
  Register result_reg = ToRegister(instr->result());

    __ testl(right_reg, right_reg);
    DeoptimizeIf(zero, instr->environment());

    Label no_overflow_possible;
    __ j(not_zero, &no_overflow_possible, Label::kNear);
    __ cmpl(right_reg, Immediate(-1));
      DeoptimizeIf(equal, instr->environment());
      __ j(not_equal, &no_overflow_possible, Label::kNear);
      __ Set(result_reg, 0);
      __ jmp(&done, Label::kNear);
    __ bind(&no_overflow_possible);

    Label positive_left;
    __ testl(left_reg, left_reg);
    __ j(not_sign, &positive_left, Label::kNear);
    __ idivl(right_reg);
    __ testl(result_reg, result_reg);
    DeoptimizeIf(zero, instr->environment());
    __ jmp(&done, Label::kNear);
    __ bind(&positive_left);
    __ idivl(right_reg);
void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();

  if (divisor == 1) return;
    __ sarl(dividend, Immediate(shift));

  Label not_kmin_int, done;
    DeoptimizeIf(zero, instr->environment());
    if (divisor == -1) {
    __ movl(dividend, Immediate(kMinInt / divisor));
    __ jmp(&done, Label::kNear);
  __ bind(&not_kmin_int);
  __ sarl(dividend, Immediate(shift));

void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();

  HMathFloorOfDiv* hdiv = instr->hydrogen();
    __ testl(dividend, dividend);
    DeoptimizeIf(zero, instr->environment());

    __ TruncatingDiv(dividend, Abs(divisor));
    if (divisor < 0) __ negl(rdx);

  ASSERT(!temp.is(dividend) && !temp.is(rax) && !temp.is(rdx));
  Label needs_adjustment, done;
  __ cmpl(dividend, Immediate(0));
  __ j(divisor > 0 ? less : greater, &needs_adjustment, Label::kNear);
  __ TruncatingDiv(dividend, Abs(divisor));
  if (divisor < 0) __ negl(rdx);
  __ jmp(&done, Label::kNear);
  __ bind(&needs_adjustment);
  __ leal(temp, Operand(dividend, divisor > 0 ? 1 : -1));
  __ TruncatingDiv(temp, Abs(divisor));
  if (divisor < 0) __ negl(rdx);
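// Division by a power of two is an arithmetic shift; the sarl/shrl/addl
// sequence below first adds (2^shift - 1) to negative dividends so the
// final shift rounds toward zero rather than toward negative infinity.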
void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  Register result = ToRegister(instr->result());
  ASSERT(!result.is(dividend));

  HDiv* hdiv = instr->hydrogen();
    __ testl(dividend, dividend);
    DeoptimizeIf(zero, instr->environment());
    DeoptimizeIf(zero, instr->environment());
      divisor != 1 && divisor != -1) {
    int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
    __ testl(dividend, Immediate(mask));
    DeoptimizeIf(not_zero, instr->environment());

  __ Move(result, dividend);
    if (shift > 1) __ sarl(result, Immediate(31));
    __ shrl(result, Immediate(32 - shift));
    __ addl(result, dividend);
    __ sarl(result, Immediate(shift));
  if (divisor < 0) __ negl(result);
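// TruncatingDiv implements division by a constant as multiplication by a
// precomputed magic number plus shifts, leaving the quotient in rdx. For a
// non-truncating division, the imull/subl sequence recomputes
// quotient * divisor and deoptimizes if it differs from the dividend,
// i.e. if there was a remainder.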
void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();

  HDiv* hdiv = instr->hydrogen();
    __ testl(dividend, dividend);
    DeoptimizeIf(zero, instr->environment());

  __ TruncatingDiv(dividend, Abs(divisor));
  if (divisor < 0) __ negp(rdx);

    __ imull(rax, rax, Immediate(divisor));
    __ subl(rax, dividend);
    DeoptimizeIf(not_equal, instr->environment());
void LCodeGen::DoDivI(LDivI* instr) {
  HBinaryOperation* hdiv = instr->hydrogen();
  Register dividend = ToRegister(instr->left());
  Register divisor = ToRegister(instr->right());
  Register remainder = ToRegister(instr->temp());
  Register result = ToRegister(instr->result());

    __ testl(divisor, divisor);
    DeoptimizeIf(zero, instr->environment());

    Label dividend_not_zero;
    __ testl(dividend, dividend);
    __ j(not_zero, &dividend_not_zero, Label::kNear);
    __ testl(divisor, divisor);
    DeoptimizeIf(sign, instr->environment());
    __ bind(&dividend_not_zero);

    Label dividend_not_min_int;
    __ j(not_zero, &dividend_not_min_int, Label::kNear);
    __ cmpl(divisor, Immediate(-1));
    DeoptimizeIf(zero, instr->environment());
    __ bind(&dividend_not_min_int);

  if (hdiv->IsMathFloorOfDiv()) {
    __ testl(remainder, remainder);
    __ j(zero, &done, Label::kNear);
    __ xorl(remainder, divisor);
    __ sarl(remainder, Immediate(31));
    __ addl(result, remainder);
    __ testl(remainder, remainder);
    DeoptimizeIf(not_zero, instr->environment());
void LCodeGen::DoMulI(LMulI* instr) {
  LOperand* right = instr->right();

  if (instr->hydrogen_value()->representation().IsSmi()) {

  if (right->IsConstantOperand()) {
    if (right_value == -1) {
    } else if (right_value == 0) {
      __ xorl(left, left);
    } else if (right_value == 2) {
      __ addl(left, left);
    } else if (!can_overflow) {
      switch (right_value) {
          __ leal(left, Operand(left, left, times_2, 0));
          __ shll(left, Immediate(2));
          __ leal(left, Operand(left, left, times_4, 0));
          __ shll(left, Immediate(3));
          __ leal(left, Operand(left, left, times_8, 0));
          __ shll(left, Immediate(4));
          __ imull(left, left, Immediate(right_value));
      __ imull(left, left, Immediate(right_value));
  } else if (right->IsStackSlot()) {
    if (instr->hydrogen_value()->representation().IsSmi()) {
      __ SmiToInteger64(left, left);
    if (instr->hydrogen_value()->representation().IsSmi()) {
      __ SmiToInteger64(left, left);

    DeoptimizeIf(overflow, instr->environment());

    if (instr->hydrogen_value()->representation().IsSmi()) {
      __ testp(left, left);
      __ testl(left, left);
    if (right->IsConstantOperand()) {
      ASSERT(!instr->hydrogen_value()->representation().IsSmi());
      if (ToInteger32(LConstantOperand::cast(right)) < 0) {
      } else if (ToInteger32(LConstantOperand::cast(right)) == 0) {
        DeoptimizeIf(less, instr->environment());
    } else if (right->IsStackSlot()) {
      if (instr->hydrogen_value()->representation().IsSmi()) {
      DeoptimizeIf(sign, instr->environment());
      if (instr->hydrogen_value()->representation().IsSmi()) {
      DeoptimizeIf(sign, instr->environment());

void LCodeGen::DoBitI(LBitI* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();
  ASSERT(left->Equals(instr->result()));
  ASSERT(left->IsRegister());

  if (right->IsConstantOperand()) {
    switch (instr->op()) {
      case Token::BIT_AND:
      case Token::BIT_XOR:
        if (right_operand == int32_t(~0)) {
  } else if (right->IsStackSlot()) {
    switch (instr->op()) {
      case Token::BIT_AND:
      case Token::BIT_XOR:
    ASSERT(right->IsRegister());
    switch (instr->op()) {
      case Token::BIT_AND:
      case Token::BIT_XOR:
void LCodeGen::DoShiftI(LShiftI* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();
  ASSERT(left->Equals(instr->result()));
  ASSERT(left->IsRegister());
  if (right->IsRegister()) {
    switch (instr->op()) {
        if (instr->can_deopt()) {
          DeoptimizeIf(negative, instr->environment());
    uint8_t shift_count = static_cast<uint8_t>(value & 0x1F);
    switch (instr->op()) {
        if (shift_count != 0) {
        if (shift_count != 0) {
        if (shift_count == 0 && instr->can_deopt()) {
          DeoptimizeIf(negative, instr->environment());
        if (shift_count != 0) {
          if (instr->hydrogen_value()->representation().IsSmi()) {

void LCodeGen::DoSubI(LSubI* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();
  ASSERT(left->Equals(instr->result()));

  if (right->IsConstantOperand()) {
            Immediate(ToInteger32(LConstantOperand::cast(right))));
  } else if (right->IsRegister()) {
    if (instr->hydrogen_value()->representation().IsSmi()) {
    if (instr->hydrogen_value()->representation().IsSmi()) {
    DeoptimizeIf(overflow, instr->environment());

void LCodeGen::DoConstantI(LConstantI* instr) {

void LCodeGen::DoConstantS(LConstantS* instr) {

void LCodeGen::DoConstantD(LConstantD* instr) {
  ASSERT(instr->result()->IsDoubleRegister());
  double v = instr->value();
  uint64_t int_val = BitCast<uint64_t, double>(v);
    __ Set(tmp, int_val);

void LCodeGen::DoConstantE(LConstantE* instr) {
  __ LoadAddress(ToRegister(instr->result()), instr->value());

void LCodeGen::DoConstantT(LConstantT* instr) {
  Handle<Object> value = instr->value(isolate());

void LCodeGen::DoMapEnumLength(LMapEnumLength* instr) {
  Register result = ToRegister(instr->result());
  __ EnumLength(result, map);
void LCodeGen::DoDateField(LDateField* instr) {
  Register result = ToRegister(instr->result());
  Smi* index = instr->index();
  Label runtime, done, not_date_object;
  ASSERT(object.is(result));

  Condition cc = masm()->CheckSmi(object);
  DeoptimizeIf(cc, instr->environment());
  DeoptimizeIf(not_equal, instr->environment());

  if (index->value() == 0) {
    ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
    Operand stamp_operand = __ ExternalOperand(stamp);
    __ jmp(&done, Label::kNear);
    __ PrepareCallCFunction(2);
    __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);

Operand LCodeGen::BuildSeqStringOperand(Register string,
  if (index->IsConstantOperand()) {
    int offset = ToInteger32(LConstantOperand::cast(index));

void LCodeGen::DoSeqStringGetChar(LSeqStringGetChar* instr) {
  Register result = ToRegister(instr->result());
  Register string = ToRegister(instr->string());

  if (FLAG_debug_code) {
                ? one_byte_seq_type : two_byte_seq_type));
    __ Check(equal, kUnexpectedStringType);

  Operand operand = BuildSeqStringOperand(string, instr->index(), encoding);
    __ movzxbl(result, operand);
    __ movzxwl(result, operand);

void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
  Register string = ToRegister(instr->string());

  if (FLAG_debug_code) {
        ? one_byte_seq_type : two_byte_seq_type;
    __ EmitSeqStringSetCharCheck(string, index, value, encoding_mask);

  Operand operand = BuildSeqStringOperand(string, instr->index(), encoding);
  if (instr->value()->IsConstantOperand()) {
    int value = ToInteger32(LConstantOperand::cast(instr->value()));
      __ movb(operand, Immediate(value));
      __ movw(operand, Immediate(value));
      __ movb(operand, value);
      __ movw(operand, value);
void LCodeGen::DoAddI(LAddI* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();

  Representation target_rep = instr->hydrogen()->representation();
  bool is_p = target_rep.IsSmi() || target_rep.IsExternal();

  if (LAddI::UseLea(instr->hydrogen()) && !left->Equals(instr->result())) {
    if (right->IsConstantOperand()) {
    if (right->IsConstantOperand()) {
                Immediate(ToInteger32(LConstantOperand::cast(right))));
                Immediate(ToInteger32(LConstantOperand::cast(right))));
    } else if (right->IsRegister()) {
      DeoptimizeIf(overflow, instr->environment());

void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();
  ASSERT(left->Equals(instr->result()));
  HMathMinMax::Operation operation = instr->hydrogen()->operation();
  if (instr->hydrogen()->representation().IsSmiOrInteger32()) {
    Condition condition = (operation == HMathMinMax::kMathMin)
    if (right->IsConstantOperand()) {
      Immediate right_imm =
          Immediate(ToInteger32(LConstantOperand::cast(right)));
      ASSERT(!instr->hydrogen_value()->representation().IsSmi());
      __ cmpl(left_reg, right_imm);
      __ j(condition, &return_left, Label::kNear);
      __ movp(left_reg, right_imm);
    } else if (right->IsRegister()) {
      if (instr->hydrogen_value()->representation().IsSmi()) {
        __ cmpp(left_reg, right_reg);
        __ cmpl(left_reg, right_reg);
      __ j(condition, &return_left, Label::kNear);
      __ movp(left_reg, right_reg);
      if (instr->hydrogen_value()->representation().IsSmi()) {
        __ cmpp(left_reg, right_op);
        __ cmpl(left_reg, right_op);
      __ j(condition, &return_left, Label::kNear);
      __ movp(left_reg, right_op);
    __ bind(&return_left);
    ASSERT(instr->hydrogen()->representation().IsDouble());
    Label check_nan_left, check_zero, return_left, return_right;
    __ ucomisd(left_reg, right_reg);
    __ j(equal, &check_zero, Label::kNear);
    __ j(condition, &return_left, Label::kNear);
    __ jmp(&return_right, Label::kNear);

    __ bind(&check_zero);
    XMMRegister xmm_scratch = double_scratch0();
    __ xorps(xmm_scratch, xmm_scratch);
    __ ucomisd(left_reg, xmm_scratch);
    if (operation == HMathMinMax::kMathMin) {
      __ orps(left_reg, right_reg);
      __ addsd(left_reg, right_reg);
    __ jmp(&return_left, Label::kNear);

    __ bind(&check_nan_left);
    __ ucomisd(left_reg, left_reg);
    __ bind(&return_right);
    __ movaps(left_reg, right_reg);
    __ bind(&return_left);

void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
  ASSERT(instr->op() == Token::MOD || left.is(result));
  switch (instr->op()) {
      __ addsd(left, right);
      __ subsd(left, right);
      __ mulsd(left, right);
      __ divsd(left, right);
      __ movaps(left, left);
      XMMRegister xmm_scratch = double_scratch0();
      __ PrepareCallCFunction(2);
      __ movaps(xmm_scratch, left);
          ExternalReference::mod_two_doubles_operation(isolate()), 2);
      __ movaps(result, xmm_scratch);

void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
  CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
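// EmitBranch avoids jumps to the block that will be emitted next: depending
// on which successor falls through it emits at most one conditional jump,
// and only when neither successor falls through does it add the trailing
// unconditional jump to the false block.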
template<class InstrType>
void LCodeGen::EmitBranch(InstrType instr, Condition cc) {
  int left_block = instr->TrueDestination(chunk_);
  int right_block = instr->FalseDestination(chunk_);
  int next_block = GetNextEmittedBlock();
    EmitGoto(left_block);
  } else if (left_block == next_block) {
  } else if (right_block == next_block) {
    __ j(cc, chunk_->GetAssemblyLabel(left_block));
    __ j(cc, chunk_->GetAssemblyLabel(left_block));
    __ jmp(chunk_->GetAssemblyLabel(right_block));

template<class InstrType>
void LCodeGen::EmitFalseBranch(InstrType instr, Condition cc) {
  int false_block = instr->FalseDestination(chunk_);
  __ j(cc, chunk_->GetAssemblyLabel(false_block));

void LCodeGen::DoDebugBreak(LDebugBreak* instr) {

void LCodeGen::DoBranch(LBranch* instr) {
  Representation r = instr->hydrogen()->value()->representation();
  if (r.IsInteger32()) {
  } else if (r.IsSmi()) {
  } else if (r.IsDouble()) {
    XMMRegister xmm_scratch = double_scratch0();
    __ xorps(xmm_scratch, xmm_scratch);
    __ ucomisd(reg, xmm_scratch);
    HType type = instr->hydrogen()->value()->type();
    if (type.IsBoolean()) {
      __ CompareRoot(reg, Heap::kTrueValueRootIndex);
      EmitBranch(instr, equal);
    } else if (type.IsSmi()) {
    } else if (type.IsJSArray()) {
    } else if (type.IsHeapNumber()) {
      XMMRegister xmm_scratch = double_scratch0();
      __ xorps(xmm_scratch, xmm_scratch);
    } else if (type.IsString()) {
      ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types();
        __ CompareRoot(reg, Heap::kUndefinedValueRootIndex);
        __ j(equal, instr->FalseLabel(chunk_));
        __ CompareRoot(reg, Heap::kTrueValueRootIndex);
        __ j(equal, instr->TrueLabel(chunk_));
        __ CompareRoot(reg, Heap::kFalseValueRootIndex);
        __ j(equal, instr->FalseLabel(chunk_));
        __ CompareRoot(reg, Heap::kNullValueRootIndex);
        __ j(equal, instr->FalseLabel(chunk_));
        __ j(equal, instr->FalseLabel(chunk_));
        __ JumpIfSmi(reg, instr->TrueLabel(chunk_));
      } else if (expected.NeedsMap()) {
        DeoptimizeIf(zero, instr->environment());
      if (expected.NeedsMap()) {
        if (expected.CanBeUndetectable()) {
        __ jmp(instr->FalseLabel(chunk_));
        __ bind(&not_string);
        __ j(equal, instr->TrueLabel(chunk_));
        Label not_heap_number;
        __ CompareRoot(map, Heap::kHeapNumberMapRootIndex);
        XMMRegister xmm_scratch = double_scratch0();
        __ xorps(xmm_scratch, xmm_scratch);
        __ j(zero, instr->FalseLabel(chunk_));
        __ jmp(instr->TrueLabel(chunk_));
        __ bind(&not_heap_number);
      if (!expected.IsGeneric()) {
void LCodeGen::EmitGoto(int block) {
    __ jmp(chunk_->GetAssemblyLabel(chunk_->LookupDestination(block)));

void LCodeGen::DoGoto(LGoto* instr) {
  EmitGoto(instr->block_id());

    case Token::EQ_STRICT:
    case Token::NE_STRICT:
    case Token::INSTANCEOF:

void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();
  Condition cc = TokenToCondition(instr->op(), instr->is_double());

  if (left->IsConstantOperand() && right->IsConstantOperand()) {
    double left_val = ToDouble(LConstantOperand::cast(left));
    double right_val = ToDouble(LConstantOperand::cast(right));
    int next_block = EvalComparison(instr->op(), left_val, right_val) ?
        instr->TrueDestination(chunk_) : instr->FalseDestination(chunk_);
    EmitGoto(next_block);
    if (instr->is_double()) {
      if (right->IsConstantOperand()) {
        value = ToInteger32(LConstantOperand::cast(right));
        if (instr->hydrogen_value()->representation().IsSmi()) {
      } else if (left->IsConstantOperand()) {
        value = ToInteger32(LConstantOperand::cast(left));
        if (instr->hydrogen_value()->representation().IsSmi()) {
          if (right->IsRegister()) {
      } else if (right->IsRegister()) {
      } else if (instr->hydrogen_value()->representation().IsSmi()) {
        if (right->IsRegister()) {
        if (right->IsRegister()) {
    EmitBranch(instr, cc);

void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
  if (instr->right()->IsConstantOperand()) {
    Handle<Object> right = ToHandle(LConstantOperand::cast(instr->right()));
    __ Cmp(left, right);
    __ cmpp(left, right);
  EmitBranch(instr, equal);
void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) {
  if (instr->hydrogen()->representation().IsTagged()) {
    Register input_reg = ToRegister(instr->object());
    __ Cmp(input_reg, factory()->the_hole_value());
    EmitBranch(instr, equal);

  __ ucomisd(input_reg, input_reg);

  EmitBranch(instr, equal);

void LCodeGen::DoCompareMinusZeroAndBranch(LCompareMinusZeroAndBranch* instr) {
  Representation rep = instr->hydrogen()->value()->representation();
  ASSERT(!rep.IsInteger32());

  if (rep.IsDouble()) {
    XMMRegister xmm_scratch = double_scratch0();
    __ xorps(xmm_scratch, xmm_scratch);
    __ ucomisd(xmm_scratch, value);
    Handle<Map> map = masm()->isolate()->factory()->heap_number_map();
    __ CheckMap(value, map, instr->FalseLabel(chunk()), DO_SMI_CHECK);
            Immediate(0x00000000));
    EmitBranch(instr, equal);

Condition LCodeGen::EmitIsObject(Register input,
                                 Label* is_not_object,
  __ JumpIfSmi(input, is_not_object);

  __ CompareRoot(input, Heap::kNullValueRootIndex);

void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
      reg, instr->FalseLabel(chunk_), instr->TrueLabel(chunk_));
  EmitBranch(instr, true_cond);

Condition LCodeGen::EmitIsString(Register input,
                                 Label* is_not_string,
    __ JumpIfSmi(input, is_not_string);
  Condition cond = masm_->IsObjectStringType(input, temp1, temp1);

void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
      instr->hydrogen()->value()->IsHeapObject()
      reg, temp, instr->FalseLabel(chunk_), check_needed);
  EmitBranch(instr, true_cond);

void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
  if (instr->value()->IsRegister()) {
    is_smi = masm()->CheckSmi(input);
    Operand input = ToOperand(instr->value());
    is_smi = masm()->CheckSmi(input);
  EmitBranch(instr, is_smi);

void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
  if (!instr->hydrogen()->value()->IsHeapObject()) {
    __ JumpIfSmi(input, instr->FalseLabel(chunk_));

void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
  Condition condition = TokenToCondition(op, false);
  EmitBranch(instr, condition);

static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {

static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
  if (from == to) return equal;

void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
  if (!instr->hydrogen()->value()->IsHeapObject()) {
    __ JumpIfSmi(input, instr->FalseLabel(chunk_));
  EmitBranch(instr, BranchCondition(instr->hydrogen()));
void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
  Register result = ToRegister(instr->result());
  __ AssertString(input);
  __ IndexFromHash(result, result);

void LCodeGen::DoHasCachedArrayIndexAndBranch(
    LHasCachedArrayIndexAndBranch* instr) {
  EmitBranch(instr, equal);

void LCodeGen::EmitClassOfTest(Label* is_true,
                               Handle<String> class_name,
  ASSERT(!input.is(temp2));
  __ JumpIfSmi(input, is_false);
  ASSERT(class_name->IsInternalizedString());
  __ Cmp(temp, class_name);

void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
  Handle<String> class_name = instr->hydrogen()->class_name();
  EmitClassOfTest(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_),
                  class_name, input, temp, temp2);
  EmitBranch(instr, equal);

void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
  EmitBranch(instr, equal);

void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
  CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
  Label true_value, done;
  __ j(zero, &true_value, Label::kNear);
  __ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex);
  __ jmp(&done, Label::kNear);
  __ bind(&true_value);
  __ LoadRoot(ToRegister(instr->result()), Heap::kTrueValueRootIndex);

void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
    DeferredInstanceOfKnownGlobal(LCodeGen* codegen,
                                  LInstanceOfKnownGlobal* instr)
      codegen()->DoDeferredInstanceOfKnownGlobal(instr_, &map_check_);
    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
    Label* map_check() { return &map_check_; }
    LInstanceOfKnownGlobal* instr_;

  DeferredInstanceOfKnownGlobal* deferred;
  deferred = new(zone()) DeferredInstanceOfKnownGlobal(this, instr);

  Label done, false_result;
  Register object = ToRegister(instr->value());
  __ JumpIfSmi(object, &false_result, Label::kNear);
  __ bind(deferred->map_check());
  Handle<Cell> cache_cell = factory()->NewCell(factory()->the_hole_value());
  __ LoadRoot(ToRegister(instr->result()), Heap::kTheHoleValueRootIndex);
  Label end_of_patched_code;
  __ bind(&end_of_patched_code);
  __ jmp(&done, Label::kNear);

  __ bind(&cache_miss);
  __ CompareRoot(object, Heap::kNullValueRootIndex);
  __ j(equal, &false_result, Label::kNear);

  __ bind(&false_result);
  __ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex);
  __ bind(deferred->exit());

  PushSafepointRegistersScope scope(this);
  InstanceofStub stub(flags);
  __ Push(instr->function());
  static const int kAdditionalDelta = 10;
      masm_->SizeOfCodeGeneratedSince(map_check) + kAdditionalDelta;
  __ PushImm32(delta);
  CallCodeGeneric(stub.GetCode(isolate()),
                  RelocInfo::CODE_TARGET,
                  RECORD_SAFEPOINT_WITH_REGISTERS,
  ASSERT(delta == masm_->SizeOfCodeGeneratedSince(map_check));
  LEnvironment* env = instr->GetDeferredLazyDeoptimizationEnvironment();
  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
  __ LoadRoot(rax, Heap::kTrueValueRootIndex);
  __ jmp(&done, Label::kNear);
  __ bind(&load_false);
  __ LoadRoot(rax, Heap::kFalseValueRootIndex);
void LCodeGen::DoCmpT(LCmpT* instr) {
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
  Condition condition = TokenToCondition(op, false);
  Label true_value, done;
  __ j(condition, &true_value, Label::kNear);
  __ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex);
  __ jmp(&done, Label::kNear);
  __ bind(&true_value);
  __ LoadRoot(ToRegister(instr->result()), Heap::kTrueValueRootIndex);

void LCodeGen::DoReturn(LReturn* instr) {
  if (FLAG_trace && info()->IsOptimizing()) {
    __ CallRuntime(Runtime::kTraceExit, 1);
  if (info()->saves_caller_doubles()) {
    RestoreCallerDoubles();
  int no_frame_start = -1;
    no_frame_start = masm_->pc_offset();
  if (instr->has_constant_parameter_count()) {
    Register reg = ToRegister(instr->parameter_count());
    __ SmiToInteger32(reg, reg);
    Register return_addr_reg = reg.is(rcx) ? rbx : rcx;
    __ PopReturnAddressTo(return_addr_reg);
    __ jmp(return_addr_reg);
  if (no_frame_start != -1) {
    info_->AddNoFrameRange(no_frame_start, masm_->pc_offset());

void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
  Register result = ToRegister(instr->result());
  __ LoadGlobalCell(result, instr->hydrogen()->cell().handle());
  if (instr->hydrogen()->RequiresHoleCheck()) {
    __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
    DeoptimizeIf(equal, instr->environment());

void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
  __ Move(rcx, instr->name());
  CallCode(ic, RelocInfo::CODE_TARGET, instr);

void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
  Handle<Cell> cell_handle = instr->hydrogen()->cell().handle();
  if (instr->hydrogen()->RequiresHoleCheck()) {
    __ Move(cell, cell_handle, RelocInfo::CELL);
    __ CompareRoot(Operand(cell, 0), Heap::kTheHoleValueRootIndex);
    DeoptimizeIf(equal, instr->environment());
    __ movp(Operand(cell, 0), value);

void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
  Register context = ToRegister(instr->context());
  Register result = ToRegister(instr->result());
  if (instr->hydrogen()->RequiresHoleCheck()) {
    __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
    if (instr->hydrogen()->DeoptimizesOnHole()) {
      DeoptimizeIf(equal, instr->environment());
      __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
      __ bind(&is_not_hole);

void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
  Register context = ToRegister(instr->context());

  Label skip_assignment;
  if (instr->hydrogen()->RequiresHoleCheck()) {
    __ CompareRoot(target, Heap::kTheHoleValueRootIndex);
    if (instr->hydrogen()->DeoptimizesOnHole()) {
      DeoptimizeIf(equal, instr->environment());

  __ movp(target, value);

  if (instr->hydrogen()->NeedsWriteBarrier()) {
        instr->hydrogen()->value()->IsHeapObject()
    Register scratch = ToRegister(instr->temp());
    __ RecordWriteContextSlot(context,

  __ bind(&skip_assignment);
void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
  HObjectAccess access = instr->hydrogen()->access();
  int offset = access.offset();

  if (access.IsExternalMemory()) {
    Register result = ToRegister(instr->result());
    if (instr->object()->IsConstantOperand()) {
      __ load_rax(ToExternalReference(LConstantOperand::cast(instr->object())));
      Register object = ToRegister(instr->object());

  Register object = ToRegister(instr->object());
  if (instr->hydrogen()->representation().IsDouble()) {

  Register result = ToRegister(instr->result());
  if (!access.IsInobject()) {
  Representation representation = access.representation();
  if (representation.IsSmi() &&
      instr->hydrogen()->representation().IsInteger32()) {
      __ AssertSmi(scratch);

void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
  __ Move(rcx, instr->name());
  CallCode(ic, RelocInfo::CODE_TARGET, instr);

void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
  Register function = ToRegister(instr->function());
  Register result = ToRegister(instr->result());
  DeoptimizeIf(not_equal, instr->environment());
  __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
  DeoptimizeIf(equal, instr->environment());
  __ jmp(&done, Label::kNear);
  __ bind(&non_instance);

void LCodeGen::DoLoadRoot(LLoadRoot* instr) {
  Register result = ToRegister(instr->result());
  __ LoadRoot(result, instr->index());

void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
  Register arguments = ToRegister(instr->arguments());
  Register result = ToRegister(instr->result());
  if (instr->length()->IsConstantOperand() &&
      instr->index()->IsConstantOperand()) {
    if (const_index >= 0 && const_index < const_length) {
      StackArgumentsAccessor args(arguments, const_length,
      __ movp(result, args.GetArgumentOperand(const_index));
    } else if (FLAG_debug_code) {
    Register length = ToRegister(instr->length());
    if (instr->index()->IsRegister()) {
    StackArgumentsAccessor args(arguments, length,
    __ movp(result, args.GetArgumentOperand(0));

void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
  LOperand* key = instr->key();
  int base_offset = instr->is_fixed_typed_array()
  Operand operand(BuildFastArrayOperand(
      instr->additional_index()));
    __ movss(result, operand);
    __ cvtss2sd(result, result);
    Register result(ToRegister(instr->result()));
    switch (elements_kind) {
        __ movsxbq(result, operand);
        __ movzxbp(result, operand);
        __ movsxwq(result, operand);
        __ movzxwp(result, operand);
        __ movsxlq(result, operand);
        __ movl(result, operand);
          __ testl(result, result);
          DeoptimizeIf(negative, instr->environment());

void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
  LOperand* key = instr->key();
  if (instr->hydrogen()->RequiresHoleCheck()) {
    Operand hole_check_operand = BuildFastArrayOperand(
        instr->additional_index());
    DeoptimizeIf(equal, instr->environment());

  Operand double_load_operand = BuildFastArrayOperand(
      instr->additional_index());
  __ movsd(result, double_load_operand);

void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
  HLoadKeyed* hinstr = instr->hydrogen();
  Register result = ToRegister(instr->result());
  LOperand* key = instr->key();
  bool requires_hole_check = hinstr->RequiresHoleCheck();
  Representation representation = hinstr->representation();

  if (representation.IsInteger32() &&
    ASSERT(!requires_hole_check);
        BuildFastArrayOperand(instr->elements(),
                              instr->additional_index()),
      __ AssertSmi(scratch);
          BuildFastArrayOperand(instr->elements(),
                                instr->additional_index()),
  if (requires_hole_check) {
      __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
      DeoptimizeIf(equal, instr->environment());

void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) {
  if (instr->is_typed_elements()) {
    DoLoadKeyedExternalArray(instr);
  } else if (instr->hydrogen()->representation().IsDouble()) {
    DoLoadKeyedFixedDoubleArray(instr);
    DoLoadKeyedFixedArray(instr);

Operand LCodeGen::BuildFastArrayOperand(
    LOperand* elements_pointer,
    uint32_t additional_index) {
  Register elements_pointer_reg = ToRegister(elements_pointer);
  if (key->IsConstantOperand()) {
    if (constant_value & 0xF0000000) {
      Abort(kArrayIndexConstantValueTooBig);
    return Operand(elements_pointer_reg,
                   ((constant_value + additional_index) << shift_size)
    return Operand(elements_pointer_reg,
                   offset + (additional_index << shift_size));

void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
  Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
  Register result = ToRegister(instr->result());
  if (instr->hydrogen()->from_inlined()) {
    Label done, adapted;
    __ j(equal, &adapted, Label::kNear);
    __ movp(result, rbp);
    __ jmp(&done, Label::kNear);

void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
  Register result = ToRegister(instr->result());
  if (instr->elements()->IsRegister()) {
  __ movl(result, Immediate(scope()->num_parameters()));
  __ j(equal, &done, Label::kNear);
  __ SmiToInteger32(result,

void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
  Register receiver = ToRegister(instr->receiver());
  Register function = ToRegister(instr->function());

  Label global_object, receiver_ok;
  Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear;
  if (!instr->hydrogen()->known_function()) {
  __ CompareRoot(receiver, Heap::kNullValueRootIndex);
  __ j(equal, &global_object, Label::kNear);
  __ CompareRoot(receiver, Heap::kUndefinedValueRootIndex);
  __ j(equal, &global_object, Label::kNear);
  DeoptimizeIf(is_smi, instr->environment());
  DeoptimizeIf(below, instr->environment());
  __ jmp(&receiver_ok, Label::kNear);
  __ bind(&global_object);
  __ bind(&receiver_ok);

void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
  Register receiver = ToRegister(instr->receiver());
  Register function = ToRegister(instr->function());
  Register length = ToRegister(instr->length());
  Register elements = ToRegister(instr->elements());

  const uint32_t kArgumentsLimit = 1 * KB;
  __ cmpp(length, Immediate(kArgumentsLimit));
  DeoptimizeIf(above, instr->environment());
  __ movp(receiver, length);
  __ testl(length, length);
  __ j(zero, &invoke, Label::kNear);
  StackArgumentsAccessor args(elements, length,
  __ Push(args.GetArgumentOperand(0));
  ASSERT(instr->HasPointerMap());
  LPointerMap* pointers = instr->pointer_map();
      this, pointers, Safepoint::kLazyDeopt);
  ParameterCount actual(rax);
  __ InvokeFunction(function, actual, CALL_FUNCTION, safepoint_generator);

void LCodeGen::DoPushArgument(LPushArgument* instr) {
  LOperand* argument = instr->value();
  EmitPushTaggedOperand(argument);

void LCodeGen::DoDrop(LDrop* instr) {
  __ Drop(instr->count());

void LCodeGen::DoThisFunction(LThisFunction* instr) {
  Register result = ToRegister(instr->result());

void LCodeGen::DoContext(LContext* instr) {
  Register result = ToRegister(instr->result());
  if (info()->IsOptimizing()) {

void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
  __ Push(instr->hydrogen()->pairs());
  CallRuntime(Runtime::kHiddenDeclareGlobals, 3, instr);

void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
                                 LInstruction* instr,
                                 RDIState rdi_state) {
  bool dont_adapt_arguments =
  bool can_invoke_directly =
      dont_adapt_arguments || formal_parameter_count == arity;

  LPointerMap* pointers = instr->pointer_map();

  if (can_invoke_directly) {
    if (rdi_state == RDI_UNINITIALIZED) {
      __ Move(rdi, function);
    if (dont_adapt_arguments) {
    if (function.is_identical_to(info()->closure())) {
    RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT, 0);
        this, pointers, Safepoint::kLazyDeopt);
    ParameterCount count(arity);
    ParameterCount expected(formal_parameter_count);
    __ InvokeFunction(function, expected, count, CALL_FUNCTION, generator);

void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
  LPointerMap* pointers = instr->pointer_map();
  if (instr->target()->IsConstantOperand()) {
    LConstantOperand* target = LConstantOperand::cast(instr->target());
    generator.BeforeCall(__ CallSize(code));
    __ call(code, RelocInfo::CODE_TARGET);
    ASSERT(instr->target()->IsRegister());
    Register target = ToRegister(instr->target());
    generator.BeforeCall(__ CallSize(target));
  generator.AfterCall();
3405 void LCodeGen::DoCallJSFunction(LCallJSFunction* instr) {
3409 if (instr->hydrogen()->pass_argument_count()) {
3410 __ Set(
rax, instr->arity());
3416 LPointerMap* pointers = instr->pointer_map();
3419 bool is_self_call =
false;
3420 if (instr->hydrogen()->function()->IsConstant()) {
3422 HConstant* fun_const = HConstant::cast(instr->hydrogen()->function());
3424 is_self_call = jsfun.is_identical_to(
info()->closure());
3431 generator.BeforeCall(
__ CallSize(target));
3434 generator.AfterCall();
3439 Register input_reg =
ToRegister(instr->value());
3441 Heap::kHeapNumberMapRootIndex);
3442 DeoptimizeIf(
not_equal, instr->environment());
3444 Label slow, allocated, done;
3445 Register tmp = input_reg.is(
rax) ?
rcx :
rax;
3449 PushSafepointRegistersScope scope(
this);
3459 __ AllocateHeapNumber(tmp, tmp2, &slow);
3460 __ jmp(&allocated, Label::kNear);
3464 CallRuntimeFromDeferred(
3465 Runtime::kHiddenAllocateHeapNumber, 0, instr, instr->context());
3467 if (!tmp.is(
rax))
__ movp(tmp,
rax);
3469 __ LoadFromSafepointRegisterSlot(input_reg, input_reg);
3471 __ bind(&allocated);
3473 __ shl(tmp2, Immediate(1));
3474 __ shr(tmp2, Immediate(1));
3476 __ StoreToSafepointRegisterSlot(input_reg, tmp);
3482 void LCodeGen::EmitIntegerMathAbs(LMathAbs* instr) {
3483 Register input_reg =
ToRegister(instr->value());
3484 __ testl(input_reg, input_reg);
3488 DeoptimizeIf(
negative, instr->environment());
3489 __ bind(&is_positive);
3493 void LCodeGen::EmitSmiMathAbs(LMathAbs* instr) {
3494 Register input_reg =
ToRegister(instr->value());
3495 __ testp(input_reg, input_reg);
3499 DeoptimizeIf(
negative, instr->environment());
3500 __ bind(&is_positive);
3504 void LCodeGen::DoMathAbs(LMathAbs* instr) {
3508 DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen, LMathAbs* instr)
3511 codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
3513 virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
3518 ASSERT(instr->value()->Equals(instr->result()));
3519 Representation r = instr->hydrogen()->value()->representation();
3522 XMMRegister scratch = double_scratch0();
3524 __ xorps(scratch, scratch);
3525 __ subsd(scratch, input_reg);
3526 __ andps(input_reg, scratch);
3527 } else if (r.IsInteger32()) {
3528 EmitIntegerMathAbs(instr);
3529 } else if (r.IsSmi()) {
3530 EmitSmiMathAbs(instr);
3532 DeferredMathAbsTaggedHeapNumber* deferred =
3533 new(zone()) DeferredMathAbsTaggedHeapNumber(this, instr);
3534 Register input_reg = ToRegister(instr->value());
3536 __ JumpIfNotSmi(input_reg, deferred->entry());
3537 EmitSmiMathAbs(instr);
3538 __ bind(deferred->exit());
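// DoMathFloor: with SSE4.1 the rounded value is converted with cvttsd2si and
// the cmpl against 0x1 catches the 0x80000000 overflow pattern produced by an
// out-of-range conversion; without SSE4.1 the sign is inspected explicitly,
// deoptimizing on -0 (movmskpd/testq) when required and subtracting one from
// the truncated result for negative non-integral inputs.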
3543 void LCodeGen::DoMathFloor(LMathFloor* instr) {
3544 XMMRegister xmm_scratch = double_scratch0();
3545 Register output_reg = ToRegister(instr->result());
3549 CpuFeatureScope scope(masm(), SSE4_1);
3552 __ movq(output_reg, input_reg);
3553 __ subq(output_reg, Immediate(1));
3554 DeoptimizeIf(overflow, instr->environment());
3557 __ cvttsd2si(output_reg, xmm_scratch);
3558 __ cmpl(output_reg, Immediate(0x1));
3559 DeoptimizeIf(overflow, instr->environment());
3561 Label negative_sign, done;
3563 __ xorps(xmm_scratch, xmm_scratch);
3564 __ ucomisd(input_reg, xmm_scratch);
3566 __ j(below, &negative_sign, Label::kNear);
3570 Label positive_sign;
3571 __ j(above, &positive_sign, Label::kNear);
3572 __ movmskpd(output_reg, input_reg);
3573 __ testq(output_reg, Immediate(1));
3574 DeoptimizeIf(not_zero, instr->environment());
3575 __ Set(output_reg, 0);
3576 __ jmp(&done, Label::kNear);
3577 __ bind(&positive_sign);
3581 __ cvttsd2si(output_reg, input_reg);
3583 __ cmpl(output_reg, Immediate(0x1));
3584 DeoptimizeIf(overflow, instr->environment());
3585 __ jmp(&done, Label::kNear);
3588 __ bind(&negative_sign);
3590 __ cvttsd2si(output_reg, input_reg);
3591 __ Cvtlsi2sd(xmm_scratch, output_reg);
3592 __ ucomisd(input_reg, xmm_scratch);
3593 __ j(equal, &done, Label::kNear);
3594 __ subl(output_reg, Immediate(1));
3595 DeoptimizeIf(overflow, instr->environment());
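// DoMathRound adds 0.5 and truncates. one_half/minus_one_half below are the
// raw IEEE-754 bit patterns for +0.5 and -0.5; inputs in (-0.5, +0.5) are
// handled separately so a "minus zero" result can deoptimize when the
// instruction must preserve it, and the cmpl against 0x1 after cvttsd2si again
// flags D2I conversion overflow.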
3602 void LCodeGen::DoMathRound(LMathRound* instr) {
3603 const XMMRegister xmm_scratch = double_scratch0();
3604 Register output_reg = ToRegister(instr->result());
3607 static int64_t one_half = V8_INT64_C(0x3FE0000000000000);
3608 static int64_t minus_one_half = V8_INT64_C(0xBFE0000000000000);
3610 Label done, round_to_zero, below_one_half;
3611 Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear;
3614 __ ucomisd(xmm_scratch, input_reg);
3615 __ j(above, &below_one_half, Label::kNear);
3618 __ addsd(xmm_scratch, input_reg);
3619 __ cvttsd2si(output_reg, xmm_scratch);
3621 __ cmpl(output_reg, Immediate(0x1));
3622 __ RecordComment("D2I conversion overflow");
3623 DeoptimizeIf(overflow, instr->environment());
3624 __ jmp(&done, dist);
3626 __ bind(&below_one_half);
3629 __ ucomisd(xmm_scratch, input_reg);
3634 __ movq(input_temp, input_reg);
3635 __ subsd(input_temp, xmm_scratch);
3636 __ cvttsd2si(output_reg, input_temp);
3638 __ cmpl(output_reg, Immediate(0x1));
3639 __ RecordComment("D2I conversion overflow");
3640 DeoptimizeIf(overflow, instr->environment());
3642 __ Cvtlsi2sd(xmm_scratch, output_reg);
3643 __ ucomisd(xmm_scratch, input_temp);
3645 __ subl(output_reg, Immediate(1));
3647 __ jmp(&done, dist);
3649 __ bind(&round_to_zero);
3653 __ movq(output_reg, input_reg);
3654 __ testq(output_reg, output_reg);
3655 __ RecordComment("Minus zero");
3656 DeoptimizeIf(negative, instr->environment());
3658 __ Set(output_reg, 0);
3663 void LCodeGen::DoMathSqrt(LMathSqrt* instr) {
3666 __ sqrtsd(input_reg, input_reg);
3670 void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
3671 XMMRegister xmm_scratch = double_scratch0();
3683 __ ucomisd(xmm_scratch, input_reg);
3687 __ j(carry, &sqrt, Label::kNear);
3689 __ xorps(input_reg, input_reg);
3690 __ subsd(input_reg, xmm_scratch);
3691 __ jmp(&done, Label::kNear);
3695 __ xorps(xmm_scratch, xmm_scratch);
3696 __ addsd(input_reg, xmm_scratch);
3697 __ sqrtsd(input_reg, input_reg);
3702 void LCodeGen::DoPower(LPower* instr) {
3703 Representation exponent_type = instr->hydrogen()->right()->representation();
3707 Register exponent = rdx;
3708 ASSERT(!instr->right()->IsRegister() ||
3710 ASSERT(!instr->right()->IsDoubleRegister() ||
3715 if (exponent_type.IsSmi()) {
3718 } else if (exponent_type.IsTagged()) {
3720 __ JumpIfSmi(exponent, &no_deopt, Label::kNear);
3722 DeoptimizeIf(not_equal, instr->environment());
3726 } else if (exponent_type.IsInteger32()) {
3730 ASSERT(exponent_type.IsDouble());
3737 void LCodeGen::DoMathExp(LMathExp* instr) {
3740 XMMRegister temp0 = double_scratch0();
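// DoMathLog computes log(x) in place: negative inputs produce the canonical
// NaN, zero produces -Infinity, and positive inputs go through the x87 unit
// (fld_d/fstp_d via the slot at rsp) since there is no SSE logarithm
// instruction.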
3748 void LCodeGen::DoMathLog(LMathLog* instr) {
3749 ASSERT(instr->value()->Equals(instr->result()));
3751 XMMRegister xmm_scratch = double_scratch0();
3753 __ xorps(xmm_scratch, xmm_scratch);
3754 __ ucomisd(input_reg, xmm_scratch);
3755 __ j(above, &positive, Label::kNear);
3757 ExternalReference nan =
3758 ExternalReference::address_of_canonical_non_hole_nan();
3759 Operand nan_operand = masm()->ExternalOperand(nan);
3760 __ movsd(input_reg, nan_operand);
3761 __ jmp(&done, Label::kNear);
3763 ExternalReference ninf =
3764 ExternalReference::address_of_negative_infinity();
3765 Operand ninf_operand = masm()->ExternalOperand(ninf);
3766 __ movsd(input_reg, ninf_operand);
3767 __ jmp(&done, Label::kNear);
3771 __ movsd(Operand(rsp, 0), input_reg);
3772 __ fld_d(Operand(rsp, 0));
3774 __ fstp_d(Operand(rsp, 0));
3775 __ movsd(input_reg, Operand(rsp, 0));
3781 void LCodeGen::DoMathClz32(LMathClz32* instr) {
3783 Register result = ToRegister(instr->result());
3784 Label not_zero_input;
3785 __ bsrl(result, input);
3790 __ bind(&not_zero_input);
3791 __ xorl(result, Immediate(31));
3795 void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
3798 ASSERT(instr->HasPointerMap());
3800 Handle<JSFunction> known_function = instr->hydrogen()->known_function();
3801 if (known_function.is_null()) {
3802 LPointerMap* pointers = instr->pointer_map();
3804 ParameterCount count(instr->arity());
3807 CallKnownFunction(known_function,
3808 instr->hydrogen()->formal_parameter_count(),
3811 RDI_CONTAINS_TARGET);
3816 void LCodeGen::DoCallFunction(LCallFunction* instr) {
3821 int arity = instr->arity();
3822 CallFunctionStub stub(arity, instr->hydrogen()->function_flags());
3823 CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
3827 void LCodeGen::DoCallNew(LCallNew* instr) {
3832 __ Set(rax, instr->arity());
3834 __ LoadRoot(rbx, Heap::kUndefinedValueRootIndex);
3836 CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
3840 void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
3845 __ Set(rax, instr->arity());
3846 __ LoadRoot(rbx, Heap::kUndefinedValueRootIndex);
3847 ElementsKind kind = instr->hydrogen()->elements_kind();
3853 if (instr->arity() == 0) {
3854 ArrayNoArgumentConstructorStub stub(kind, override_mode);
3855 CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
3856 } else if (instr->arity() == 1) {
3864 __ j(zero, &packed_case, Label::kNear);
3867 ArraySingleArgumentConstructorStub stub(holey_kind, override_mode);
3868 CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
3869 __ jmp(&done, Label::kNear);
3870 __ bind(&packed_case);
3873 ArraySingleArgumentConstructorStub stub(kind, override_mode);
3874 CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
3877 ArrayNArgumentsConstructorStub stub(kind, override_mode);
3878 CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
3883 void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
3885 CallRuntime(instr->function(), instr->arity(), instr, instr->save_doubles());
3889 void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) {
3890 Register function = ToRegister(instr->function());
3891 Register code_object = ToRegister(instr->code_object());
3897 void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) {
3898 Register result = ToRegister(instr->result());
3899 Register base = ToRegister(instr->base_object());
3900 if (instr->offset()->IsConstantOperand()) {
3901 LConstantOperand* offset = LConstantOperand::cast(instr->offset());
3904 Register offset = ToRegister(instr->offset());
3905 __ leap(result, Operand(base, offset, times_1, 0));
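// DoStoreNamedField handles several cases: stores to external memory (through
// an ExternalReference or a MemOperand), in-object and backing-store fields,
// smi and double representations, an optional map transition (with its own
// write barrier when needed), and finally a RecordWriteField barrier for the
// stored value itself.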
3910 void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
3911 HStoreNamedField* hinstr = instr->hydrogen();
3912 Representation representation = instr->representation();
3914 HObjectAccess access = hinstr->access();
3915 int offset = access.offset();
3917 if (access.IsExternalMemory()) {
3918 ASSERT(!hinstr->NeedsWriteBarrier());
3920 if (instr->object()->IsConstantOperand()) {
3922 LConstantOperand* object = LConstantOperand::cast(instr->object());
3923 __ store_rax(ToExternalReference(object));
3925 Register object = ToRegister(instr->object());
3926 __ Store(MemOperand(object, offset), value, representation);
3931 Register object = ToRegister(instr->object());
3932 Handle<Map> transition = instr->transition();
3933 SmiCheck check_needed = hinstr->value()->IsHeapObject()
3936 ASSERT(!(representation.IsSmi() &&
3937 instr->value()->IsConstantOperand() &&
3939 if (representation.IsHeapObject()) {
3940 if (instr->value()->IsConstantOperand()) {
3941 LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
3942 if (chunk_->LookupConstant(operand_value)->HasSmiValue()) {
3946 if (!hinstr->value()->type().IsHeapObject()) {
3949 DeoptimizeIf(cc, instr->environment());
3955 } else if (representation.IsDouble()) {
3956 ASSERT(transition.is_null());
3957 ASSERT(access.IsInobject());
3958 ASSERT(!hinstr->NeedsWriteBarrier());
3964 if (!transition.is_null()) {
3965 if (!hinstr->NeedsWriteBarrierForMap()) {
3972 __ RecordWriteField(object,
3983 Register write_register = object;
3984 if (!access.IsInobject()) {
3989 if (representation.IsSmi() &&
3990 hinstr->value()->representation().IsInteger32()) {
3995 __ AssertSmi(scratch);
4004 Operand operand = FieldOperand(write_register, offset);
4006 if (instr->value()->IsRegister()) {
4008 __ Store(operand, value, representation);
4010 LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
4012 ASSERT(!hinstr->NeedsWriteBarrier());
4014 if (representation.IsSmi()) {
4018 __ movl(operand, Immediate(value));
4022 Handle<Object> handle_value = ToHandle(operand_value);
4023 ASSERT(!hinstr->NeedsWriteBarrier());
4024 __ Move(operand, handle_value);
4028 if (hinstr->NeedsWriteBarrier()) {
4030 Register temp = access.IsInobject() ? ToRegister(instr->temp()) : object;
4032 __ RecordWriteField(write_register,
4043 void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
4048 __ Move(rcx, instr->hydrogen()->name());
4050 CallCode(ic, RelocInfo::CODE_TARGET, instr);
4055 if (FLAG_debug_code && check->hydrogen()->skip_check()) {
4061 DeoptimizeIf(cc, check->environment());
4066 void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
4067 HBoundsCheck* hinstr = instr->hydrogen();
4068 if (hinstr->skip_check()) return;
4070 Representation representation = hinstr->length()->representation();
4071 ASSERT(representation.Equals(hinstr->index()->representation()));
4072 ASSERT(representation.IsSmiOrInteger32());
4074 if (instr->length()->IsRegister()) {
4077 if (instr->index()->IsConstantOperand()) {
4079 ToInteger32(LConstantOperand::cast(instr->index()));
4080 if (representation.IsSmi()) {
4083 __ cmpl(reg, Immediate(constant_index));
4087 if (representation.IsSmi()) {
4094 Operand length = ToOperand(instr->length());
4095 if (instr->index()->IsConstantOperand()) {
4097 ToInteger32(LConstantOperand::cast(instr->index()));
4098 if (representation.IsSmi()) {
4101 __ cmpl(length, Immediate(constant_index));
4104 if (representation.IsSmi()) {
4112 ApplyCheckIf(condition, instr);
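// DoStoreKeyedExternalArray stores to a typed/external array: float32 values
// are narrowed with cvtsd2ss before the movss, while the integer element kinds
// pick movb/movw/movl in the switch on elements_kind; the address itself comes
// from BuildFastArrayOperand.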
4116 void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
4118 LOperand* key = instr->key();
4119 int base_offset = instr->is_fixed_typed_array()
4122 Operand operand(BuildFastArrayOperand(
4127 instr->additional_index()));
4132 __ cvtsd2ss(value, value);
4133 __ movss(operand, value);
4139 switch (elements_kind) {
4146 __ movb(operand, value);
4152 __ movw(operand, value);
4158 __ movl(operand, value);
4166 case FAST_DOUBLE_ELEMENTS:
4179 void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
4181 LOperand* key = instr->key();
4182 if (instr->NeedsCanonicalization()) {
4185 __ ucomisd(value, value);
4192 __ bind(&have_value);
4195 Operand double_store_operand = BuildFastArrayOperand(
4200 instr->additional_index());
4202 __ movsd(double_store_operand, value);
4206 void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
4207 HStoreKeyed* hinstr = instr->hydrogen();
4208 LOperand* key = instr->key();
4210 Representation representation = hinstr->value()->representation();
4212 if (representation.IsInteger32()) {
4218 BuildFastArrayOperand(instr->elements(),
4222 instr->additional_index()),
4224 __ AssertSmi(scratch);
4233 BuildFastArrayOperand(instr->elements(),
4237 instr->additional_index());
4239 if (instr->value()->IsRegister()) {
4240 __ Store(operand, ToRegister(instr->value()), representation);
4242 LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
4245 if (representation.IsSmi()) {
4249 __ movl(operand, Immediate(value));
4252 Handle<Object> handle_value = ToHandle(operand_value);
4253 __ Move(operand, handle_value);
4257 if (hinstr->NeedsWriteBarrier()) {
4258 Register elements = ToRegister(instr->elements());
4259 ASSERT(instr->value()->IsRegister());
4261 ASSERT(!key->IsConstantOperand());
4262 SmiCheck check_needed = hinstr->value()->IsHeapObject()
4266 __ leap(key_reg, operand);
4267 __ RecordWrite(elements,
4277 void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) {
4278 if (instr->is_typed_elements()) {
4279 DoStoreKeyedExternalArray(instr);
4280 } else if (instr->hydrogen()->value()->representation().IsDouble()) {
4281 DoStoreKeyedFixedDoubleArray(instr);
4283 DoStoreKeyedFixedArray(instr);
4288 void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
4294 Handle<Code> ic = instr->strict_mode() == STRICT
4295 ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
4296 : isolate()->builtins()->KeyedStoreIC_Initialize();
4297 CallCode(ic, RelocInfo::CODE_TARGET, instr);
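// DoTransitionElementsKind: for a simple elements-kind transition the new map
// is materialized in new_map_temp and written into the object's map slot; for
// other transitions the object is moved into rax, the target map into rbx, and
// the TransitionElementsKindStub is called with a safepoint that saves the
// registers.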
4301 void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
4302 Register object_reg = ToRegister(instr->object());
4304 Handle<Map> from_map = instr->original_map();
4305 Handle<Map> to_map = instr->transitioned_map();
4309 Label not_applicable;
4313 Register new_map_reg = ToRegister(instr->new_map_temp());
4314 __ Move(new_map_reg, to_map, RelocInfo::EMBEDDED_OBJECT);
4322 PushSafepointRegistersScope scope(this);
4323 if (!object_reg.is(rax)) {
4324 __ movp(rax, object_reg);
4326 __ Move(rbx, to_map);
4327 bool is_js_array = from_map->instance_type() == JS_ARRAY_TYPE;
4328 TransitionElementsKindStub stub(from_kind, to_kind, is_js_array);
4330 RecordSafepointWithRegisters(
4331 instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
4333 __ bind(&not_applicable);
4337 void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
4338 Register object = ToRegister(instr->object());
4340 Label no_memento_found;
4341 __ TestJSArrayForAllocationMemento(object, temp, &no_memento_found);
4342 DeoptimizeIf(equal, instr->environment());
4343 __ bind(&no_memento_found);
4347 void LCodeGen::DoStringAdd(LStringAdd* instr) {
4351 StringAddStub stub(instr->hydrogen()->flags(),
4352 instr->hydrogen()->pretenure_flag());
4353 CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
4357 void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
4360 DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
4363 codegen()->DoDeferredStringCharCodeAt(instr_);
4365 virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
4367 LStringCharCodeAt* instr_;
4370 DeferredStringCharCodeAt* deferred =
4371 new(zone()) DeferredStringCharCodeAt(this, instr);
4378 __ bind(deferred->exit());
4383 Register string = ToRegister(instr->string());
4384 Register result = ToRegister(instr->result());
4391 PushSafepointRegistersScope scope(this);
4396 if (instr->index()->IsConstantOperand()) {
4401 __ Integer32ToSmi(index, index);
4404 CallRuntimeFromDeferred(
4405 Runtime::kHiddenStringCharCodeAt, 2, instr, instr->context());
4408 __ StoreToSafepointRegisterSlot(result, rax);
4412 void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
4415 DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
4418 codegen()->DoDeferredStringCharFromCode(instr_);
4420 virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
4422 LStringCharFromCode* instr_;
4425 DeferredStringCharFromCode* deferred =
4426 new(zone()) DeferredStringCharFromCode(this, instr);
4428 ASSERT(instr->hydrogen()->value()->representation().IsInteger32());
4429 Register char_code = ToRegister(instr->char_code());
4430 Register result = ToRegister(instr->result());
4431 ASSERT(!char_code.is(result));
4434 __ j(above, deferred->entry());
4435 __ movsxlq(char_code, char_code);
4436 __ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex);
4440 __ CompareRoot(result, Heap::kUndefinedValueRootIndex);
4441 __ j(equal, deferred->entry());
4442 __ bind(deferred->exit());
4447 Register char_code = ToRegister(instr->char_code());
4448 Register result = ToRegister(instr->result());
4455 PushSafepointRegistersScope scope(this);
4456 __ Integer32ToSmi(char_code, char_code);
4458 CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr, instr->context());
4459 __ StoreToSafepointRegisterSlot(result, rax);
4463 void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
4464 LOperand* input = instr->value();
4465 ASSERT(input->IsRegister() || input->IsStackSlot());
4466 LOperand* output = instr->result();
4467 ASSERT(output->IsDoubleRegister());
4468 if (input->IsRegister()) {
4476 void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
4477 LOperand* input = instr->value();
4478 LOperand* output = instr->result();
4479 LOperand* temp = instr->temp();
4487 void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
4488 LOperand* input = instr->value();
4489 ASSERT(input->IsRegister() && input->Equals(instr->result()));
4492 __ Integer32ToSmi(reg, reg);
4496 void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
4499 DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr)
4502 codegen()->DoDeferredNumberTagU(instr_);
4504 virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
4506 LNumberTagU* instr_;
4509 LOperand* input = instr->value();
4510 ASSERT(input->IsRegister() && input->Equals(instr->result()));
4513 DeferredNumberTagU* deferred = new(zone()) DeferredNumberTagU(this, instr);
4515 __ j(above, deferred->entry());
4516 __ Integer32ToSmi(reg, reg);
4517 __ bind(deferred->exit());
4521 void LCodeGen::DoDeferredNumberTagU(LNumberTagU* instr) {
4530 XMMRegister xmm_scratch = double_scratch0();
4531 __ LoadUint32(temp_xmm, reg, xmm_scratch);
4533 if (FLAG_inline_new) {
4534 __ AllocateHeapNumber(reg, tmp, &slow);
4535 __ jmp(&done, Label::kNear);
4547 PushSafepointRegistersScope scope(this);
4555 __ CallRuntimeSaveDoubles(Runtime::kHiddenAllocateHeapNumber);
4556 RecordSafepointWithRegisters(
4557 instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
4558 __ StoreToSafepointRegisterSlot(reg, rax);
4568 void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
4571 DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
4574 codegen()->DoDeferredNumberTagD(instr_);
4576 virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
4578 LNumberTagD* instr_;
4585 DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr);
4586 if (FLAG_inline_new) {
4587 __ AllocateHeapNumber(reg, tmp, deferred->entry());
4589 __ jmp(deferred->entry());
4591 __ bind(deferred->exit());
4604 PushSafepointRegistersScope scope(this);
4611 __ CallRuntimeSaveDoubles(Runtime::kHiddenAllocateHeapNumber);
4612 RecordSafepointWithRegisters(
4613 instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
4620 void LCodeGen::DoSmiTag(LSmiTag* instr) {
4621 HChange* hchange = instr->hydrogen();
4623 Register output = ToRegister(instr->result());
4626 __ testl(input, input);
4627 DeoptimizeIf(sign, instr->environment());
4629 __ Integer32ToSmi(output, input);
4632 DeoptimizeIf(overflow, instr->environment());
4637 void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
4638 ASSERT(instr->value()->Equals(instr->result()));
4640 if (instr->needs_check()) {
4644 __ AssertSmi(input);
4646 __ SmiToInteger32(input, input);
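// EmitNumberUntagD converts a tagged value to a double in result_reg: smis are
// converted directly, heap numbers have their map checked against the
// HeapNumberMap root, undefined can optionally convert to NaN (produced here
// as 0.0/0.0), and a minus-zero result can trigger a deopt when
// deoptimize_on_minus_zero is set.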
4650 void LCodeGen::EmitNumberUntagD(Register input_reg,
4651 XMMRegister result_reg,
4652 bool can_convert_undefined_to_nan,
4653 bool deoptimize_on_minus_zero,
4656 Label convert, load_smi, done;
4660 __ JumpIfSmi(input_reg, &load_smi, Label::kNear);
4664 Heap::kHeapNumberMapRootIndex);
4670 if (can_convert_undefined_to_nan) {
4676 if (deoptimize_on_minus_zero) {
4677 XMMRegister xmm_scratch = double_scratch0();
4678 __ xorps(xmm_scratch, xmm_scratch);
4679 __ ucomisd(xmm_scratch, result_reg);
4685 __ jmp(&done, Label::kNear);
4687 if (can_convert_undefined_to_nan) {
4691 __ CompareRoot(input_reg, Heap::kUndefinedValueRootIndex);
4694 __ xorps(result_reg, result_reg);
4695 __ divsd(result_reg, result_reg);
4696 __ jmp(&done, Label::kNear);
4711 Register input_reg = ToRegister(instr->value());
4713 if (instr->truncating()) {
4714 Label no_heap_number, check_bools, check_false;
4718 Heap::kHeapNumberMapRootIndex);
4720 __ TruncateHeapNumberToI(input_reg, input_reg);
4723 __ bind(&no_heap_number);
4726 __ CompareRoot(input_reg, Heap::kUndefinedValueRootIndex);
4728 __ Set(input_reg, 0);
4731 __ bind(&check_bools);
4732 __ CompareRoot(input_reg, Heap::kTrueValueRootIndex);
4734 __ Set(input_reg, 1);
4737 __ bind(&check_false);
4738 __ CompareRoot(input_reg, Heap::kFalseValueRootIndex);
4739 __ RecordComment("Deferred TaggedToI: cannot truncate");
4740 DeoptimizeIf(not_equal, instr->environment());
4741 __ Set(input_reg, 0);
4746 __ TaggedToI(input_reg, input_reg, xmm_temp,
4747 instr->hydrogen()->GetMinusZeroMode(), &bailout, Label::kNear);
4756 void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
4759 DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
4762 codegen()->DoDeferredTaggedToI(instr_, done());
4764 virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
4769 LOperand* input = instr->value();
4770 ASSERT(input->IsRegister());
4771 ASSERT(input->Equals(instr->result()));
4774 if (instr->hydrogen()->value()->representation().IsSmi()) {
4775 __ SmiToInteger32(input_reg, input_reg);
4777 DeferredTaggedToI* deferred = new(zone()) DeferredTaggedToI(this, instr);
4778 __ JumpIfNotSmi(input_reg, deferred->entry());
4779 __ SmiToInteger32(input_reg, input_reg);
4780 __ bind(deferred->exit());
4785 void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
4786 LOperand* input = instr->value();
4787 ASSERT(input->IsRegister());
4788 LOperand* result = instr->result();
4789 ASSERT(result->IsDoubleRegister());
4794 HValue* value = instr->hydrogen()->value();
4798 EmitNumberUntagD(input_reg, result_reg,
4799 instr->hydrogen()->can_convert_undefined_to_nan(),
4800 instr->hydrogen()->deoptimize_on_minus_zero(),
4801 instr->environment(),
4806 void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
4807 LOperand* input = instr->value();
4808 ASSERT(input->IsDoubleRegister());
4809 LOperand* result = instr->result();
4810 ASSERT(result->IsRegister());
4815 if (instr->truncating()) {
4816 __ TruncateDoubleToI(result_reg, input_reg);
4818 Label bailout, done;
4819 XMMRegister xmm_scratch = double_scratch0();
4820 __ DoubleToI(result_reg, input_reg, xmm_scratch,
4821 instr->hydrogen()->GetMinusZeroMode(), &bailout, Label::kNear);
4823 __ jmp(&done, Label::kNear);
4831 void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
4832 LOperand* input = instr->value();
4833 ASSERT(input->IsDoubleRegister());
4834 LOperand* result = instr->result();
4835 ASSERT(result->IsRegister());
4840 Label bailout, done;
4841 XMMRegister xmm_scratch = double_scratch0();
4842 __ DoubleToI(result_reg, input_reg, xmm_scratch,
4843 instr->hydrogen()->GetMinusZeroMode(), &bailout, Label::kNear);
4845 __ jmp(&done, Label::kNear);
4850 __ Integer32ToSmi(result_reg, result_reg);
4851 DeoptimizeIf(overflow, instr->environment());
4855 void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
4856 LOperand* input = instr->value();
4862 void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
4863 if (!instr->hydrogen()->value()->IsHeapObject()) {
4864 LOperand* input = instr->value();
4866 DeoptimizeIf(cc, instr->environment());
4871 void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
4876 if (instr->hydrogen()->is_interval_check()) {
4879 instr->hydrogen()->GetCheckInterval(&first, &last);
4882 Immediate(static_cast<int8_t>(first)));
4885 if (first == last) {
4886 DeoptimizeIf(not_equal, instr->environment());
4888 DeoptimizeIf(below, instr->environment());
4892 Immediate(static_cast<int8_t>(last)));
4893 DeoptimizeIf(above, instr->environment());
4899 instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);
4905 DeoptimizeIf(tag == 0 ? not_zero : zero, instr->environment());
4911 DeoptimizeIf(not_equal, instr->environment());
4917 void LCodeGen::DoCheckValue(LCheckValue* instr) {
4919 __ Cmp(reg, instr->hydrogen()->object().handle());
4920 DeoptimizeIf(not_equal, instr->environment());
4926 PushSafepointRegistersScope scope(this);
4929 __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance);
4930 RecordSafepointWithRegisters(
4931 instr->pointer_map(), 1, Safepoint::kNoLazyDeopt);
4935 DeoptimizeIf(zero, instr->environment());
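// DoCheckMaps compares the object's map against each map in the hydrogen
// instruction's map set, branching to success on the first match; if the last
// compare also fails it either jumps to the deferred DeferredCheckMaps code
// (which attempts instance migration via Runtime::kTryMigrateInstance) or
// deoptimizes directly when no migration target exists.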
4939 void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
4942 DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object)
4944 SetExit(check_maps());
4947 codegen()->DoDeferredInstanceMigration(instr_, object_);
4949 Label* check_maps() { return &check_maps_; }
4950 virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
4957 if (instr->hydrogen()->CanOmitMapChecks()) return;
4959 LOperand* input = instr->value();
4960 ASSERT(input->IsRegister());
4963 DeferredCheckMaps* deferred = NULL;
4964 if (instr->hydrogen()->has_migration_target()) {
4965 deferred = new(zone()) DeferredCheckMaps(this, instr, reg);
4966 __ bind(deferred->check_maps());
4969 UniqueSet<Map> map_set = instr->hydrogen()->map_set();
4971 for (int i = 0; i < map_set.size() - 1; i++) {
4972 Handle<Map> map = map_set.at(i).handle();
4973 __ CompareMap(reg, map);
4974 __ j(equal, &success, Label::kNear);
4977 Handle<Map> map = map_set.at(map_set.size() - 1).handle();
4978 __ CompareMap(reg, map);
4979 if (instr->hydrogen()->has_migration_target()) {
4982 DeoptimizeIf(not_equal, instr->environment());
4989 void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
4991 XMMRegister xmm_scratch = double_scratch0();
4992 Register result_reg = ToRegister(instr->result());
4993 __ ClampDoubleToUint8(value_reg, xmm_scratch, result_reg);
4997 void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
4998 ASSERT(instr->unclamped()->Equals(instr->result()));
4999 Register value_reg = ToRegister(instr->result());
5000 __ ClampUint8(value_reg);
5004 void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
5005 ASSERT(instr->unclamped()->Equals(instr->result()));
5006 Register input_reg = ToRegister(instr->unclamped());
5008 XMMRegister xmm_scratch = double_scratch0();
5009 Label is_smi, done, heap_number;
5010 Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear;
5011 __ JumpIfSmi(input_reg, &is_smi, dist);
5015 factory()->heap_number_map());
5016 __ j(equal, &heap_number, Label::kNear);
5020 __ Cmp(input_reg, factory()->undefined_value());
5021 DeoptimizeIf(not_equal, instr->environment());
5022 __ xorl(input_reg, input_reg);
5023 __ jmp(&done, Label::kNear);
5026 __ bind(&heap_number);
5028 __ ClampDoubleToUint8(xmm_scratch, temp_xmm_reg, input_reg);
5029 __ jmp(&done, Label::kNear);
5033 __ SmiToInteger32(input_reg, input_reg);
5034 __ ClampUint8(input_reg);
5040 void LCodeGen::DoDoubleBits(LDoubleBits* instr) {
5042 Register result_reg = ToRegister(instr->result());
5043 if (instr->hydrogen()->bits() == HDoubleBits::HIGH) {
5044 __ movq(result_reg, value_reg);
5045 __ shr(result_reg, Immediate(32));
5047 __ movd(result_reg, value_reg);
5052 void LCodeGen::DoConstructDouble(LConstructDouble* instr) {
5056 XMMRegister xmm_scratch = double_scratch0();
5057 __ movd(result_reg, hi_reg);
5058 __ psllq(result_reg, 32);
5059 __ movd(xmm_scratch, lo_reg);
5060 __ orps(result_reg, xmm_scratch);
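// DoAllocate tries an inline bump-pointer allocation via __ Allocate, choosing
// the allocation flags from the hydrogen instruction (double alignment, old
// pointer/data space vs. new space); on failure it jumps to DeferredAllocate,
// which calls Runtime::kHiddenAllocateInTargetSpace with the size passed as a
// smi. Optionally the new object is prefilled with the one-pointer filler map.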
5064 void LCodeGen::DoAllocate(LAllocate* instr) {
5067 DeferredAllocate(LCodeGen* codegen, LAllocate* instr)
5070 codegen()->DoDeferredAllocate(instr_);
5072 virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
5077 DeferredAllocate* deferred =
5078 new(zone()) DeferredAllocate(this, instr);
5080 Register result = ToRegister(instr->result());
5085 if (instr->hydrogen()->MustAllocateDoubleAligned()) {
5088 if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
5089 ASSERT(!instr->hydrogen()->IsOldDataSpaceAllocation());
5090 ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
5092 } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
5093 ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
5097 if (instr->size()->IsConstantOperand()) {
5100 __ Allocate(size, result, temp, no_reg, deferred->entry(), flags);
5102 __ jmp(deferred->entry());
5106 __ Allocate(size, result, temp, no_reg, deferred->entry(), flags);
5109 __ bind(deferred->exit());
5111 if (instr->hydrogen()->MustPrefillWithFiller()) {
5112 if (instr->size()->IsConstantOperand()) {
5123 isolate()->factory()->one_pointer_filler_map());
5131 Register result = ToRegister(instr->result());
5138 PushSafepointRegistersScope scope(this);
5139 if (instr->size()->IsRegister()) {
5141 ASSERT(!size.is(result));
5142 __ Integer32ToSmi(size, size);
5150 if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
5151 ASSERT(!instr->hydrogen()->IsOldDataSpaceAllocation());
5152 ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
5154 } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
5155 ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
5162 CallRuntimeFromDeferred(
5163 Runtime::kHiddenAllocateInTargetSpace, 2, instr, instr->context());
5164 __ StoreToSafepointRegisterSlot(result, rax);
5168 void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
5171 CallRuntime(Runtime::kToFastProperties, 1, instr);
5175 void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
5182 int literal_offset =
5184 __ Move(rcx, instr->hydrogen()->literals());
5186 __ CompareRoot(rbx, Heap::kUndefinedValueRootIndex);
5193 __ Push(instr->hydrogen()->pattern());
5194 __ Push(instr->hydrogen()->flags());
5195 CallRuntime(Runtime::kHiddenMaterializeRegExpLiteral, 4, instr);
5198 __ bind(&materialized);
5200 Label allocated, runtime_allocate;
5202 __ jmp(&allocated, Label::kNear);
5204 __ bind(&runtime_allocate);
5207 CallRuntime(Runtime::kHiddenAllocateInNewSpace, 1, instr);
5210 __ bind(&allocated);
5219 if ((size % (2 * kPointerSize)) != 0) {
5226 void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
5230 bool pretenure = instr->hydrogen()->pretenure();
5231 if (!pretenure && instr->hydrogen()->has_no_literals()) {
5232 FastNewClosureStub stub(instr->hydrogen()->strict_mode(),
5233 instr->hydrogen()->is_generator());
5234 __ Move(rbx, instr->hydrogen()->shared_info());
5235 CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
5238 __ Push(instr->hydrogen()->shared_info());
5239 __ PushRoot(pretenure ? Heap::kTrueValueRootIndex :
5240 Heap::kFalseValueRootIndex);
5241 CallRuntime(Runtime::kHiddenNewClosure, 3, instr);
5246 void LCodeGen::DoTypeof(LTypeof* instr) {
5248 LOperand* input = instr->value();
5249 EmitPushTaggedOperand(input);
5250 CallRuntime(Runtime::kTypeof, 1, instr);
5254 void LCodeGen::EmitPushTaggedOperand(LOperand* operand) {
5255 ASSERT(!operand->IsDoubleRegister());
5256 if (operand->IsConstantOperand()) {
5257 __ Push(ToHandle(LConstantOperand::cast(operand)));
5258 } else if (operand->IsRegister()) {
5266 void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
5268 Condition final_branch_condition = EmitTypeofIs(instr, input);
5270 EmitBranch(instr, final_branch_condition);
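// EmitTypeofIs emits the compare-and-branch sequence for "typeof x == <literal>".
// Each supported literal ("number", "string", "symbol", "boolean", "undefined",
// "function", "object", and "null" under --harmony-typeof) gets its own check,
// and the function returns the condition that the caller feeds into EmitBranch;
// unknown literals simply jump to the false label.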
5275 Condition LCodeGen::EmitTypeofIs(LTypeofIsAndBranch* instr, Register input) {
5276 Label* true_label = instr->TrueLabel(chunk_);
5277 Label* false_label = instr->FalseLabel(chunk_);
5278 Handle<String> type_name = instr->type_literal();
5279 int left_block = instr->TrueDestination(chunk_);
5280 int right_block = instr->FalseDestination(chunk_);
5281 int next_block = GetNextEmittedBlock();
5283 Label::Distance true_distance = left_block == next_block ? Label::kNear
5285 Label::Distance false_distance = right_block == next_block ? Label::kNear
5288 if (type_name->Equals(heap()->number_string())) {
5289 __ JumpIfSmi(input, true_label, true_distance);
5291 Heap::kHeapNumberMapRootIndex);
5293 final_branch_condition = equal;
5295 } else if (type_name->Equals(heap()->string_string())) {
5296 __ JumpIfSmi(input, false_label, false_distance);
5301 final_branch_condition = zero;
5303 } else if (type_name->Equals(heap()->symbol_string())) {
5304 __ JumpIfSmi(input, false_label, false_distance);
5306 final_branch_condition = equal;
5308 } else if (type_name->Equals(heap()->boolean_string())) {
5309 __ CompareRoot(input, Heap::kTrueValueRootIndex);
5310 __ j(equal, true_label, true_distance);
5311 __ CompareRoot(input, Heap::kFalseValueRootIndex);
5312 final_branch_condition = equal;
5314 } else if (FLAG_harmony_typeof && type_name->Equals(heap()->null_string())) {
5315 __ CompareRoot(input, Heap::kNullValueRootIndex);
5316 final_branch_condition = equal;
5318 } else if (type_name->Equals(heap()->undefined_string())) {
5319 __ CompareRoot(input, Heap::kUndefinedValueRootIndex);
5320 __ j(equal, true_label, true_distance);
5321 __ JumpIfSmi(input, false_label, false_distance);
5328 } else if (type_name->Equals(heap()->function_string())) {
5330 __ JumpIfSmi(input, false_label, false_distance);
5332 __ j(equal, true_label, true_distance);
5334 final_branch_condition = equal;
5336 } else if (type_name->Equals(heap()->object_string())) {
5337 __ JumpIfSmi(input, false_label, false_distance);
5338 if (!FLAG_harmony_typeof) {
5339 __ CompareRoot(input, Heap::kNullValueRootIndex);
5340 __ j(equal, true_label, true_distance);
5343 __ j(below, false_label, false_distance);
5345 __ j(above, false_label, false_distance);
5349 final_branch_condition = zero;
5352 __ jmp(false_label, false_distance);
5355 return final_branch_condition;
5359 void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
5362 EmitIsConstructCall(temp);
5363 EmitBranch(instr, equal);
5367 void LCodeGen::EmitIsConstructCall(Register temp) {
5372 Label check_frame_marker;
5375 __ j(not_equal, &check_frame_marker, Label::kNear);
5379 __ bind(&check_frame_marker);
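// EnsureSpaceForLazyDeopt pads the instruction stream with nops so that the
// code emitted since the last lazy-deopt point is at least space_needed bytes,
// guaranteeing that a call can later be patched in for lazy deoptimization.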
5385 void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
5386 if (!info()->IsStub()) {
5389 int current_pc = masm()->pc_offset();
5390 if (current_pc < last_lazy_deopt_pc_ + space_needed) {
5391 int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
5392 __ Nop(padding_size);
5395 last_lazy_deopt_pc_ = masm()->pc_offset();
5399 void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
5400 last_lazy_deopt_pc_ = masm()->pc_offset();
5401 ASSERT(instr->HasEnvironment());
5403 RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
5404 safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
5408 void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
5418 Comment(";;; deoptimize: %s", instr->hydrogen()->reason());
5419 DeoptimizeIf(no_condition, instr->environment(), type);
5423 void LCodeGen::DoDummy(LDummy* instr) {
5428 void LCodeGen::DoDummyUse(LDummyUse* instr) {
5434 PushSafepointRegistersScope scope(this);
5436 __ CallRuntimeSaveDoubles(Runtime::kHiddenStackGuard);
5437 RecordSafepointWithLazyDeopt(instr, RECORD_SAFEPOINT_WITH_REGISTERS, 0);
5438 ASSERT(instr->HasEnvironment());
5440 safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
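// DoStackCheck compares rsp against the stack-limit root. At function entry a
// failing check calls the StackCheck builtin; at a backwards branch it jumps to
// DeferredStackCheck, whose deferred code calls Runtime::kHiddenStackGuard with
// all registers saved.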
5444 void LCodeGen::DoStackCheck(LStackCheck* instr) {
5447 DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
5450 codegen()->DoDeferredStackCheck(instr_);
5452 virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
5454 LStackCheck* instr_;
5457 ASSERT(instr->HasEnvironment());
5461 if (instr->hydrogen()->is_function_entry()) {
5464 __ CompareRoot(rsp, Heap::kStackLimitRootIndex);
5467 ASSERT(instr->context()->IsRegister());
5469 CallCode(isolate()->builtins()->StackCheck(),
5470 RelocInfo::CODE_TARGET,
5474 ASSERT(instr->hydrogen()->is_backwards_branch());
5476 DeferredStackCheck* deferred_stack_check =
5477 new(zone()) DeferredStackCheck(this, instr);
5478 __ CompareRoot(rsp, Heap::kStackLimitRootIndex);
5479 __ j(below, deferred_stack_check->entry());
5481 __ bind(instr->done_label());
5482 deferred_stack_check->SetExit(instr->done_label());
5483 RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
5491 void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
5499 ASSERT(!environment->HasBeenRegistered());
5500 RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
5502 GenerateOsrPrologue();
5506 void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
5508 __ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
5509 DeoptimizeIf(equal, instr->environment());
5511 Register null_value = rdi;
5512 __ LoadRoot(null_value, Heap::kNullValueRootIndex);
5513 __ cmpp(rax, null_value);
5514 DeoptimizeIf(equal, instr->environment());
5517 DeoptimizeIf(cc, instr->environment());
5523 Label use_cache, call_runtime;
5524 __ CheckEnumCache(null_value, &call_runtime);
5527 __ jmp(&use_cache, Label::kNear);
5530 __ bind(&call_runtime);
5532 CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr);
5535 Heap::kMetaMapRootIndex);
5536 DeoptimizeIf(not_equal, instr->environment());
5537 __ bind(&use_cache);
5541 void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
5543 Register result = ToRegister(instr->result());
5544 Label load_cache, done;
5545 __ EnumLength(result, map);
5548 __ LoadRoot(result, Heap::kEmptyFixedArrayRootIndex);
5549 __ jmp(&done, Label::kNear);
5550 __ bind(&load_cache);
5551 __ LoadInstanceDescriptors(map, result);
5557 Condition cc = masm()->CheckSmi(result);
5558 DeoptimizeIf(cc, instr->environment());
5562 void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
5563 Register object = ToRegister(instr->value());
5566 DeoptimizeIf(not_equal, instr->environment());
5570 void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
5571 Register object = ToRegister(instr->object());
5574 Label out_of_object, done;
5575 __ SmiToInteger32(index, index);
5576 __ cmpl(index, Immediate(0));
5577 __ j(less, &out_of_object, Label::kNear);
5582 __ jmp(&done, Label::kNear);
5584 __ bind(&out_of_object);
5600 #endif // V8_TARGET_ARCH_X64
static Handle< Code > initialize_stub(Isolate *isolate, StrictMode strict_mode)
enable upcoming ES6 features enable harmony block scoping enable harmony enable harmony proxies enable harmony generators enable harmony numeric enable harmony string enable harmony math functions harmony_scoping harmony_symbols harmony_collections harmony_iteration harmony_strings harmony_scoping harmony_maths tracks arrays with only smi values Optimize object Array DOM strings and string pretenure call new trace pretenuring decisions of HAllocate instructions track fields with only smi values track fields with heap values track_fields track_fields Enables optimizations which favor memory size over execution speed use string slices optimization filter maximum number of GVN fix point iterations use function inlining use allocation folding eliminate write barriers targeting allocations in optimized code maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining crankshaft harvests type feedback from stub cache trace check elimination phase hydrogen tracing filter NULL
static RelocInfo::Mode RelocInfoNone()
static const int kLengthOffset
void FinishCode(Handle< Code > code)
static const int kHashFieldOffset
static const int kBitFieldOffset
void DoDeferredStringCharFromCode(LStringCharFromCode *instr)
void DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal *instr, Label *map_check)
const intptr_t kSmiTagMask
static const int kCodeEntryOffset
static const int kPrototypeOrInitialMapOffset
static int SlotOffset(int index)
static Representation Smi()
enable upcoming ES6 features enable harmony block scoping enable harmony enable harmony proxies enable harmony generators enable harmony numeric enable harmony string enable harmony math functions harmony_scoping harmony_symbols harmony_collections harmony_iteration harmony_strings harmony_scoping harmony_maths tracks arrays with only smi values Optimize object Array DOM strings and string pretenure call new trace pretenuring decisions of HAllocate instructions track fields with only smi values track fields with heap values track_fields track_fields Enables optimizations which favor memory size over execution speed use string slices optimization filter maximum number of GVN fix point iterations use function inlining use allocation folding eliminate write barriers targeting allocations in optimized code maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining crankshaft harvests type feedback from stub cache trace check elimination phase hydrogen tracing filter trace hydrogen to given file name trace inlining decisions trace store elimination trace all use positions trace global value numbering trace hydrogen escape analysis trace the tracking of allocation sites trace map generalization environment for every instruction deoptimize every n garbage collections put a break point before deoptimizing deoptimize uncommon cases use on stack replacement trace array bounds check elimination perform array index dehoisting use load elimination use store elimination use constant folding eliminate unreachable code number of stress runs when picking a function to watch for shared function not JSFunction itself flushes the cache of optimized code for closures on every GC functions with arguments object maximum number of escape analysis fix point iterations allow uint32 values on optimize frames if they are used only in safe operations track concurrent recompilation artificial compilation delay in ms concurrent on stack replacement do not emit check maps for constant values that have a leaf map
static const int kEnumCacheOffset
static const int kMaxUtf16CodeUnit
const uint32_t kTwoByteStringTag
int StackSlotOffset(int index)
RegisterType type() const
static Smi * FromInt(int value)
Smi * ToSmi(LConstantOperand *op) const
bool NeedsEagerFrame() const
static Handle< Code > GetUninitialized(Isolate *isolate, Token::Value op)
static const int kDataOffset
static Handle< T > cast(Handle< S > that)
static const int kGlobalReceiverOffset
static Representation Integer32()
static const int kNativeByteOffset
static XMMRegister FromAllocationIndex(int index)
static const unsigned int kContainsCachedArrayIndexMask
static bool IsSupported(CpuFeature f)
static const int kStrictModeBitWithinByte
AllocationSiteOverrideMode
void DoDeferredMathAbsTaggedHeapNumber(LMathAbs *instr)
#define ASSERT(condition)
void DoDeferredStringCharCodeAt(LStringCharCodeAt *instr)
static const int kContextOffset
const int kPointerSizeLog2
static const int kInObjectFieldCount
const uint32_t kStringRepresentationMask
MemOperand GlobalObjectOperand()
static const int kCallerFPOffset
PerThreadAssertScopeDebugOnly< DEFERRED_HANDLE_DEREFERENCE_ASSERT, true > AllowDeferredHandleDereference
static Handle< Code > initialize_stub(Isolate *isolate, ExtraICState extra_state)
static const int kInstanceClassNameOffset
bool IsDehoistedKeyConstant(LConstantOperand *op) const
bool IsSimpleMapChangeTransition(ElementsKind from_kind, ElementsKind to_kind)
Variable * parameter(int index) const
MemOperand ContextOperand(Register context, int index)
bool IsInteger32Constant(LConstantOperand *op) const
static const int kContextOffset
void DoDeferredStackCheck(LStackCheck *instr)
Condition ReverseCondition(Condition cond)
Operand ToOperand(LOperand *op)
const uint32_t kSlotsZapValue
int32_t WhichPowerOf2Abs(int32_t x)
enable upcoming ES6 features enable harmony block scoping enable harmony enable harmony proxies enable harmony generators enable harmony numeric enable harmony string enable harmony math functions harmony_scoping harmony_symbols harmony_collections harmony_iteration harmony_strings harmony_scoping harmony_maths tracks arrays with only smi values Optimize object Array DOM strings and string pretenure call new trace pretenuring decisions of HAllocate instructions track fields with only smi values track fields with heap values track_fields track_fields Enables optimizations which favor memory size over execution speed use string slices optimization filter maximum number of GVN fix point iterations use function inlining use allocation folding eliminate write barriers targeting allocations in optimized code maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining crankshaft harvests type feedback from stub cache trace check elimination phase hydrogen tracing filter trace hydrogen to given file name trace inlining decisions trace store elimination trace all use positions trace global value numbering trace hydrogen escape analysis trace the tracking of allocation sites trace map generalization environment for every instruction deoptimize every n garbage collections put a break point before deoptimizing deoptimize uncommon cases use on stack replacement trace array bounds check elimination perform array index dehoisting use load elimination use store elimination use constant folding eliminate unreachable code number of stress runs when picking a function to watch for shared function not JSFunction itself flushes the cache of optimized code for closures on every GC functions with arguments object maximum number of escape analysis fix point iterations allow uint32 values on optimize frames if they are used only in safe operations track concurrent recompilation artificial compilation delay in ms concurrent on stack replacement do not emit check maps for constant values that have a leaf deoptimize the optimized code if the layout of the maps changes number of stack frames inspected by the profiler percentage of ICs that must have type info to allow optimization extra verbose compilation tracing generate extra emit comments in code disassembly enable use of SSE3 instructions if available enable use of CMOV instruction if available enable use of VFP3 instructions if available enable use of NEON instructions if enable use of SDIV and UDIV instructions if enable loading bit constant by means of movw movt instruction enable unaligned accesses for enable use of d16 d31 registers on ARM this requires VFP3 force all emitted branches to be in long mode(MIPS only)") DEFINE_string(expose_natives_as
enable upcoming ES6 features enable harmony block scoping enable harmony enable harmony proxies enable harmony generators enable harmony numeric enable harmony string enable harmony math functions harmony_scoping harmony_symbols harmony_collections harmony_iteration harmony_strings harmony_scoping harmony_maths tracks arrays with only smi values Optimize object size
friend class LEnvironment
static const int kLengthOffset
STATIC_ASSERT(sizeof(CPURegister)==sizeof(Register))
static const int kValueOffset
enable upcoming ES6 features enable harmony block scoping enable harmony enable harmony proxies enable harmony generators enable harmony numeric enable harmony string enable harmony math functions harmony_scoping harmony_symbols harmony_collections harmony_iteration harmony_strings harmony_scoping harmony_maths tracks arrays with only smi values Optimize object Array DOM strings and string pretenure call new trace pretenuring decisions of HAllocate instructions track fields with only smi values track fields with heap values track_fields track_fields Enables optimizations which favor memory size over execution speed use string slices optimization filter maximum number of GVN fix point iterations use function inlining use allocation folding eliminate write barriers targeting allocations in optimized code maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining crankshaft harvests type feedback from stub cache trace check elimination phase hydrogen tracing filter trace hydrogen to given file name trace inlining decisions trace store elimination trace all use positions trace global value numbering trace hydrogen escape analysis trace the tracking of allocation sites trace map generalization environment for every instruction deoptimize every n garbage collections put a break point before deoptimizing deoptimize uncommon cases use on stack replacement trace array bounds check elimination perform array index dehoisting use load elimination use store elimination use constant folding eliminate unreachable code number of stress runs when picking a function to watch for shared function not JSFunction itself flushes the cache of optimized code for closures on every GC functions with arguments object maximum number of escape analysis fix point iterations allow uint32 values on optimize frames if they are used only in safe operations track concurrent recompilation artificial compilation delay in ms concurrent on stack replacement do not emit check maps for constant values that have a leaf deoptimize the optimized code if the layout of the maps changes number of stack frames inspected by the profiler percentage of ICs that must have type info to allow optimization extra verbose compilation tracing generate extra emit comments in code disassembly enable use of SSE3 instructions if available enable use of CMOV instruction if available enable use of VFP3 instructions if available enable use of NEON instructions if enable use of SDIV and UDIV instructions if enable loading bit constant by means of movw movt instruction enable unaligned accesses for enable use of d16 d31 registers on ARM this requires VFP3 force all emitted branches to be in long expose natives in global object expose freeBuffer extension expose gc extension under the specified name expose externalize string extension number of stack frames to capture disable builtin natives files print name of functions for which code is generated use random jit cookie to mask large constants trace lazy optimization use adaptive optimizations always try to OSR functions trace optimize function deoptimization minimum length for automatic enable preparsing maximum number of optimization attempts before giving up cache prototype transitions trace debugging JSON request response trace out of bounds accesses to external arrays trace_js_array_abuse automatically set the debug break flag when debugger commands are in the queue abort by crashing maximum length of function source code printed in a 
stack trace max size of the new max size of the old max size of executable always perform global GCs print one trace line following each garbage collection do not print trace line after scavenger collection print statistics of the maximum memory committed for the heap in only print modified registers Don t break for ASM_UNIMPLEMENTED_BREAK macros print stack trace when an illegal exception is thrown randomize hashes to avoid predictable hash Fixed seed to use to hash property Print the time it takes to deserialize the snapshot testing_bool_flag testing_int_flag string flag tmp file in which to serialize heap Print the time it takes to lazily compile hydrogen code stubs concurrent_recompilation concurrent_sweeping Print usage including flags
bool IsFastPackedElementsKind(ElementsKind kind)
const uint32_t kHoleNanUpper32
static const int kDontAdaptArgumentsSentinel
void DoDeferredNumberTagD(LNumberTagD *instr)
static uint32_t update(uint32_tprevious, AllocationSpacevalue)
static Address GetDeoptimizationEntry(Isolate *isolate, int id, BailoutType type, GetEntryMode mode=ENSURE_ENTRY_CODE)
void check(i::Vector< const uint8_t > string)
enable upcoming ES6 features enable harmony block scoping enable harmony enable harmony proxies enable harmony generators enable harmony numeric enable harmony string enable harmony math functions harmony_scoping harmony_symbols harmony_collections harmony_iteration harmony_strings harmony_scoping harmony_maths tracks arrays with only smi values Optimize object Array shift
static void MaybeCallEntryHook(MacroAssembler *masm)
Operand FieldOperand(Register object, int offset)
DwVfpRegister ToDoubleRegister(LOperand *op) const
void DoDeferredAllocate(LAllocate *instr)
static int GetDeoptimizationId(Isolate *isolate, Address addr, BailoutType type)
const uint32_t kHoleNanLower32
static const int kMaxRegularHeapObjectSize
enable upcoming ES6 features enable harmony block scoping enable harmony enable harmony proxies enable harmony generators enable harmony numeric enable harmony string enable harmony math functions harmony_scoping harmony_symbols harmony_collections harmony_iteration harmony_strings harmony_scoping harmony_maths tracks arrays with only smi values Optimize object Array DOM strings and string pretenure call new trace pretenuring decisions of HAllocate instructions track fields with only smi values track fields with heap values track_fields track_fields Enables optimizations which favor memory size over execution speed use string slices optimization filter maximum number of GVN fix point iterations use function inlining use allocation folding eliminate write barriers targeting allocations in optimized code maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining crankshaft harvests type feedback from stub cache trace check elimination phase hydrogen tracing filter trace hydrogen to given file name trace inlining decisions trace store elimination trace all use positions trace global value numbering trace hydrogen escape analysis trace the tracking of allocation sites trace map generalization environment for every instruction deoptimize every n garbage collections put a break point before deoptimizing deoptimize uncommon cases use on stack replacement trace array bounds check elimination perform array index dehoisting use load elimination use store elimination use constant folding eliminate unreachable code number of stress runs when picking a function to watch for shared function not JSFunction itself flushes the cache of optimized code for closures on every GC functions with arguments object maximum number of escape analysis fix point iterations allow uint32 values on optimize frames if they are used only in safe operations track concurrent recompilation artificial compilation delay in ms concurrent on stack replacement do not emit check maps for constant values that have a leaf deoptimize the optimized code if the layout of the maps changes number of stack frames inspected by the profiler percentage of ICs that must have type info to allow optimization extra verbose compilation tracing generate extra code(assertions) for debugging") DEFINE_bool(code_comments
static Register FromAllocationIndex(int index)
static const int kCallerSPOffset
static const int kCacheStampOffset
static const int kPropertiesOffset
#define ASSERT_LE(v1, v2)
int32_t ToInteger32(LConstantOperand *op) const
int num_parameters() const
static const int kMarkerOffset
bool IsFastSmiElementsKind(ElementsKind kind)
static void Generate(MacroAssembler *masm, Register string, Register index, Register result, Label *call_runtime)
Handle< Object > ToHandle(LConstantOperand *op) const
static const int kHeaderSize
static const int kNativeBitWithinByte
#define STATIC_ASCII_VECTOR(x)
void DoDeferredInstanceMigration(LCheckMaps *instr, Register object)
void DoParallelMove(LParallelMove *move)
double ToDouble(LConstantOperand *op) const
int ElementsKindToShiftSize(ElementsKind elements_kind)
Vector< const char > CStrVector(const char *data)
static int OffsetOfElementAt(int index)
static int SizeFor(int length)
bool IsSmiConstant(LConstantOperand *op) const
bool NeedsDeferredFrame() const
static const int kHeaderSize
void Load(const v8::FunctionCallbackInfo< v8::Value > &args)
static const int kMapOffset
static const int kValueOffset
bool is(Register reg) const
Handle< T > handle(T *t, Isolate *isolate)
static const int kHasNonInstancePrototype
void WriteTranslation(LEnvironment *environment, Translation *translation)
const Register kScratchRegister
static const int kFunctionOffset
static const uint32_t kSignMask
static const int kNotDeoptimizationEntry
static const int kStrictModeByteOffset
static const int kHeaderSize
static AllocationSiteMode GetMode(ElementsKind boilerplate_elements_kind)
static Handle< T > null()
Condition NegateCondition(Condition cond)
kInstanceClassNameOffset, kNeedsAccessCheckBit, kRemovePrototypeBit, kIsExpressionBit, kAllowLazyCompilation, kUsesArguments, formal_parameter_count
static const int kConstructorOffset
const uint32_t kOneByteStringTag
static double canonical_not_the_hole_nan_as_double()
#define ASSERT_NE(v1, v2)
static const int kIsUndetectable
static const int kHeaderSize
Register ToRegister(LOperand *op) const
void Add(const T &element, AllocationPolicy allocator=AllocationPolicy())
void DoDeferredTaggedToI(LTaggedToI *instr)
static const int kPrototypeOffset
static const int kHashShift
#define RUNTIME_ENTRY(name, nargs, ressize)
static const int kMaxLength
static void EmitMathExp(MacroAssembler *masm, DwVfpRegister input, DwVfpRegister result, DwVfpRegister double_scratch1, DwVfpRegister double_scratch2, Register temp1, Register temp2, Register temp3)
bool IsNextEmittedBlock(int block_id) const
LCodeGen(LChunk *chunk, MacroAssembler *assembler, CompilationInfo *info)
static const int kSharedFunctionInfoOffset
static const int kMaxValue
friend class SafepointGenerator
ElementsKind GetHoleyElementsKind(ElementsKind packed_kind)
static const int32_t kMaxOneByteCharCode
static const int kExponentOffset
bool EvalComparison(Token::Value op, double op1, double op2)
const uint32_t kStringEncodingMask
static const int kInstanceTypeOffset
static const int kMantissaOffset
friend class LDeferredCode
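As a rough illustration of the ElementsKindToShiftSize helper listed above: it maps an elements kind to the log2 of the element size in bytes, which the code generator uses as the scale factor when computing an element's address (base + (index << shift) + header offset). The sketch below is a minimal standalone approximation, not V8's actual implementation; the enum, names, and size choices are assumptions for illustration only.

// Minimal sketch, assuming shift size = log2(element size in bytes).
// Not V8's implementation; names below are hypothetical.
#include <cassert>

enum SketchElementsKind {
  SKETCH_INT8_ELEMENTS,    // 1-byte external elements
  SKETCH_INT32_ELEMENTS,   // 4-byte external elements
  SKETCH_DOUBLE_ELEMENTS,  // 8-byte unboxed doubles
  SKETCH_TAGGED_ELEMENTS   // pointer-sized tagged slots (8 bytes on x64)
};

static int SketchElementsKindToShiftSize(SketchElementsKind kind) {
  switch (kind) {
    case SKETCH_INT8_ELEMENTS:   return 0;
    case SKETCH_INT32_ELEMENTS:  return 2;
    case SKETCH_DOUBLE_ELEMENTS: return 3;
    case SKETCH_TAGGED_ELEMENTS: return 3;
  }
  return 0;
}

int main() {
  // element_address = elements_base + (index << shift) + header_offset
  assert(SketchElementsKindToShiftSize(SKETCH_DOUBLE_ELEMENTS) == 3);
  assert(SketchElementsKindToShiftSize(SKETCH_INT32_ELEMENTS) == 2);
  return 0;
}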