class SafepointGenerator V8_FINAL : public CallWrapper {
 public:
  SafepointGenerator(LCodeGen* codegen,
                     LPointerMap* pointers,
                     Safepoint::DeoptMode mode)
      : codegen_(codegen), pointers_(pointers), deopt_mode_(mode) { }

  virtual void AfterCall() const {
    codegen_->RecordSafepoint(pointers_, deopt_mode_);
  }

 private:
  LCodeGen* codegen_;
  LPointerMap* pointers_;
  Safepoint::DeoptMode deopt_mode_;
};
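
// Emit / EmitInverted bodies of the BranchGenerator helpers (BranchOnCondition,
// CompareAndBranch, TestAndBranch, BranchIfNonZeroNumber, BranchIfHeapNumber,
// BranchIfRoot) that EmitBranchGeneric dispatches to further down.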
    : BranchGenerator(codegen),

  virtual void Emit(Label* label) const {

    : BranchGenerator(codegen),

  virtual void Emit(Label* label) const {

    : BranchGenerator(codegen),

  virtual void Emit(Label* label) const {
    __ TestAndBranchIfAllClear(value_, mask_, label);
    __ TestAndBranchIfAnySet(value_, mask_, label);
    __ Tst(value_, mask_);

    __ TestAndBranchIfAnySet(value_, mask_, label);
    __ TestAndBranchIfAllClear(value_, mask_, label);
    __ Tst(value_, mask_);

    : BranchGenerator(codegen), value_(value), scratch_(scratch) { }

  virtual void Emit(Label* label) const {
    __ Fabs(scratch_, value_);
    __ Fcmp(scratch_, 0.0);

    __ Fabs(scratch_, value_);
    __ Fcmp(scratch_, 0.0);

    : BranchGenerator(codegen), value_(value) { }

  virtual void Emit(Label* label) const {
    __ JumpIfHeapNumber(value_, label);

    __ JumpIfNotHeapNumber(value_, label);

    : BranchGenerator(codegen), value_(value), index_(index) { }

  virtual void Emit(Label* label) const {
    __ JumpIfRoot(value_, index_, label);

    __ JumpIfNotRoot(value_, index_, label);
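
// WriteTranslation records the frame layout of an LEnvironment (and its frame
// type) into a Translation, so the deoptimizer can rebuild unoptimized frames.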
void LCodeGen::WriteTranslation(LEnvironment* environment,
                                Translation* translation) {
  if (environment == NULL) return;

  int translation_size = environment->translation_size();
  int height = translation_size - environment->parameter_count();

  bool has_closure_id = !info()->closure().is_null() &&
      !info()->closure().is_identical_to(environment->closure());
  int closure_id = has_closure_id
      ? DefineDeoptimizationLiteral(environment->closure())
      : Translation::kSelfLiteralId;

  switch (environment->frame_type()) {
      translation->BeginJSFrame(environment->ast_id(), closure_id, height);
      translation->BeginConstructStubFrame(closure_id, translation_size);
      ASSERT(translation_size == 1);
      translation->BeginGetterStubFrame(closure_id);
      ASSERT(translation_size == 2);
      translation->BeginSetterStubFrame(closure_id);
      translation->BeginCompiledStubFrame();
      translation->BeginArgumentsAdaptorFrame(closure_id, translation_size);
  }

  int object_index = 0;
  int dematerialized_index = 0;
  for (int i = 0; i < translation_size; ++i) {
    LOperand* value = environment->values()->at(i);
    AddToTranslation(environment,
                     translation,
                     value,
                     environment->HasTaggedValueAt(i),
                     environment->HasUint32ValueAt(i),
                     &object_index,
                     &dematerialized_index);
  }
}
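
// AddToTranslation encodes a single LOperand into the Translation, recursing
// into captured/duplicated objects that must be materialized on deopt.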
void LCodeGen::AddToTranslation(LEnvironment* environment,
                                Translation* translation,
                                LOperand* op,
                                bool is_tagged,
                                bool is_uint32,
                                int* object_index_pointer,
                                int* dematerialized_index_pointer) {
  if (op == LEnvironment::materialization_marker()) {
    int object_index = (*object_index_pointer)++;
    if (environment->ObjectIsDuplicateAt(object_index)) {
      int dupe_of = environment->ObjectDuplicateOfAt(object_index);
      translation->DuplicateObject(dupe_of);
      return;
    }
    int object_length = environment->ObjectLengthAt(object_index);
    if (environment->ObjectIsArgumentsAt(object_index)) {
      translation->BeginArgumentsObject(object_length);
    } else {
      translation->BeginCapturedObject(object_length);
    }
    int dematerialized_index = *dematerialized_index_pointer;
    int env_offset = environment->translation_size() + dematerialized_index;
    *dematerialized_index_pointer += object_length;
    for (int i = 0; i < object_length; ++i) {
      LOperand* value = environment->values()->at(env_offset + i);
      AddToTranslation(environment,
                       translation,
                       value,
                       environment->HasTaggedValueAt(env_offset + i),
                       environment->HasUint32ValueAt(env_offset + i),
                       object_index_pointer,
                       dematerialized_index_pointer);
    }
    return;
  }

  if (op->IsStackSlot()) {
    if (is_tagged) {
      translation->StoreStackSlot(op->index());
    } else if (is_uint32) {
      translation->StoreUint32StackSlot(op->index());
    } else {
      translation->StoreInt32StackSlot(op->index());
    }
  } else if (op->IsDoubleStackSlot()) {
    translation->StoreDoubleStackSlot(op->index());
  } else if (op->IsRegister()) {
    Register reg = ToRegister(op);
    if (is_tagged) {
      translation->StoreRegister(reg);
    } else if (is_uint32) {
      translation->StoreUint32Register(reg);
    } else {
      translation->StoreInt32Register(reg);
    }
  } else if (op->IsDoubleRegister()) {
    DoubleRegister reg = ToDoubleRegister(op);
    translation->StoreDoubleRegister(reg);
  } else if (op->IsConstantOperand()) {
    HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op));
    int src_index = DefineDeoptimizationLiteral(constant->handle(isolate()));
    translation->StoreLiteral(src_index);
  }
}
int LCodeGen::DefineDeoptimizationLiteral(Handle<Object> literal) {
  int result = deoptimization_literals_.length();
  for (int i = 0; i < deoptimization_literals_.length(); ++i) {
    if (deoptimization_literals_[i].is_identical_to(literal)) return i;
  }
  deoptimization_literals_.Add(literal, zone());
  return result;
}
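
// Deoptimization bookkeeping: each environment is registered once, producing a
// Translation plus an entry in deoptimizations_ that later feeds
// PopulateDeoptimizationData().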
void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
                                                    Safepoint::DeoptMode mode) {
  if (!environment->HasBeenRegistered()) {
    int jsframe_count = 0;
    Translation translation(&translations_, frame_count, jsframe_count, zone());
    int deoptimization_index = deoptimizations_.length();
    int pc_offset = masm()->pc_offset();
    environment->Register(deoptimization_index,
                          (mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
    deoptimizations_.Add(environment, zone());
void LCodeGen::CallCode(Handle<Code> code,
                        RelocInfo::Mode mode,
                        LInstruction* instr) {
  CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT);
}

void LCodeGen::CallCodeGeneric(Handle<Code> code,
                               RelocInfo::Mode mode,
                               LInstruction* instr,
                               SafepointMode safepoint_mode) {
  Assembler::BlockPoolsScope scope(masm_);
  RecordSafepointWithLazyDeopt(instr, safepoint_mode);

  if ((code->kind() == Code::BINARY_OP_IC) ||
      (code->kind() == Code::COMPARE_IC)) {
void LCodeGen::DoCallFunction(LCallFunction* instr) {
  int arity = instr->arity();
  CallFunctionStub stub(arity, instr->hydrogen()->function_flags());
  CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
}

void LCodeGen::DoCallNew(LCallNew* instr) {
  ASSERT(instr->IsMarkedAsCall());
  __ Mov(x0, instr->arity());
  __ LoadRoot(x2, Heap::kUndefinedValueRootIndex);
  CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
}

void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
  ASSERT(instr->IsMarkedAsCall());
  __ Mov(x0, Operand(instr->arity()));
  __ LoadRoot(x2, Heap::kUndefinedValueRootIndex);

  if (instr->arity() == 0) {
    ArrayNoArgumentConstructorStub stub(kind, override_mode);
    CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
  } else if (instr->arity() == 1) {
    __ Cbz(x10, &packed_case);
    ArraySingleArgumentConstructorStub stub(holey_kind, override_mode);
    CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);

    __ Bind(&packed_case);
    ArraySingleArgumentConstructorStub stub(kind, override_mode);
    CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
  } else {
    ArrayNArgumentsConstructorStub stub(kind, override_mode);
    CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
  }
}
void LCodeGen::CallRuntime(const Runtime::Function* function,
                           int num_arguments,
                           LInstruction* instr,
                           SaveFPRegsMode save_doubles) {
  __ CallRuntime(function, num_arguments, save_doubles);

  RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
}

void LCodeGen::LoadContextFromDeferred(LOperand* context) {
  if (context->IsRegister()) {
  } else if (context->IsStackSlot()) {
  } else if (context->IsConstantOperand()) {
    HConstant* constant =
        chunk_->LookupConstant(LConstantOperand::cast(context));
    __ LoadHeapObject(cp,
                      Handle<HeapObject>::cast(constant->handle(isolate())));

void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
                                       int argc,
                                       LInstruction* instr,
                                       LOperand* context) {
  LoadContextFromDeferred(context);
  __ CallRuntimeSaveDoubles(id);
  RecordSafepointWithRegisters(
      instr->pointer_map(), argc, Safepoint::kNoLazyDeopt);
}
void LCodeGen::RecordAndWritePosition(int position) {
  if (position == RelocInfo::kNoPosition) return;
  masm()->positions_recorder()->RecordPosition(position);
  masm()->positions_recorder()->WriteRecordedPositions();
}

void LCodeGen::RecordSafepointWithLazyDeopt(LInstruction* instr,
                                            SafepointMode safepoint_mode) {
  if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
    RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt);
  } else {
    ASSERT(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
    RecordSafepointWithRegisters(
        instr->pointer_map(), 0, Safepoint::kLazyDeopt);
  }
}
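
// RecordSafepoint variants: kSimple for plain calls, kWithRegisters (and
// kWithRegistersAndDoubles) when register state must survive a runtime call.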
void LCodeGen::RecordSafepoint(LPointerMap* pointers,
                               Safepoint::Kind kind,
                               int arguments,
                               Safepoint::DeoptMode deopt_mode) {
  ASSERT(expected_safepoint_kind_ == kind);

  const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();
  Safepoint safepoint = safepoints_.DefineSafepoint(
      masm(), kind, arguments, deopt_mode);

  for (int i = 0; i < operands->length(); i++) {
    LOperand* pointer = operands->at(i);
    if (pointer->IsStackSlot()) {
      safepoint.DefinePointerSlot(pointer->index(), zone());
    } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
      safepoint.DefinePointerRegister(ToRegister(pointer), zone());
    }
  }

  if (kind & Safepoint::kWithRegisters) {
    safepoint.DefinePointerRegister(cp, zone());
  }
}

void LCodeGen::RecordSafepoint(LPointerMap* pointers,
                               Safepoint::DeoptMode deopt_mode) {
  RecordSafepoint(pointers, Safepoint::kSimple, 0, deopt_mode);
}

void LCodeGen::RecordSafepoint(Safepoint::DeoptMode deopt_mode) {
  LPointerMap empty_pointers(zone());
  RecordSafepoint(&empty_pointers, deopt_mode);
}

void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
                                            int arguments,
                                            Safepoint::DeoptMode deopt_mode) {
  RecordSafepoint(pointers, Safepoint::kWithRegisters, arguments, deopt_mode);
}

void LCodeGen::RecordSafepointWithRegistersAndDoubles(
    LPointerMap* pointers, int arguments, Safepoint::DeoptMode deopt_mode) {
  RecordSafepoint(
      pointers, Safepoint::kWithRegistersAndDoubles, arguments, deopt_mode);
}
  LPhase phase("Z_Code generation", chunk());
  status_ = GENERATING;

  return GeneratePrologue() &&
         GenerateDeferredCode() &&
         GenerateDeoptJumpTable() &&
         GenerateSafepointTable();
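
// SaveCallerDoubles / RestoreCallerDoubles iterate over the chunk's allocated
// double registers and spill or reload each one; used when the code saves
// caller doubles (see the checks of info()->saves_caller_doubles() below).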
void LCodeGen::SaveCallerDoubles() {
  Comment(";;; Save clobbered callee double registers");
  BitVector* doubles = chunk()->allocated_double_registers();
  BitVector::Iterator iterator(doubles);
  while (!iterator.Done()) {

void LCodeGen::RestoreCallerDoubles() {
  Comment(";;; Restore clobbered callee double registers");
  BitVector* doubles = chunk()->allocated_double_registers();
  BitVector::Iterator iterator(doubles);
  while (!iterator.Done()) {

bool LCodeGen::GeneratePrologue() {
  if (info()->IsOptimizing()) {
    if (info_->this_has_uses() &&
        info_->strict_mode() == SLOPPY &&
        !info_->is_native()) {
      int receiver_offset = info_->scope()->num_parameters() * kXRegSize;
      __ Peek(x10, receiver_offset);
      __ JumpIfNotRoot(x10, Heap::kUndefinedValueRootIndex, &ok);
      __ Poke(x10, receiver_offset);
    }
  }

  info()->set_prologue_offset(masm_->pc_offset());
  frame_is_built_ = true;
  info_->AddNoFrameRange(0, masm_->pc_offset());

  int slots = GetStackSlotCount();

  if (info()->saves_caller_doubles()) {

  if (heap_slots > 0) {
    Comment(";;; Allocate local context");
    if (heap_slots <= FastNewContextStub::kMaximumSlots) {
      FastNewContextStub stub(heap_slots);
    } else {
      __ CallRuntime(Runtime::kHiddenNewFunctionContext, 1);
    }
    RecordSafepoint(Safepoint::kNoLazyDeopt);

    for (int i = 0; i < num_parameters; i++) {
      if (var->IsContextSlot()) {
        Register scratch = x3;
        __ Str(value, target);
        __ RecordWriteContextSlot(cp, target.offset(), value, scratch,
      }
    }
    Comment(";;; End allocate local context");
  }

  if (FLAG_trace && info()->IsOptimizing()) {
    __ CallRuntime(Runtime::kTraceEnter, 0);
  }
  return !is_aborted();
}

void LCodeGen::GenerateOsrPrologue() {
  if (osr_pc_offset_ >= 0) return;

  osr_pc_offset_ = masm()->pc_offset();
  int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots();
}

void LCodeGen::GenerateBodyInstructionPre(LInstruction* instr) {
  if (instr->IsCall()) {
  if (!instr->IsLazyBailout() && !instr->IsGap()) {
    safepoints_.BumpLastLazySafepointIndex();
  }
}

bool LCodeGen::GenerateDeferredCode() {
  if (deferred_.length() > 0) {
    for (int i = 0; !is_aborted() && (i < deferred_.length()); i++) {
          instructions_->at(code->instruction_index())->hydrogen_value();
      RecordAndWritePosition(
          chunk()->graph()->SourcePositionToScriptPosition(value->position()));
      Comment(";;; <@%d,#%d> "
              "-------------------- Deferred %s --------------------",
              code->instruction_index(),
              code->instr()->hydrogen_value()->id(),
              code->instr()->Mnemonic());

      __ Bind(code->entry());
      Comment(";;; Build frame");
      frame_is_built_ = true;
      __ Add(fp, __ StackPointer(),
      Comment(";;; Deferred code");
      Comment(";;; Destroy frame");
      frame_is_built_ = false;
    }
  }

  masm()->CheckConstPool(true, false);
  return !is_aborted();
}
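
// The deopt jump table groups bailouts that share a deoptimizer entry; entries
// marked needs_frame build one before calling into the deoptimizer.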
bool LCodeGen::GenerateDeoptJumpTable() {
  if (deopt_jump_table_.length() > 0) {
    Comment(";;; -------------------- Jump table --------------------");
    __ bind(&table_start);
    for (int i = 0; i < deopt_jump_table_.length(); i++) {
      __ Bind(&deopt_jump_table_[i]->label);
      Address entry = deopt_jump_table_[i]->address;
      Comment(";;; jump table entry %d.", i);
      Comment(";;; jump table entry %d: deoptimization bailout %d.", i, id);
      if (deopt_jump_table_[i]->needs_frame) {
        UseScratchRegisterScope temps(masm());
        Register stub_deopt_entry = temps.AcquireX();
        Register stub_marker = temps.AcquireX();

        __ Mov(stub_deopt_entry, ExternalReference::ForDeoptEntry(entry));
        if (needs_frame.is_bound()) {
        __ Bind(&needs_frame);
        __ Call(stub_deopt_entry);
      if (info()->saves_caller_doubles()) {
        RestoreCallerDoubles();
      }
      masm()->CheckConstPool(false, false);

  masm()->CheckConstPool(true, false);

  if (!is_aborted()) status_ = DONE;
  return !is_aborted();
}

bool LCodeGen::GenerateSafepointTable() {
  masm()->CheckVeneerPool(true, true);
  safepoints_.Emit(masm(), GetStackSlotCount());
  return !is_aborted();
}

  code->set_stack_slots(GetStackSlotCount());
  code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
  if (code->is_optimized_code()) RegisterWeakObjectsInOptimizedCode(code);
  PopulateDeoptimizationData(code);
  info()->CommitDependencies(code);

  info()->set_bailout_reason(reason);

void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
  int length = deoptimizations_.length();
  if (length == 0) return;

  Handle<DeoptimizationInputData> data =
      factory()->NewDeoptimizationInputData(length, TENURED);

  Handle<ByteArray> translations =
      translations_.CreateByteArray(isolate()->factory());
  data->SetTranslationByteArray(*translations);
  data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));
  data->SetOptimizationId(Smi::FromInt(info_->optimization_id()));
  if (info_->IsOptimizing()) {
    data->SetSharedFunctionInfo(*info_->shared_info());
  }

      factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
  for (int i = 0; i < deoptimization_literals_.length(); i++) {
    literals->set(i, *deoptimization_literals_[i]);
  }
  data->SetLiteralArray(*literals);

  data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id().ToInt()));

  for (int i = 0; i < length; i++) {
    data->SetAstId(i, env->ast_id());
    data->SetTranslationIndex(i, Smi::FromInt(env->translation_index()));
    data->SetArgumentsStackHeight(i,
  }
  code->set_deoptimization_data(*data);
}

void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
  ASSERT(deoptimization_literals_.length() == 0);

  const ZoneList<Handle<JSFunction> >* inlined_closures =
      chunk()->inlined_closures();

  for (int i = 0, length = inlined_closures->length(); i < length; i++) {
    DefineDeoptimizationLiteral(inlined_closures->at(i));
  }

  inlined_function_count_ = deoptimization_literals_.length();
}
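
// All DeoptimizeIf* helpers funnel into DeoptimizeBranch, which either branches
// straight to the deoptimizer entry or adds a shared jump table entry.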
void LCodeGen::DeoptimizeBranch(
    LEnvironment* environment,
    BranchType branch_type, Register reg, int bit,
    Deoptimizer::BailoutType* override_bailout_type) {
  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);

  if (override_bailout_type != NULL) {
    bailout_type = *override_bailout_type;
  }

  ASSERT(environment->HasBeenRegistered());
  int id = environment->deoptimization_index();

  if (entry == NULL) {
    Abort(kBailoutWasNotPrepared);
  }

  if (FLAG_deopt_every_n_times != 0 && !info()->IsStub()) {
    ExternalReference count = ExternalReference::stress_deopt_count(isolate());

    __ Push(x0, x1, x2);
    __ B(gt, &not_zero);
    __ Mov(w1, FLAG_deopt_every_n_times);
  }

  if (info()->ShouldTrapOnDeopt()) {
    __ Debug("trap_on_deopt", __LINE__, BREAK);
    __ Bind(&dont_trap);
  }

  if (branch_type == always &&
      frame_is_built_ && !info()->saves_caller_doubles()) {
  } else {
    if (deopt_jump_table_.is_empty() ||
        (deopt_jump_table_.last()->address != entry) ||
        (deopt_jump_table_.last()->bailout_type != bailout_type) ||
        (deopt_jump_table_.last()->needs_frame != !frame_is_built_)) {
      Deoptimizer::JumpTableEntry* table_entry =
          new(zone()) Deoptimizer::JumpTableEntry(entry,
      deopt_jump_table_.Add(table_entry, zone());
    }
    __ B(&deopt_jump_table_.last()->label,
         branch_type, reg, bit);
  }
}
void LCodeGen::Deoptimize(LEnvironment* environment,
                          Deoptimizer::BailoutType* override_bailout_type) {
  DeoptimizeBranch(environment, always, NoReg, -1, override_bailout_type);
}

void LCodeGen::DeoptimizeIf(Condition cond, LEnvironment* environment) {
  DeoptimizeBranch(environment, static_cast<BranchType>(cond));
}

void LCodeGen::DeoptimizeIfZero(Register rt, LEnvironment* environment) {
  DeoptimizeBranch(environment, reg_zero, rt);
}

void LCodeGen::DeoptimizeIfNotZero(Register rt, LEnvironment* environment) {

void LCodeGen::DeoptimizeIfNegative(Register rt, LEnvironment* environment) {
  DeoptimizeIfBitSet(rt, sign_bit, environment);
}

void LCodeGen::DeoptimizeIfSmi(Register rt, LEnvironment* environment) {

void LCodeGen::DeoptimizeIfNotSmi(Register rt, LEnvironment* environment) {

void LCodeGen::DeoptimizeIfRoot(Register rt,
                                Heap::RootListIndex index,
                                LEnvironment* environment) {
  __ CompareRoot(rt, index);
  DeoptimizeIf(eq, environment);
}

void LCodeGen::DeoptimizeIfNotRoot(Register rt,
                                   Heap::RootListIndex index,
                                   LEnvironment* environment) {
  __ CompareRoot(rt, index);
  DeoptimizeIf(ne, environment);
}

void LCodeGen::DeoptimizeIfMinusZero(DoubleRegister input,
                                     LEnvironment* environment) {
  __ TestForMinusZero(input);
  DeoptimizeIf(vs, environment);
}

void LCodeGen::DeoptimizeIfBitSet(Register rt,
                                  int bit,
                                  LEnvironment* environment) {
  DeoptimizeBranch(environment, reg_bit_set, rt, bit);
}

void LCodeGen::DeoptimizeIfBitClear(Register rt,
                                    int bit,
                                    LEnvironment* environment) {
void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
  if (!info()->IsStub()) {
    intptr_t current_pc = masm()->pc_offset();

    if (current_pc < (last_lazy_deopt_pc_ + space_needed)) {
      ptrdiff_t padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
      InstructionAccurateScope instruction_accurate(
      while (padding_size > 0) {

  last_lazy_deopt_pc_ = masm()->pc_offset();
}

  if (op->IsConstantOperand()) {

  HConstant* constant = chunk_->LookupConstant(op);

  ASSERT((op != NULL) && op->IsDoubleRegister());

  if (op->IsConstantOperand()) {
    LConstantOperand* const_op = LConstantOperand::cast(op);
    HConstant* constant = chunk()->LookupConstant(const_op);
    Representation r = chunk_->LookupLiteralRepresentation(const_op);
      ASSERT(constant->HasSmiValue());
      return Operand(Smi::FromInt(constant->Integer32Value()));
    } else if (r.IsInteger32()) {
      ASSERT(constant->HasInteger32Value());
      return Operand(constant->Integer32Value());
    } else if (r.IsDouble()) {
      Abort(kToOperandUnsupportedDoubleImmediate);
    }
    return Operand(constant->handle(isolate()));
  } else if (op->IsRegister()) {
  } else if (op->IsDoubleRegister()) {
    Abort(kToOperandIsDoubleRegisterUnimplemented);
Operand LCodeGen::ToOperand32(LOperand* op, IntegerSignedness signedness) {
  if (op->IsRegister()) {
  } else if (op->IsConstantOperand()) {
    LConstantOperand* const_op = LConstantOperand::cast(op);
    HConstant* constant = chunk()->LookupConstant(const_op);
    Representation r = chunk_->LookupLiteralRepresentation(const_op);
    if (r.IsInteger32()) {
      ASSERT(constant->HasInteger32Value());
      return Operand((signedness == SIGNED_INT32)
                         ? constant->Integer32Value()
                         : static_cast<uint32_t>(constant->Integer32Value()));
    } else {
      Abort(kToOperand32UnsupportedImmediate);
    }
  }
static ptrdiff_t ArgumentsOffsetWithoutFrame(ptrdiff_t index) {

  ASSERT(!op->IsRegister());
  ASSERT(!op->IsDoubleRegister());
  ASSERT(op->IsStackSlot() || op->IsDoubleStackSlot());
                    ArgumentsOffsetWithoutFrame(op->index()));

  HConstant* constant = chunk_->LookupConstant(op);
  ASSERT(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged());
  return constant->handle(isolate());

  return chunk_->LookupLiteralRepresentation(op).IsSmi();

  return chunk_->LookupLiteralRepresentation(op).IsSmiOrInteger32();

  HConstant* constant = chunk_->LookupConstant(op);
  return constant->Integer32Value();

  HConstant* constant = chunk_->LookupConstant(op);
  ASSERT(constant->HasDoubleValue());
  return constant->DoubleValue();

    case Token::EQ_STRICT:
    case Token::NE_STRICT:
      cond = is_unsigned ? lo : lt;
      cond = is_unsigned ? hi : gt;
      cond = is_unsigned ? ls : le;
      cond = is_unsigned ? hs : ge;
    case Token::INSTANCEOF:
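
// EmitBranchGeneric picks the cheapest control flow for a branch instruction:
// a goto when both targets agree, an inverted branch when the true block is the
// fall-through, and branch-plus-jump otherwise.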
template<class InstrType>
void LCodeGen::EmitBranchGeneric(InstrType instr,
                                 const BranchGenerator& branch) {
  int left_block = instr->TrueDestination(chunk_);
  int right_block = instr->FalseDestination(chunk_);

  int next_block = GetNextEmittedBlock();

  if (right_block == left_block) {
    EmitGoto(left_block);
  } else if (left_block == next_block) {
    branch.EmitInverted(chunk_->GetAssemblyLabel(right_block));
  } else if (right_block == next_block) {
    branch.Emit(chunk_->GetAssemblyLabel(left_block));
  } else {
    branch.Emit(chunk_->GetAssemblyLabel(left_block));
    __ B(chunk_->GetAssemblyLabel(right_block));
  }
}
template<class InstrType>
void LCodeGen::EmitBranch(InstrType instr, Condition condition) {
  ASSERT((condition != al) && (condition != nv));
  BranchOnCondition branch(this, condition);
  EmitBranchGeneric(instr, branch);
}

template<class InstrType>
void LCodeGen::EmitCompareAndBranch(InstrType instr,
                                    Condition condition,
                                    const Register& lhs,
                                    const Operand& rhs) {
  ASSERT((condition != al) && (condition != nv));
  CompareAndBranch branch(this, condition, lhs, rhs);
  EmitBranchGeneric(instr, branch);
}

template<class InstrType>
void LCodeGen::EmitTestAndBranch(InstrType instr,
                                 Condition condition,
                                 const Register& value,
                                 uint64_t mask) {
  ASSERT((condition != al) && (condition != nv));
  TestAndBranch branch(this, condition, value, mask);
  EmitBranchGeneric(instr, branch);
}

template<class InstrType>
void LCodeGen::EmitBranchIfNonZeroNumber(InstrType instr,
                                         const FPRegister& value,
                                         const FPRegister& scratch) {
  BranchIfNonZeroNumber branch(this, value, scratch);
  EmitBranchGeneric(instr, branch);
}

template<class InstrType>
void LCodeGen::EmitBranchIfHeapNumber(InstrType instr,
                                      const Register& value) {
  BranchIfHeapNumber branch(this, value);
  EmitBranchGeneric(instr, branch);
}

template<class InstrType>
void LCodeGen::EmitBranchIfRoot(InstrType instr,
                                const Register& value,
                                Heap::RootListIndex index) {
  BranchIfRoot branch(this, value, index);
  EmitBranchGeneric(instr, branch);
}
        LParallelMove* move = gap->GetParallelMove(inner_pos);
        resolver_.Resolve(move);

void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
  Register arguments = ToRegister(instr->arguments());
  Register result = ToRegister(instr->result());

  if (instr->length()->IsConstantOperand() &&
      instr->index()->IsConstantOperand()) {
    int index = ToInteger32(LConstantOperand::cast(instr->index()));
    int length = ToInteger32(LConstantOperand::cast(instr->length()));
  } else if (instr->index()->IsConstantOperand()) {
    int index = ToInteger32(LConstantOperand::cast(instr->index()));
    int loc = index - 1;
    __ Sub(result.W(), length, loc);

  __ Sub(result.W(), length, index);
  __ Add(result.W(), result.W(), 1);

void LCodeGen::DoAddE(LAddE* instr) {
  Register result = ToRegister(instr->result());
  Operand right = (instr->right()->IsConstantOperand())
      ? ToInteger32(LConstantOperand::cast(instr->right()))

  __ Add(result, left, right);
}

void LCodeGen::DoAddI(LAddI* instr) {
    __ Adds(result, left, right);
    DeoptimizeIf(vs, instr->environment());

    __ Add(result, left, right);
}

void LCodeGen::DoAddS(LAddS* instr) {
  Register result = ToRegister(instr->result());
  Operand right = ToOperand(instr->right());

    __ Adds(result, left, right);
    DeoptimizeIf(vs, instr->environment());

    __ Add(result, left, right);
}

void LCodeGen::DoAllocate(LAllocate* instr) {
    DeferredAllocate(LCodeGen* codegen, LAllocate* instr)
    virtual void Generate() { codegen()->DoDeferredAllocate(instr_); }
    virtual LInstruction* instr() { return instr_; }

  DeferredAllocate* deferred = new(zone()) DeferredAllocate(this, instr);

  Register result = ToRegister(instr->result());

  if (instr->hydrogen()->MustAllocateDoubleAligned()) {

  if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
    ASSERT(!instr->hydrogen()->IsOldDataSpaceAllocation());
    ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
  } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
    ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());

  if (instr->size()->IsConstantOperand()) {
      __ Allocate(size, result, temp1, temp2, deferred->entry(), flags);

      __ B(deferred->entry());

    __ Allocate(size.X(), result, temp1, temp2, deferred->entry(), flags);

  __ Bind(deferred->exit());

  if (instr->hydrogen()->MustPrefillWithFiller()) {
    Register filler_count = temp1;
    Register filler = temp2;
    Register untagged_result = ToRegister(instr->temp3());

    if (instr->size()->IsConstantOperand()) {

    __ Mov(filler, Operand(isolate()->factory()->one_pointer_filler_map()));
    __ FillFields(untagged_result, filler_count, filler);

  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);

  if (instr->size()->IsConstantOperand()) {
    __ Mov(size, ToSmi(LConstantOperand::cast(instr->size())));

      instr->hydrogen()->MustAllocateDoubleAligned());
  if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
    ASSERT(!instr->hydrogen()->IsOldDataSpaceAllocation());
    ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
  } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
    ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());

  CallRuntimeFromDeferred(
      Runtime::kHiddenAllocateInTargetSpace, 2, instr, instr->context());
  __ StoreToSafepointRegisterSlot(x0, ToRegister(instr->result()));

void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
  Register receiver = ToRegister(instr->receiver());
  Register function = ToRegister(instr->function());
  Register elements = ToRegister(instr->elements());
  Register scratch = x5;

  ASSERT(instr->IsMarkedAsCall());

  const uint32_t kArgumentsLimit = 1 * KB;
  __ Cmp(length, kArgumentsLimit);
  DeoptimizeIf(hi, instr->environment());

  Register argc = receiver;
  __ Sxtw(argc, length);

  __ Cbz(length, &invoke);
  __ Subs(length, length, 1);

  ASSERT(instr->HasPointerMap());
  LPointerMap* pointers = instr->pointer_map();

  ParameterCount actual(argc);
  __ InvokeFunction(function, actual, CALL_FUNCTION, safepoint_generator);
}

void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
  Register result = ToRegister(instr->result());

  if (instr->hydrogen()->from_inlined()) {

  ASSERT(masm()->StackPointer().Is(jssp));

  Register previous_fp = ToRegister(instr->temp());

  __ Csel(result, fp, previous_fp, ne);
}

void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
  Register elements = ToRegister(instr->elements());

  __ Cmp(fp, elements);
  __ Mov(result, scope()->num_parameters());
void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
  switch (instr->op()) {
        ExternalReference::mod_two_doubles_operation(isolate()),

void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
  CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
}

void LCodeGen::DoBitI(LBitI* instr) {
  switch (instr->op()) {
    case Token::BIT_AND: __ And(result, left, right); break;
    case Token::BIT_OR:  __ Orr(result, left, right); break;
    case Token::BIT_XOR: __ Eor(result, left, right); break;
  }
}

void LCodeGen::DoBitS(LBitS* instr) {
  Register result = ToRegister(instr->result());
  Operand right = ToOperand(instr->right());

  switch (instr->op()) {
    case Token::BIT_AND: __ And(result, left, right); break;
    case Token::BIT_OR:  __ Orr(result, left, right); break;
    case Token::BIT_XOR: __ Eor(result, left, right); break;
  }
}

  if (FLAG_debug_code && check->hydrogen()->skip_check()) {

  DeoptimizeIf(cc, check->environment());

void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
  if (instr->hydrogen()->skip_check()) return;

  ASSERT(instr->hydrogen()->length()->representation().IsInteger32());

  if (instr->index()->IsConstantOperand()) {
    int constant_index =
        ToInteger32(LConstantOperand::cast(instr->index()));

    if (instr->hydrogen()->length()->representation().IsSmi()) {

    __ Cmp(length, constant_index);

    ASSERT(instr->hydrogen()->index()->representation().IsInteger32());

  Condition condition = instr->hydrogen()->allow_equality() ? lo : ls;
  ApplyCheckIf(condition, instr);
}
void LCodeGen::DoBranch(LBranch* instr) {
  Representation r = instr->hydrogen()->value()->representation();
  Label* true_label = instr->TrueLabel(chunk_);
  Label* false_label = instr->FalseLabel(chunk_);

  if (r.IsInteger32()) {
    EmitCompareAndBranch(instr, ne, ToRegister32(instr->value()), 0);
  } else if (r.IsSmi()) {
    EmitCompareAndBranch(instr, ne, ToRegister(instr->value()), 0);
  } else if (r.IsDouble()) {
    EmitBranchIfNonZeroNumber(instr, value, double_scratch());
  } else {
    HType type = instr->hydrogen()->value()->type();

    if (type.IsBoolean()) {
      __ CompareRoot(value, Heap::kTrueValueRootIndex);
      EmitBranch(instr, eq);
    } else if (type.IsSmi()) {
    } else if (type.IsJSArray()) {
      EmitGoto(instr->TrueDestination(chunk()));
    } else if (type.IsHeapNumber()) {
      EmitBranchIfNonZeroNumber(instr, double_scratch(), double_scratch());
    } else if (type.IsString()) {
      EmitCompareAndBranch(instr, ne, temp, 0);
    } else {
      ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types();

      __ JumpIfRoot(value, Heap::kUndefinedValueRootIndex, false_label);
      __ JumpIfRoot(value, Heap::kTrueValueRootIndex, true_label);
      __ JumpIfRoot(value, Heap::kFalseValueRootIndex, false_label);
      __ JumpIfRoot(value, Heap::kNullValueRootIndex, false_label);

      __ Cbz(value, false_label);
      __ JumpIfSmi(value, true_label);
      } else if (expected.NeedsMap()) {
        DeoptimizeIfSmi(value, instr->environment());

      Register map = NoReg;
      Register scratch = NoReg;

      if (expected.NeedsMap()) {
        if (expected.CanBeUndetectable()) {
          __ TestAndBranchIfAnySet(

        __ B(ge, true_label);

        __ B(ge, &not_string);
        __ Cbz(scratch, false_label);
        __ Bind(&not_string);

        __ B(eq, true_label);

        Label not_heap_number;
        __ JumpIfNotRoot(map, Heap::kHeapNumberMapRootIndex, &not_heap_number);
        __ Ldr(double_scratch(),
        __ Fcmp(double_scratch(), 0.0);
        __ B(vs, false_label);
        __ B(eq, false_label);
        __ Bind(&not_heap_number);

      if (!expected.IsGeneric()) {
        Deoptimize(instr->environment());
      }
    }
  }
}
void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
                                 int formal_parameter_count,
                                 int arity,
                                 LInstruction* instr,
                                 Register function_reg) {
  bool dont_adapt_arguments =
  bool can_invoke_directly =
      dont_adapt_arguments || formal_parameter_count == arity;

  ASSERT(function_reg.Is(x1) || function_reg.IsNone());
  Register arity_reg = x0;

  LPointerMap* pointers = instr->pointer_map();

  if (function_reg.IsNone()) {
    __ LoadObject(function_reg, function);

  if (FLAG_debug_code) {
    __ JumpIfNotSmi(function_reg, &is_not_smi);
    __ Abort(kExpectedFunctionObject);
    __ Bind(&is_not_smi);
  }

  if (can_invoke_directly) {
    if (dont_adapt_arguments) {
      __ Mov(arity_reg, arity);
    }
    RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
  } else {
    ParameterCount count(arity);
    ParameterCount expected(formal_parameter_count);
    __ InvokeFunction(function_reg, expected, count, CALL_FUNCTION, generator);
  }
}
void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
  ASSERT(instr->IsMarkedAsCall());

  LPointerMap* pointers = instr->pointer_map();

  if (instr->target()->IsConstantOperand()) {
    LConstantOperand* target = LConstantOperand::cast(instr->target());
    generator.BeforeCall(__ CallSize(code, RelocInfo::CODE_TARGET));
  } else {
    ASSERT(instr->target()->IsRegister());
    Register target = ToRegister(instr->target());
    generator.BeforeCall(__ CallSize(target));
  }
  generator.AfterCall();
}

void LCodeGen::DoCallJSFunction(LCallJSFunction* instr) {
  ASSERT(instr->IsMarkedAsCall());

  if (instr->hydrogen()->pass_argument_count()) {
    __ Mov(x0, Operand(instr->arity()));
  }

  RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
}

void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
  CallRuntime(instr->function(), instr->arity(), instr);
}

void LCodeGen::DoCallStub(LCallStub* instr) {
  switch (instr->hydrogen()->major_key()) {
    case CodeStub::RegExpExec: {
      RegExpExecStub stub;
      CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
    }
    case CodeStub::SubString: {
      CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
    }
    case CodeStub::StringCompare: {
      StringCompareStub stub;
      CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
    }
  }
}

void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
  GenerateOsrPrologue();
}

  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);

  __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance);
  RecordSafepointWithRegisters(
      instr->pointer_map(), 1, Safepoint::kNoLazyDeopt);
  __ StoreToSafepointRegisterSlot(x0, temp);

  DeoptimizeIfSmi(temp, instr->environment());

void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
    DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object)
      SetExit(check_maps());
    virtual void Generate() {
      codegen()->DoDeferredInstanceMigration(instr_, object_);
    }
    Label* check_maps() { return &check_maps_; }
    virtual LInstruction* instr() { return instr_; }

  if (instr->hydrogen()->CanOmitMapChecks()) {

  Register object = ToRegister(instr->value());
  Register map_reg = ToRegister(instr->temp());

  DeferredCheckMaps* deferred = NULL;
  if (instr->hydrogen()->has_migration_target()) {
    deferred = new(zone()) DeferredCheckMaps(this, instr, object);
    __ Bind(deferred->check_maps());
  }

  UniqueSet<Map> map_set = instr->hydrogen()->map_set();

  for (int i = 0; i < map_set.size(); i++) {
    Handle<Map> map = map_set.at(i).handle();
    __ CompareMap(map_reg, map);
  }

  if (instr->hydrogen()->has_migration_target()) {
    __ B(deferred->entry());
  } else {
    Deoptimize(instr->environment());
  }
}

void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
  if (!instr->hydrogen()->value()->IsHeapObject()) {
    DeoptimizeIfSmi(ToRegister(instr->value()), instr->environment());
  }
}

void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
  DeoptimizeIfNotSmi(value, instr->environment());
}

void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
  Register scratch = ToRegister(instr->temp());

  if (instr->hydrogen()->is_interval_check()) {
    instr->hydrogen()->GetCheckInterval(&first, &last);

    __ Cmp(scratch, first);
    if (first == last) {
      DeoptimizeIf(ne, instr->environment());

      DeoptimizeIf(lo, instr->environment());

      DeoptimizeIf(hi, instr->environment());
  } else {
    instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);

    ASSERT((tag == 0) || (tag == mask));
      DeoptimizeIfBitSet(scratch, MaskToBit(mask), instr->environment());
      DeoptimizeIfBitClear(scratch, MaskToBit(mask), instr->environment());

      __ Tst(scratch, mask);

      __ And(scratch, scratch, mask);
      __ Cmp(scratch, tag);

      DeoptimizeIf(ne, instr->environment());
  }
}

void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
  __ ClampDoubleToUint8(result, input, double_scratch());
}

void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
  __ ClampInt32ToUint8(result, input);
}

void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
  Register input = ToRegister(instr->unclamped());
  Register scratch = ToRegister(instr->temp1());

  __ JumpIfNotSmi(input, &is_not_smi);
  __ SmiUntag(result.X(), input);
  __ ClampInt32ToUint8(result);

  __ Bind(&is_not_smi);

  Label is_heap_number;
  __ JumpIfRoot(scratch, Heap::kHeapNumberMapRootIndex, &is_heap_number);

  DeoptimizeIfNotRoot(input, Heap::kUndefinedValueRootIndex,
                      instr->environment());

  __ Bind(&is_heap_number);
  __ ClampDoubleToUint8(result, dbl_scratch, dbl_scratch2);
}

void LCodeGen::DoDoubleBits(LDoubleBits* instr) {
  Register result_reg = ToRegister(instr->result());
  if (instr->hydrogen()->bits() == HDoubleBits::HIGH) {
    __ Fmov(result_reg, value_reg);
    __ Mov(result_reg, Operand(result_reg, LSR, 32));
  } else {
    __ Fmov(result_reg.W(), value_reg.S());
  }
}

void LCodeGen::DoConstructDouble(LConstructDouble* instr) {
  __ And(temp, lo_reg, Operand(0xffffffff));
  __ Orr(temp, temp, Operand(hi_reg, LSL, 32));
  __ Fmov(result_reg, temp);
}

void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
  Handle<String> class_name = instr->hydrogen()->class_name();
  Label* true_label = instr->TrueLabel(chunk_);
  Label* false_label = instr->FalseLabel(chunk_);
  Register scratch1 = ToRegister(instr->temp1());
  Register scratch2 = ToRegister(instr->temp2());

  __ JumpIfSmi(input, false_label);

  Register map = scratch2;
  if (class_name->IsUtf8EqualTo(CStrVector("Function"))) {
    __ B(lt, false_label);
    __ B(eq, true_label);
    __ B(eq, true_label);
  } else {
    __ IsObjectJSObjectType(input, map, scratch1, false_label);
  }

  if (class_name->IsUtf8EqualTo(CStrVector("Object"))) {
    __ JumpIfNotObjectType(
  } else {
    __ JumpIfNotObjectType(
  }

  EmitCompareAndBranch(instr, eq, scratch1, Operand(class_name));
}

void LCodeGen::DoCmpHoleAndBranchD(LCmpHoleAndBranchD* instr) {
  ASSERT(instr->hydrogen()->representation().IsDouble());

  __ Fcmp(object, object);
  __ B(vc, instr->FalseLabel(chunk_));

  __ Fmov(temp, object);

void LCodeGen::DoCmpHoleAndBranchT(LCmpHoleAndBranchT* instr) {
  ASSERT(instr->hydrogen()->representation().IsTagged());
  Register object = ToRegister(instr->object());

  EmitBranchIfRoot(instr, object, Heap::kTheHoleValueRootIndex);
}

void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
  EmitCompareAndBranch(instr, eq, map, Operand(instr->map()));
}

void LCodeGen::DoCompareMinusZeroAndBranch(LCompareMinusZeroAndBranch* instr) {
  Representation rep = instr->hydrogen()->value()->representation();
  ASSERT(!rep.IsInteger32());
  Register scratch = ToRegister(instr->temp());

  if (rep.IsDouble()) {
                      instr->TrueLabel(chunk()));
  } else {
    __ CheckMap(value, scratch, Heap::kHeapNumberMapRootIndex,
    __ JumpIfMinusZero(double_scratch(), instr->TrueLabel(chunk()));
  }
  EmitGoto(instr->FalseDestination(chunk()));
}

void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();
  Condition cond = TokenToCondition(instr->op(), false);

  if (left->IsConstantOperand() && right->IsConstantOperand()) {
    double left_val = ToDouble(LConstantOperand::cast(left));
    double right_val = ToDouble(LConstantOperand::cast(right));
    int next_block = EvalComparison(instr->op(), left_val, right_val) ?
        instr->TrueDestination(chunk_) : instr->FalseDestination(chunk_);
    EmitGoto(next_block);
  } else {
    if (instr->is_double()) {
      if (right->IsConstantOperand()) {
                ToDouble(LConstantOperand::cast(right)));
      } else if (left->IsConstantOperand()) {
                ToDouble(LConstantOperand::cast(left)));
      }
      __ B(vs, instr->FalseLabel(chunk_));
      EmitBranch(instr, cond);
    } else {
      if (instr->hydrogen_value()->representation().IsInteger32()) {
        if (right->IsConstantOperand()) {
          EmitCompareAndBranch(instr,
        } else {
          EmitCompareAndBranch(instr,
        }
      } else {
        ASSERT(instr->hydrogen_value()->representation().IsSmi());
        if (right->IsConstantOperand()) {
          EmitCompareAndBranch(instr,
        } else if (left->IsConstantOperand()) {
          EmitCompareAndBranch(instr,
        } else {
          EmitCompareAndBranch(instr,
        }
      }
    }
  }
}

void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
  EmitCompareAndBranch(instr, eq, left, right);
}

void LCodeGen::DoCmpT(LCmpT* instr) {
  Condition cond = TokenToCondition(op, false);

  CallCode(ic, RelocInfo::CODE_TARGET, instr);

  ASSERT(instr->IsMarkedAsCall());
  __ LoadTrueFalseRoots(x1, x2);

void LCodeGen::DoConstantD(LConstantD* instr) {
  ASSERT(instr->result()->IsDoubleRegister());
  __ Fmov(result, instr->value());
}

void LCodeGen::DoConstantE(LConstantE* instr) {
  __ Mov(ToRegister(instr->result()), Operand(instr->value()));
}

void LCodeGen::DoConstantI(LConstantI* instr) {
  ASSERT(is_int32(instr->value()));
  __ Mov(ToRegister32(instr->result()), static_cast<uint32_t>(instr->value()));
}

void LCodeGen::DoConstantS(LConstantS* instr) {
  __ Mov(ToRegister(instr->result()), Operand(instr->value()));
}

void LCodeGen::DoConstantT(LConstantT* instr) {
  Handle<Object> value = instr->value(isolate());

void LCodeGen::DoContext(LContext* instr) {
  Register result = ToRegister(instr->result());
  if (info()->IsOptimizing()) {

void LCodeGen::DoCheckValue(LCheckValue* instr) {
  Handle<HeapObject> object = instr->hydrogen()->object().handle();

  if (isolate()->heap()->InNewSpace(*object)) {
    UseScratchRegisterScope temps(masm());
    Register temp = temps.AcquireX();
    Handle<Cell> cell = isolate()->factory()->NewCell(object);
    __ Mov(temp, Operand(Handle<Object>(cell)));
  } else {
    __ Cmp(reg, Operand(object));
  }
  DeoptimizeIf(ne, instr->environment());
}

void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
  last_lazy_deopt_pc_ = masm()->pc_offset();
  ASSERT(instr->HasEnvironment());

  RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
}

void LCodeGen::DoDateField(LDateField* instr) {
  Register result = ToRegister(instr->result());
  Register temp1 = x10;
  Register temp2 = x11;
  Smi* index = instr->index();
  Label runtime, done, deopt, obj_ok;

  ASSERT(object.is(result) && object.Is(x0));
  ASSERT(instr->IsMarkedAsCall());

  __ JumpIfSmi(object, &deopt);

  Deoptimize(instr->environment());

  if (index->value() == 0) {

    ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
    __ Mov(temp1, Operand(stamp));
    __ Cmp(temp1, temp2);

    __ Mov(x1, Operand(index));
    __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);

void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
  Comment(";;; deoptimize: %s", instr->hydrogen()->reason());
  Deoptimize(instr->environment(), &type);
}
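
// Integer division: the power-of-two and known-constant paths emit inline
// checks (divide-by-zero, minus-zero, lossy remainder) and deoptimize when the
// result would not be exact; DoDivI falls back to Sdiv plus a remainder check.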
void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
  int32_t divisor = instr->divisor();
  ASSERT(!result.is(dividend));

  HDiv* hdiv = instr->hydrogen();
    __ Cmp(dividend, 0);
    DeoptimizeIf(eq, instr->environment());

    DeoptimizeIf(eq, instr->environment());

      divisor != 1 && divisor != -1) {
    int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
    __ Tst(dividend, mask);
    DeoptimizeIf(ne, instr->environment());
  }

  if (divisor == -1) {
    __ Neg(result, dividend);

    __ Mov(result, dividend);
  } else if (shift == 1) {
    __ Add(result, dividend, Operand(dividend, LSR, 31));

    __ Mov(result, Operand(dividend, ASR, 31));
    __ Add(result, dividend, Operand(result, LSR, 32 - shift));

  if (shift > 0) __ Mov(result, Operand(result, ASR, shift));
  if (divisor < 0) __ Neg(result, result);
}

void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
  int32_t divisor = instr->divisor();
    Deoptimize(instr->environment());

  HDiv* hdiv = instr->hydrogen();
    DeoptimizeIfZero(dividend, instr->environment());

  __ TruncatingDiv(result, dividend, Abs(divisor));
  if (divisor < 0) __ Neg(result, result);

    __ Sxtw(dividend.X(), dividend);
    __ Mov(temp, divisor);
    __ Smsubl(temp.X(), result, temp, dividend.X());
    DeoptimizeIfNotZero(temp, instr->environment());
}

void LCodeGen::DoDivI(LDivI* instr) {
  HBinaryOperation* hdiv = instr->hydrogen();

  __ Sdiv(result, dividend, divisor);

    __ Cbz(divisor, &deopt);

    __ Cmp(dividend, 1);

    __ Msub(remainder, result, divisor, dividend);
    __ Cbnz(remainder, &deopt);

    Deoptimize(instr->environment());
}

void LCodeGen::DoDoubleToIntOrSmi(LDoubleToIntOrSmi* instr) {
    DeoptimizeIfMinusZero(input, instr->environment());

  __ TryConvertDoubleToInt32(result, input, double_scratch());
  DeoptimizeIf(ne, instr->environment());

  if (instr->tag_result()) {
    __ SmiTag(result.X());
  }
}

void LCodeGen::DoDrop(LDrop* instr) {
  __ Drop(instr->count());
}

void LCodeGen::DoDummy(LDummy* instr) {

void LCodeGen::DoDummyUse(LDummyUse* instr) {

void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
  ASSERT(instr->IsMarkedAsCall());

  bool pretenure = instr->hydrogen()->pretenure();
  if (!pretenure && instr->hydrogen()->has_no_literals()) {
    FastNewClosureStub stub(instr->hydrogen()->strict_mode(),
                            instr->hydrogen()->is_generator());
    __ Mov(x2, Operand(instr->hydrogen()->shared_info()));
    CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
  } else {
    __ Mov(x2, Operand(instr->hydrogen()->shared_info()));
    __ Mov(x1, Operand(pretenure ? factory()->true_value()
                                 : factory()->false_value()));
    __ Push(cp, x2, x1);
    CallRuntime(Runtime::kHiddenNewClosure, 3, instr);
  }
}

void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
  Register result = ToRegister(instr->result());
  Label load_cache, done;

  __ EnumLengthUntagged(result, map);
  __ Cbnz(result, &load_cache);

  __ Mov(result, Operand(isolate()->factory()->empty_fixed_array()));

  __ Bind(&load_cache);
  __ LoadInstanceDescriptors(map, result);

  DeoptimizeIfZero(result, instr->environment());
}

void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
  Register object = ToRegister(instr->object());
  Register null_value = x5;

  ASSERT(instr->IsMarkedAsCall());

  __ JumpIfRoot(object, Heap::kUndefinedValueRootIndex, &deopt);

  __ LoadRoot(null_value, Heap::kNullValueRootIndex);
  __ Cmp(object, null_value);

  __ JumpIfSmi(object, &deopt);

  Label use_cache, call_runtime;
  __ CheckEnumCache(object, null_value, x1, x2, x3, x4, &call_runtime);

  Deoptimize(instr->environment());

  __ Bind(&call_runtime);

  CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr);

  __ JumpIfNotRoot(x1, Heap::kMetaMapRootIndex, &deopt);

  __ Bind(&use_cache);
}

void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
  Register result = ToRegister(instr->result());

  __ AssertString(input);

  __ IndexFromHash(result, result);
}

void LCodeGen::EmitGoto(int block) {

void LCodeGen::DoGoto(LGoto* instr) {
  EmitGoto(instr->block_id());
}

void LCodeGen::DoHasCachedArrayIndexAndBranch(
    LHasCachedArrayIndexAndBranch* instr) {
  EmitBranch(instr, eq);
}

static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {

static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
  if (from == to) return eq;

void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
  Register scratch = ToRegister(instr->temp());

  if (!instr->hydrogen()->value()->IsHeapObject()) {
    __ JumpIfSmi(input, instr->FalseLabel(chunk_));
  }
  __ CompareObjectType(input, scratch, scratch, TestType(instr->hydrogen()));
  EmitBranch(instr, BranchCondition(instr->hydrogen()));
}

void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) {
  Register result = ToRegister(instr->result());
  Register base = ToRegister(instr->base_object());
  if (instr->offset()->IsConstantOperand()) {

void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
  CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);

  __ LoadTrueFalseRoots(x0, x1);
  __ Csel(x0, x0, x1, eq);
}

void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
    DeferredInstanceOfKnownGlobal(LCodeGen* codegen,
                                  LInstanceOfKnownGlobal* instr)
    virtual void Generate() {
      codegen()->DoDeferredInstanceOfKnownGlobal(instr_);
    }
    virtual LInstruction* instr() { return instr_; }
    LInstanceOfKnownGlobal* instr_;

  DeferredInstanceOfKnownGlobal* deferred =
      new(zone()) DeferredInstanceOfKnownGlobal(this, instr);

  Label map_check, return_false, cache_miss, done;
  Register object = ToRegister(instr->value());
  Register result = ToRegister(instr->result());

  Register map_check_site = x4;

  ASSERT(instr->IsMarkedAsCall());

  Register scratch = x10;

  __ JumpIfSmi(object, &return_false);

    InstructionAccurateScope scope(masm(), 5);
    __ bind(&map_check);

    Handle<Cell> cell = factory()->NewCell(factory()->the_hole_value());
    __ LoadRelocated(scratch, Operand(Handle<Object>(cell)));

    __ cmp(map, scratch);
    __ b(&cache_miss, ne);

    ASSERT(masm()->InstructionsGeneratedSince(&map_check) == 4);

    __ LoadRelocated(result, Operand(factory()->the_hole_value()));

  __ Bind(&cache_miss);

  __ Adr(map_check_site, &map_check);

  __ JumpIfRoot(object, Heap::kNullValueRootIndex, &return_false);

  __ IsObjectJSStringType(object, scratch, NULL, &return_false);
  __ B(deferred->entry());

  __ Bind(&return_false);
  __ LoadRoot(result, Heap::kFalseValueRootIndex);

  __ Bind(deferred->exit());
}

  Register result = ToRegister(instr->result());

  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
  LoadContextFromDeferred(instr->context());

  InstanceofStub stub(flags);
  CallCodeGeneric(stub.GetCode(isolate()),
                  RelocInfo::CODE_TARGET,
                  RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
  LEnvironment* env = instr->GetDeferredLazyDeoptimizationEnvironment();
  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());

  __ StoreToSafepointRegisterSlot(result, result);

void LCodeGen::DoInstructionGap(LInstructionGap* instr) {

void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
  __ Scvtf(result, value);
}

void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
  ASSERT(instr->HasPointerMap());

  Handle<JSFunction> known_function = instr->hydrogen()->known_function();
  if (known_function.is_null()) {
    LPointerMap* pointers = instr->pointer_map();

    ParameterCount count(instr->arity());
  } else {
    CallKnownFunction(known_function,
                      instr->hydrogen()->formal_parameter_count(),
  }
}

void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
  Label check_frame_marker;

  __ B(ne, &check_frame_marker);

  __ Bind(&check_frame_marker);

  EmitCompareAndBranch(
}

void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
  Label* is_object = instr->TrueLabel(chunk_);
  Label* is_not_object = instr->FalseLabel(chunk_);

  Register scratch = ToRegister(instr->temp2());

  __ JumpIfSmi(value, is_not_object);
  __ JumpIfRoot(value, Heap::kNullValueRootIndex, is_object);

  __ IsInstanceJSObjectType(map, scratch, NULL);

  EmitBranch(instr, le);
}

Condition LCodeGen::EmitIsString(Register input,
                                 Label* is_not_string,
  __ JumpIfSmi(input, is_not_string);

void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
  Register scratch = ToRegister(instr->temp());

      instr->hydrogen()->value()->IsHeapObject()

      EmitIsString(val, scratch, instr->FalseLabel(chunk_), check_needed);

  EmitBranch(instr, true_cond);
}

void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {

void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
  if (!instr->hydrogen()->value()->IsHeapObject()) {
    __ JumpIfSmi(input, instr->FalseLabel(chunk_));
  }

static const char* LabelType(LLabel* label) {
  if (label->is_loop_header()) return " (loop header)";
  if (label->is_osr_entry()) return " (OSR entry)";

void LCodeGen::DoLabel(LLabel* label) {
  Comment(";;; <@%d,#%d> -------------------- B%d%s --------------------",
          current_instruction_,
          label->hydrogen_value()->id(),

  __ Bind(label->label());
  current_block_ = label->block_id();
}

void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
  Register context = ToRegister(instr->context());
  Register result = ToRegister(instr->result());

  if (instr->hydrogen()->RequiresHoleCheck()) {
    if (instr->hydrogen()->DeoptimizesOnHole()) {
      DeoptimizeIfRoot(result, Heap::kTheHoleValueRootIndex,
                       instr->environment());
    } else {
      __ JumpIfNotRoot(result, Heap::kTheHoleValueRootIndex, &not_the_hole);
      __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
      __ Bind(&not_the_hole);
    }
  }
}

void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
  Register function = ToRegister(instr->function());
  Register result = ToRegister(instr->result());

  __ JumpIfRoot(result, Heap::kTheHoleValueRootIndex, &deopt);

  __ CompareObjectType(result, temp, temp, MAP_TYPE);

  __ Bind(&non_instance);

  Deoptimize(instr->environment());
}

void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
  Register result = ToRegister(instr->result());
  __ Mov(result, Operand(Handle<Object>(instr->hydrogen()->cell().handle())));

  if (instr->hydrogen()->RequiresHoleCheck()) {
        result, Heap::kTheHoleValueRootIndex, instr->environment());
  }
}

void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
  __ Mov(x2, Operand(instr->name()));

  CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
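
// Keyed loads from external/typed arrays: the MemOperand is built from the
// constant key or from a (possibly smi-tagged) key register scaled by the
// element size, before the element is loaded and (for uint32) range-checked.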
3352 MemOperand LCodeGen::PrepareKeyedExternalArrayOperand(
3357 bool key_is_constant,
3360 int additional_index) {
3366 if (key_is_constant) {
3367 int base_offset = ((constant_key + additional_index) << element_size_shift);
3368 return MemOperand(base, base_offset + additional_offset);
3371 if (additional_index == 0) {
3375 return MemOperand(scratch, additional_offset);
3378 if (additional_offset == 0) {
3381 __ Add(scratch, base, Operand(key,
SXTW, element_size_shift));
3382 return MemOperand(scratch, additional_offset);
3387 if (additional_offset == 0) {
3389 __ SmiUntag(scratch, key);
3390 __ Add(scratch.W(), scratch.W(), additional_index);
3392 __ Add(scratch.W(), key.W(), additional_index);
3397 __ Add(scratch, base,
3400 __ Add(scratch, base, Operand(key,
SXTW, element_size_shift));
3404 (additional_index << element_size_shift) + additional_offset);
3410 void LCodeGen::DoLoadKeyedExternal(LLoadKeyedExternal* instr) {
3411 Register ext_ptr =
ToRegister(instr->elements());
3415 bool key_is_smi = instr->hydrogen()->key()->representation().IsSmi();
3416 bool key_is_constant = instr->key()->IsConstantOperand();
3418 int constant_key = 0;
3419 if (key_is_constant) {
3421 constant_key =
ToInteger32(LConstantOperand::cast(instr->key()));
3422 if (constant_key & 0xf0000000) {
3423 Abort(kArrayIndexConstantValueTooBig);
3431 PrepareKeyedExternalArrayOperand(key, ext_ptr, scratch, key_is_smi,
3432 key_is_constant, constant_key,
3434 instr->additional_index());
3439 __ Ldr(result.S(), mem_op);
3440 __ Fcvt(result, result.S());
3444 __ Ldr(result, mem_op);
3446 Register result = ToRegister(instr->result());
3448 switch (elements_kind) {
3451 __ Ldrsb(result, mem_op);
3457 __ Ldrb(result, mem_op);
3461 __ Ldrsh(result, mem_op);
3465 __ Ldrh(result, mem_op);
3469 __ Ldrsw(result, mem_op);
3473 __ Ldr(result.W(), mem_op);
3476 __ Tst(result, 0xFFFFFFFF80000000);
3477 DeoptimizeIf(ne, instr->environment());
3499 void LCodeGen::CalcKeyedArrayBaseRegister(Register base,
3510 if (key_is_tagged) {
3515 ASSERT((element_size_shift >= 0) && (element_size_shift <= 4));
3516 __ Add(base, elements, Operand(key, SXTW, element_size_shift));
3521 void LCodeGen::DoLoadKeyedFixedDouble(LLoadKeyedFixedDouble* instr) {
3522 Register elements = ToRegister(instr->elements());
3527 if (instr->key()->IsConstantOperand()) {
3528 ASSERT(instr->hydrogen()->RequiresHoleCheck() ||
3529 (instr->temp() == NULL));
3531 int constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
3532 if (constant_key & 0xf0000000) {
3533 Abort(kArrayIndexConstantValueTooBig);
3536 instr->additional_index());
3537 load_base = elements;
3541 bool key_is_tagged = instr->hydrogen()->key()->representation().IsSmi();
3542 CalcKeyedArrayBaseRegister(load_base, elements, key, key_is_tagged,
3543 instr->hydrogen()->elements_kind());
3548 if (instr->hydrogen()->RequiresHoleCheck()) {
3549 Register scratch = ToRegister(instr->temp());
3553 __ Fmov(scratch, result);
3555 DeoptimizeIf(eq, instr->environment());
3560 void LCodeGen::DoLoadKeyedFixed(LLoadKeyedFixed* instr) {
3561 Register elements = ToRegister(instr->elements());
3562 Register result = ToRegister(instr->result());
3566 if (instr->key()->IsConstantOperand()) {
3568 LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
3570 instr->additional_index());
3571 load_base = elements;
3575 bool key_is_tagged = instr->hydrogen()->key()->representation().IsSmi();
3576 CalcKeyedArrayBaseRegister(load_base, elements, key, key_is_tagged,
3577 instr->hydrogen()->elements_kind());
3580 Representation representation = instr->hydrogen()->representation();
3582 if (representation.IsInteger32() &&
3592 if (instr->hydrogen()->RequiresHoleCheck()) {
3594 DeoptimizeIfNotSmi(result, instr->environment());
3596 DeoptimizeIfRoot(result, Heap::kTheHoleValueRootIndex,
3597 instr->environment());
3603 void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
3608 Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
3609 CallCode(ic, RelocInfo::CODE_TARGET, instr);
3615 void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
3616 HObjectAccess access = instr->hydrogen()->access();
3617 int offset = access.offset();
3618 Register object = ToRegister(instr->object());
3620 if (access.IsExternalMemory()) {
3621 Register result = ToRegister(instr->result());
3626 if (instr->hydrogen()->representation().IsDouble()) {
3632 Register result = ToRegister(instr->result());
3634 if (access.IsInobject()) {
3642 if (access.representation().IsSmi() &&
3643 instr->hydrogen()->representation().IsInteger32()) {
3654 void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
3658 __ Mov(x2, Operand(instr->name()));
3661 CallCode(ic, RelocInfo::CODE_TARGET, instr);
3667 void LCodeGen::DoLoadRoot(LLoadRoot* instr) {
3668 Register result = ToRegister(instr->result());
3669 __ LoadRoot(result, instr->index());
3673 void LCodeGen::DoMapEnumLength(LMapEnumLength* instr) {
3674 Register result = ToRegister(instr->result());
3676 __ EnumLengthSmi(result, map);
3680 void LCodeGen::DoMathAbs(LMathAbs* instr) {
3681 Representation r = instr->hydrogen()->value()->representation();
3685 __ Fabs(result, input);
3686 } else if (r.IsSmi() || r.IsInteger32()) {
3687 Register input = r.IsSmi() ? ToRegister(instr->value())
3689 Register result = r.IsSmi() ? ToRegister(instr->result())
3693 Deoptimize(instr->environment());
3699 void LCodeGen::DoDeferredMathAbsTagged(LMathAbsTagged* instr,
3701 Label* allocation_entry) {
3716 Register result_bits = ToRegister(instr->temp3());
3717 Register result = ToRegister(instr->result());
3719 Label runtime_allocation;
3723 DeoptimizeIfNotRoot(temp1, Heap::kHeapNumberMapRootIndex,
3724 instr->environment());
3731 __ Mov(result, input);
3739 __ Bind(allocation_entry);
3740 __ AllocateHeapNumber(result, &runtime_allocation, temp1, temp2);
3744 __ Bind(&runtime_allocation);
3745 if (FLAG_debug_code) {
3752 __ JumpIfSmi(result, &result_ok);
3753 __ Cmp(input, result);
3754 __ Assert(eq, kUnexpectedValue);
3755 __ Bind(&result_ok);
3758 { PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
3759 CallRuntimeFromDeferred(Runtime::kHiddenAllocateHeapNumber, 0, instr,
3761 __ StoreToSafepointRegisterSlot(x0, result);
3767 void LCodeGen::DoMathAbsTagged(LMathAbsTagged* instr) {
3771 DeferredMathAbsTagged(LCodeGen* codegen, LMathAbsTagged* instr)
3773 virtual void Generate() {
3774 codegen()->DoDeferredMathAbsTagged(instr_, exit(),
3775 allocation_entry());
3777 virtual LInstruction* instr() { return instr_; }
3778 Label* allocation_entry() { return &allocation; }
3780 LMathAbsTagged* instr_;
3788 DeferredMathAbsTagged* deferred =
3789 new(zone()) DeferredMathAbsTagged(this, instr);
3791 ASSERT(instr->hydrogen()->value()->representation().IsTagged() ||
3792 instr->hydrogen()->value()->representation().IsSmi());
3794 Register result_bits = ToRegister(instr->temp3());
3795 Register result = ToRegister(instr->result());
3804 __ JumpIfNotSmi(input, deferred->entry());
3810 __ Mov(result_bits, double_to_rawbits(0x80000000));
3811 __ B(deferred->allocation_entry());
3813 __ Bind(deferred->exit());
3820 void LCodeGen::DoMathExp(LMathExp* instr) {
3830 double_temp1, double_temp2,
3831 temp1, temp2, temp3);
3835 void LCodeGen::DoMathFloor(LMathFloor* instr) {
3839 Register result = ToRegister(instr->result());
3842 DeoptimizeIfMinusZero(input, instr->environment());
3845 __ Fcvtms(result, input);
3849 __ Cmp(result, Operand(result, SXTW));
3852 DeoptimizeIf(ne, instr->environment());
3856 void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
3859 int32_t divisor = instr->divisor();
3863 if (divisor == 1) return;
3866 __ Mov(result, Operand(dividend, ASR, shift));
3871 Label not_kmin_int, done;
3872 __ Negs(result, dividend);
3874 DeoptimizeIf(eq, instr->environment());
3879 if (divisor == -1) {
3880 DeoptimizeIf(vs, instr->environment());
3882 __ B(vc, &not_kmin_int);
3887 __ bind(&not_kmin_int);
3888 __ Mov(result, Operand(dividend, ASR, shift));
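// Note (editorial): flooring division by a power of two reduces to an arithmetic right
// shift; negative divisors negate the dividend first, with deoptimizations guarding the
// -0 and kMinInt edge cases.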
3893 void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
3895 int32_t divisor = instr->divisor();
3900 Deoptimize(instr->environment());
3905 HMathFloorOfDiv* hdiv = instr->hydrogen();
3907 __ Cmp(dividend, 0);
3908 DeoptimizeIf(eq, instr->environment());
3915 __ TruncatingDiv(result, dividend, Abs(divisor));
3916 if (divisor < 0) __ Neg(result, result);
3924 Label needs_adjustment, done;
3925 __ Cmp(dividend, 0);
3926 __ B(divisor > 0 ? lt : gt, &needs_adjustment);
3927 __ TruncatingDiv(result, dividend, Abs(divisor));
3928 if (divisor < 0) __ Neg(result, result);
3930 __ bind(&needs_adjustment);
3931 __ Add(temp, dividend, Operand(divisor > 0 ? 1 : -1));
3932 __ TruncatingDiv(result, temp, Abs(divisor));
3933 if (divisor < 0) __ Neg(result, result);
3934 __ Sub(result, result, Operand(1));
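// Note (editorial): with a constant divisor the flooring quotient is derived from a
// truncating division; when the operand signs differ the quotient is adjusted by -1 so the
// result rounds toward negative infinity rather than toward zero.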
3939 void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
3947 __ Sdiv(result, dividend, divisor);
3950 DeoptimizeIfZero(divisor, instr->environment());
3955 __ Cmp(dividend, 1);
3957 DeoptimizeIf(eq, instr->environment());
3967 DeoptimizeIf(eq, instr->environment());
3972 __ Eor(remainder, dividend, divisor);
3976 __ Msub(remainder, result, divisor, dividend);
3977 __ Cbz(remainder, &done);
3978 __ Sub(result, result, 1);
3984 void LCodeGen::DoMathLog(LMathLog* instr) {
3985 ASSERT(instr->IsMarkedAsCall());
3987 __ CallCFunction(ExternalReference::math_log_double_function(isolate()),
3993 void LCodeGen::DoMathClz32(LMathClz32* instr) {
3996 __ Clz(result, input);
4000 void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
4011 __ Fmov(double_scratch(), kFP64NegativeInfinity);
4012 __ Fcmp(double_scratch(), input);
4013 __ Fabs(result, input);
4017 __ Fadd(double_scratch(), input, fp_zero);
4018 __ Fsqrt(result, double_scratch());
4024 void LCodeGen::DoPower(LPower* instr) {
4025 Representation exponent_type = instr->hydrogen()->right()->representation();
4028 ASSERT(!instr->right()->IsDoubleRegister() ||
4030 ASSERT(exponent_type.IsInteger32() || !instr->right()->IsRegister() ||
4032 ASSERT(!exponent_type.IsInteger32() || ToRegister(instr->right()).is(x12));
4036 if (exponent_type.IsSmi()) {
4039 } else if (exponent_type.IsTagged()) {
4041 __ JumpIfSmi(x11, &no_deopt);
4043 DeoptimizeIfNotRoot(x0, Heap::kHeapNumberMapRootIndex,
4044 instr->environment());
4048 } else if (exponent_type.IsInteger32()) {
4051 Register exponent = ToRegister(instr->right());
4052 __ Sxtw(exponent, exponent);
4056 ASSERT(exponent_type.IsDouble());
4063 void LCodeGen::DoMathRound(LMathRound* instr) {
4067 Register result = ToRegister(instr->result());
4080 __ Fmov(dot_five, 0.5);
4081 __ Fabs(temp1, input);
4082 __ Fcmp(temp1, dot_five);
4086 __ B(hi, &try_rounding);
4089 __ Fmov(result, input);
4090 DeoptimizeIfNegative(result, instr->environment());
4092 __ Fcmp(input, dot_five);
4096 __ Csel(result, result, xzr, eq);
4099 __ Bind(&try_rounding);
4105 __ Fadd(temp1, input, dot_five);
4106 __ Fcvtms(result, temp1);
4111 __ Fcmp(input, 0.0);
4113 DeoptimizeIf(ne, instr->environment());
4119 void LCodeGen::DoMathSqrt(LMathSqrt* instr) {
4122 __ Fsqrt(result, input);
4126 void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
4127 HMathMinMax::Operation op = instr->hydrogen()->operation();
4128 if (instr->hydrogen()->representation().IsInteger32()) {
4133 __ Cmp(left, right);
4134 __ Csel(result, left, right, (op == HMathMinMax::kMathMax) ? ge : le);
4135 } else if (instr->hydrogen()->representation().IsSmi()) {
4136 Register result = ToRegister(instr->result());
4138 Operand right = ToOperand(instr->right());
4140 __ Cmp(left, right);
4141 __ Csel(result, left, right, (op == HMathMinMax::kMathMax) ? ge : le);
4143 ASSERT(instr->hydrogen()->representation().IsDouble());
4148 if (op == HMathMinMax::kMathMax) {
4149 __ Fmax(result, left, right);
4151 ASSERT(op == HMathMinMax::kMathMin);
4152 __ Fmin(result, left, right);
4158 void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) {
4160 int32_t divisor = instr->divisor();
4169 HMod* hmod = instr->hydrogen();
4170 int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
4171 Label dividend_is_not_negative, done;
4173 __ Cmp(dividend, 0);
4174 __ B(pl, &dividend_is_not_negative);
4176 __ Neg(dividend, dividend);
4177 __ And(dividend, dividend, mask);
4178 __ Negs(dividend, dividend);
4180 DeoptimizeIf(eq, instr->environment());
4185 __ bind(&dividend_is_not_negative);
4186 __ And(dividend, dividend, mask);
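// Note (editorial): modulus by a power of two is an AND with (|divisor| - 1); a negative
// dividend is negated around the mask so the remainder keeps the dividend's sign, with a
// deoptimization when the result would be -0.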
4191 void LCodeGen::DoModByConstI(LModByConstI* instr) {
4193 int32_t divisor = instr->divisor();
4199 Deoptimize(instr->environment());
4203 __ TruncatingDiv(result, dividend, Abs(divisor));
4204 __ Sxtw(dividend.X(), dividend);
4205 __ Mov(temp, Abs(divisor));
4206 __ Smsubl(result.X(), result, temp, dividend.X());
4209 HMod* hmod = instr->hydrogen();
4211 Label remainder_not_zero;
4212 __ Cbnz(result, &remainder_not_zero);
4213 DeoptimizeIfNegative(dividend, instr->environment());
4214 __ bind(&remainder_not_zero);
4219 void LCodeGen::DoModI(LModI* instr) {
4226 __ Sdiv(result, dividend, divisor);
4230 __ Cbnz(divisor, &ok);
4232 Deoptimize(instr->environment());
4235 __ Msub(result, result, divisor, dividend);
4237 __ Cbnz(result, &done);
4238 if (deopt.is_bound()) {
4241 DeoptimizeIfNegative(dividend, instr->environment());
4248 void LCodeGen::DoMulConstIS(LMulConstIS* instr) {
4249 ASSERT(instr->hydrogen()->representation().IsSmiOrInteger32());
4250 bool is_smi = instr->hydrogen()->representation().IsSmi();
4259 bool bailout_on_minus_zero =
4262 if (bailout_on_minus_zero) {
4265 DeoptimizeIfZero(left, instr->environment());
4266 }
else if (right == 0) {
4268 DeoptimizeIfNegative(left, instr->environment());
4277 __ Negs(result, left);
4278 DeoptimizeIf(vs, instr->environment());
4280 __ Neg(result, left);
4293 __ Adds(result, left, left);
4294 DeoptimizeIf(vs, instr->environment());
4296 __ Add(result, left, left);
4309 Register scratch = result;
4311 __ Cls(scratch, left);
4312 __ Cmp(scratch, right_log2);
4313 DeoptimizeIf(lt, instr->environment());
4318 __ Lsl(result, left, right_log2);
4322 __ Negs(result, Operand(left, LSL, right_log2));
4323 DeoptimizeIf(vs, instr->environment());
4325 __ Neg(result, Operand(left, LSL, right_log2));
4344 __ Neg(result, result);
4355 __ Neg(result, result);
4364 void LCodeGen::DoMulI(LMulI* instr) {
4370 bool bailout_on_minus_zero =
4373 if (bailout_on_minus_zero && !left.Is(right)) {
4381 DeoptimizeIf(mi, instr->environment());
4385 __ Smull(result.X(), left, right);
4386 __ Cmp(result.X(), Operand(result, SXTW));
4387 DeoptimizeIf(ne, instr->environment());
4389 __ Mul(result, left, right);
4394 void LCodeGen::DoMulS(LMulS* instr) {
4395 Register result = ToRegister(instr->result());
4400 bool bailout_on_minus_zero =
4403 if (bailout_on_minus_zero && !left.Is(right)) {
4411 DeoptimizeIf(mi, instr->environment());
4416 __ Smulh(result, left, right);
4417 __ Cmp(result, Operand(result.W(), SXTW));
4419 DeoptimizeIf(ne, instr->environment());
4426 __ Mul(result, result, result);
4427 } else if (result.Is(left) && !left.Is(right)) {
4430 __ SmiUntag(result, left);
4431 __ Mul(result, result, right);
4433 ASSERT(!left.Is(result));
4437 __ SmiUntag(result, right);
4438 __ Mul(result, left, result);
4448 Register result = ToRegister(instr->result());
4451 PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
4458 __ CallRuntimeSaveDoubles(Runtime::kHiddenAllocateHeapNumber);
4459 RecordSafepointWithRegisters(
4460 instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
4461 __ StoreToSafepointRegisterSlot(x0, result);
4465 void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
4468 DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
4470 virtual void Generate() { codegen()->DoDeferredNumberTagD(instr_); }
4471 virtual LInstruction* instr() { return instr_; }
4473 LNumberTagD* instr_;
4477 Register result = ToRegister(instr->result());
4481 DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr);
4482 if (FLAG_inline_new) {
4483 __ AllocateHeapNumber(result, deferred->entry(), temp1, temp2);
4485 __ B(deferred->entry());
4488 __ Bind(deferred->exit());
4493 void LCodeGen::DoDeferredNumberTagU(LInstruction* instr,
4497 Label slow, convert_and_store;
4502 if (FLAG_inline_new) {
4504 __ AllocateHeapNumber(dst, &slow, scratch1, scratch2);
4505 __ B(&convert_and_store);
4516 PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
4524 __ CallRuntimeSaveDoubles(Runtime::kHiddenAllocateHeapNumber);
4525 RecordSafepointWithRegisters(
4526 instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
4527 __ StoreToSafepointRegisterSlot(x0, dst);
4532 __ Bind(&convert_and_store);
4534 __ Ucvtf(dbl_scratch, src);
4539 void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
4542 DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr)
4544 virtual void Generate() {
4545 codegen()->DoDeferredNumberTagU(instr_,
4550 virtual LInstruction* instr() { return instr_; }
4552 LNumberTagU* instr_;
4556 Register result = ToRegister(instr->result());
4558 DeferredNumberTagU* deferred = new(zone()) DeferredNumberTagU(this, instr);
4560 __ B(hi, deferred->entry());
4561 __ SmiTag(result, value.X());
4562 __ Bind(deferred->exit());
4566 void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
4568 Register scratch = ToRegister(instr->temp());
4570 bool can_convert_undefined_to_nan =
4571 instr->hydrogen()->can_convert_undefined_to_nan();
4573 Label done, load_smi;
4576 HValue* value = instr->hydrogen()->value();
4581 __ JumpIfSmi(input, &load_smi);
4583 Label convert_undefined;
4587 if (can_convert_undefined_to_nan) {
4588 __ JumpIfNotRoot(scratch, Heap::kHeapNumberMapRootIndex,
4589 &convert_undefined);
4591 DeoptimizeIfNotRoot(scratch, Heap::kHeapNumberMapRootIndex,
4592 instr->environment());
4597 if (instr->hydrogen()->deoptimize_on_minus_zero()) {
4598 DeoptimizeIfMinusZero(result, instr->environment());
4602 if (can_convert_undefined_to_nan) {
4603 __ Bind(&convert_undefined);
4604 DeoptimizeIfNotRoot(input, Heap::kUndefinedValueRootIndex,
4605 instr->environment());
4607 __ LoadRoot(scratch, Heap::kNanValueRootIndex);
4619 __ SmiUntagToDouble(result, input);
4625 void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
4633 ASSERT(!environment->HasBeenRegistered());
4634 RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
4636 GenerateOsrPrologue();
4640 void LCodeGen::DoParameter(LParameter* instr) {
4645 void LCodeGen::DoPushArgument(LPushArgument* instr) {
4646 LOperand* argument = instr->value();
4647 if (argument->IsDoubleRegister() || argument->IsDoubleStackSlot()) {
4648 Abort(kDoPushArgumentNotImplementedForDoubleType);
4655 void LCodeGen::DoReturn(LReturn* instr) {
4656 if (FLAG_trace && info()->IsOptimizing()) {
4663 __ CallRuntime(Runtime::kTraceExit, 1);
4666 if (info()->saves_caller_doubles()) {
4667 RestoreCallerDoubles();
4670 int no_frame_start = -1;
4672 Register stack_pointer = masm()->StackPointer();
4673 __ Mov(stack_pointer, fp);
4674 no_frame_start = masm_->pc_offset();
4678 if (instr->has_constant_parameter_count()) {
4679 int parameter_count = ToInteger32(instr->constant_parameter_count());
4680 __ Drop(parameter_count + 1);
4682 Register parameter_count = ToRegister(instr->parameter_count());
4683 __ DropBySMI(parameter_count);
4687 if (no_frame_start != -1) {
4688 info_->AddNoFrameRange(no_frame_start, masm_->pc_offset());
4693 MemOperand LCodeGen::BuildSeqStringOperand(Register string,
4697 if (index->IsConstantOperand()) {
4698 int offset = ToInteger32(LConstantOperand::cast(index));
4716 void LCodeGen::DoSeqStringGetChar(LSeqStringGetChar* instr) {
4718 Register string = ToRegister(instr->string());
4719 Register result = ToRegister(instr->result());
4722 if (FLAG_debug_code) {
4726 UseScratchRegisterScope temps(masm());
4727 Register dbg_temp = temps.AcquireX();
4732 __ And(dbg_temp, dbg_temp,
4737 ? one_byte_seq_type : two_byte_seq_type));
4738 __ Check(eq, kUnexpectedStringType);
4742 BuildSeqStringOperand(string, temp, instr->index(), encoding);
4744 __ Ldrb(result, operand);
4746 __ Ldrh(result, operand);
4751 void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
4753 Register string = ToRegister(instr->string());
4757 if (FLAG_debug_code) {
4764 ? one_byte_seq_type : two_byte_seq_type;
4769 BuildSeqStringOperand(string, temp, instr->index(), encoding);
4771 __ Strb(value, operand);
4773 __ Strh(value, operand);
4778 void LCodeGen::DoSmiTag(LSmiTag* instr) {
4779 HChange* hchange = instr->hydrogen();
4781 Register output = ToRegister(instr->result());
4784 DeoptimizeIfNegative(input.W(), instr->environment());
4786 __ SmiTag(output, input);
4790 void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
4792 Register result = ToRegister(instr->result());
4795 if (instr->needs_check()) {
4796 DeoptimizeIfNotSmi(input, instr->environment());
4800 __ SmiUntag(result, input);
4805 void LCodeGen::DoShiftI(LShiftI* instr) {
4806 LOperand* right_op = instr->right();
4810 if (right_op->IsRegister()) {
4812 switch (instr->op()) {
4814 case Token::SAR: __ Asr(result, left, right); break;
4815 case Token::SHL: __ Lsl(result, left, right); break;
4817 if (instr->can_deopt()) {
4818 Label right_not_zero;
4819 __ Cbnz(right, &right_not_zero);
4820 DeoptimizeIfNegative(left, instr->environment());
4821 __ Bind(&right_not_zero);
4823 __ Lsr(result, left, right);
4828 ASSERT(right_op->IsConstantOperand());
4829 int shift_count = ToInteger32(LConstantOperand::cast(right_op)) & 0x1f;
4830 if (shift_count == 0) {
4831 if ((instr->op() == Token::SHR) && instr->can_deopt()) {
4832 DeoptimizeIfNegative(left, instr->environment());
4836 switch (instr->op()) {
4837 case Token::ROR: __ Ror(result, left, shift_count); break;
4838 case Token::SAR: __ Asr(result, left, shift_count); break;
4839 case Token::SHL: __ Lsl(result, left, shift_count); break;
4840 case Token::SHR: __ Lsr(result, left, shift_count); break;
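// Note (editorial): constant shift amounts are masked with 0x1f, matching JavaScript's
// 32-bit shift semantics; a zero-count SHR still deoptimizes when the input may be
// negative, since the unsigned result would not fit in a signed 32-bit value.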
4848 void LCodeGen::DoShiftS(LShiftS* instr) {
4849 LOperand* right_op = instr->right();
4851 Register result = ToRegister(instr->result());
4855 (instr->temp() == NULL));
4857 if (right_op->IsRegister()) {
4859 switch (instr->op()) {
4863 __ SmiUntag(result, left);
4864 __ Ror(result.W(), result.W(), temp.W());
4870 __ Asr(result, left, result);
4875 __ Lsl(result, left, result);
4878 if (instr->can_deopt()) {
4879 Label right_not_zero;
4880 __ Cbnz(right, &right_not_zero);
4881 DeoptimizeIfNegative(left, instr->environment());
4882 __ Bind(&right_not_zero);
4885 __ Lsr(result, left, result);
4891 ASSERT(right_op->IsConstantOperand());
4892 int shift_count = ToInteger32(LConstantOperand::cast(right_op)) & 0x1f;
4893 if (shift_count == 0) {
4894 if ((instr->op() == Token::SHR) && instr->can_deopt()) {
4895 DeoptimizeIfNegative(left, instr->environment());
4897 __ Mov(result, left);
4899 switch (instr->op()) {
4901 __ SmiUntag(result, left);
4902 __ Ror(result.W(), result.W(), shift_count);
4906 __ Asr(result, left, shift_count);
4910 __ Lsl(result, left, shift_count);
4913 __ Lsr(result, left, shift_count);
4923 void LCodeGen::DoDebugBreak(LDebugBreak* instr) {
4924 __ Debug("LDebugBreak", 0, BREAK);
4928 void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
4930 Register scratch1 = x5;
4931 Register scratch2 = x6;
4932 ASSERT(instr->IsMarkedAsCall());
4937 __ LoadHeapObject(scratch1, instr->hydrogen()->pairs());
4939 __ Push(cp, scratch1, scratch2);
4940 CallRuntime(Runtime::kHiddenDeclareGlobals, 3, instr);
4945 PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
4946 LoadContextFromDeferred(instr->context());
4947 __ CallRuntimeSaveDoubles(Runtime::kHiddenStackGuard);
4948 RecordSafepointWithLazyDeopt(
4949 instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
4950 ASSERT(instr->HasEnvironment());
4952 safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
4956 void LCodeGen::DoStackCheck(LStackCheck* instr) {
4959 DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
4961 virtual void Generate() { codegen()->DoDeferredStackCheck(instr_); }
4962 virtual LInstruction* instr() { return instr_; }
4964 LStackCheck* instr_;
4967 ASSERT(instr->HasEnvironment());
4971 if (instr->hydrogen()->is_function_entry()) {
4974 __ CompareRoot(masm()->StackPointer(), Heap::kStackLimitRootIndex);
4977 PredictableCodeSizeScope predictable(masm_,
4979 ASSERT(instr->context()->IsRegister());
4981 CallCode(isolate()->builtins()->StackCheck(),
4982 RelocInfo::CODE_TARGET,
4986 ASSERT(instr->hydrogen()->is_backwards_branch());
4988 DeferredStackCheck* deferred_stack_check =
4989 new(zone()) DeferredStackCheck(this, instr);
4990 __ CompareRoot(masm()->StackPointer(), Heap::kStackLimitRootIndex);
4991 __ B(lo, deferred_stack_check->entry());
4994 __ Bind(instr->done_label());
4995 deferred_stack_check->SetExit(instr->done_label());
4996 RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
5004 void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) {
5005 Register function = ToRegister(instr->function());
5006 Register code_object = ToRegister(instr->code_object());
5013 void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
5014 Register context = ToRegister(instr->context());
5016 Register scratch = ToRegister(instr->temp());
5019 Label skip_assignment;
5021 if (instr->hydrogen()->RequiresHoleCheck()) {
5022 __ Ldr(scratch, target);
5023 if (instr->hydrogen()->DeoptimizesOnHole()) {
5024 DeoptimizeIfRoot(scratch, Heap::kTheHoleValueRootIndex,
5025 instr->environment());
5027 __ JumpIfNotRoot(scratch, Heap::kTheHoleValueRootIndex, &skip_assignment);
5031 __ Str(value, target);
5032 if (instr->hydrogen()->NeedsWriteBarrier()) {
5034 instr->hydrogen()->value()->IsHeapObject()
5036 __ RecordWriteContextSlot(context,
5045 __ Bind(&skip_assignment);
5049 void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
5054 __ Mov(cell, Operand(instr->hydrogen()->cell().handle()));
5060 if (instr->hydrogen()->RequiresHoleCheck()) {
5061 Register payload = ToRegister(instr->temp2());
5064 payload, Heap::kTheHoleValueRootIndex, instr->environment());
5073 void LCodeGen::DoStoreKeyedExternal(LStoreKeyedExternal* instr) {
5074 Register ext_ptr = ToRegister(instr->elements());
5079 bool key_is_smi = instr->hydrogen()->key()->representation().IsSmi();
5080 bool key_is_constant = instr->key()->IsConstantOperand();
5081 int constant_key = 0;
5082 if (key_is_constant) {
5084 constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
5085 if (constant_key & 0xf0000000) {
5086 Abort(kArrayIndexConstantValueTooBig);
5094 PrepareKeyedExternalArrayOperand(key, ext_ptr, scratch, key_is_smi,
5095 key_is_constant, constant_key,
5097 instr->additional_index());
5103 __ Fcvt(dbl_scratch.S(), value);
5104 __ Str(dbl_scratch.S(), dst);
5112 switch (elements_kind) {
5119 __ Strb(value, dst);
5125 __ Strh(value, dst);
5131 __ Str(value.W(), dst);
5152 void LCodeGen::DoStoreKeyedFixedDouble(LStoreKeyedFixedDouble* instr) {
5153 Register elements = ToRegister(instr->elements());
5155 Register store_base = no_reg;
5158 if (instr->key()->IsConstantOperand()) {
5159 int constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
5160 if (constant_key & 0xf0000000) {
5161 Abort(kArrayIndexConstantValueTooBig);
5164 instr->additional_index());
5165 store_base = elements;
5169 bool key_is_tagged = instr->hydrogen()->key()->representation().IsSmi();
5170 CalcKeyedArrayBaseRegister(store_base, elements, key, key_is_tagged,
5171 instr->hydrogen()->elements_kind());
5175 if (instr->NeedsCanonicalization()) {
5177 __ Fmov(dbl_scratch,
5179 __ Fmaxnm(dbl_scratch, dbl_scratch, value);
5187 void LCodeGen::DoStoreKeyedFixed(LStoreKeyedFixed* instr) {
5189 Register elements = ToRegister(instr->elements());
5190 Register scratch = no_reg;
5191 Register store_base = no_reg;
5195 if (!instr->key()->IsConstantOperand() ||
5196 instr->hydrogen()->NeedsWriteBarrier()) {
5200 if (instr->key()->IsConstantOperand()) {
5201 LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
5203 instr->additional_index());
5204 store_base = elements;
5206 store_base = scratch;
5208 bool key_is_tagged = instr->hydrogen()->key()->representation().IsSmi();
5209 CalcKeyedArrayBaseRegister(store_base, elements, key, key_is_tagged,
5210 instr->hydrogen()->elements_kind());
5213 Representation representation = instr->hydrogen()->value()->representation();
5214 if (representation.IsInteger32()) {
5224 if (instr->hydrogen()->NeedsWriteBarrier()) {
5225 ASSERT(representation.IsTagged());
5227 Register element_addr = scratch;
5229 instr->hydrogen()->value()->IsHeapObject()
5239 void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
5245 Handle<Code> ic = instr->strict_mode() == STRICT
5246 ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
5247 : isolate()->builtins()->KeyedStoreIC_Initialize();
5248 CallCode(ic, RelocInfo::CODE_TARGET, instr);
5252 void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
5253 Representation representation = instr->representation();
5255 Register object = ToRegister(instr->object());
5256 HObjectAccess access = instr->hydrogen()->access();
5257 Handle<Map> transition = instr->transition();
5258 int offset = access.offset();
5260 if (access.IsExternalMemory()) {
5261 ASSERT(transition.is_null());
5262 ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
5264 __ Store(value, MemOperand(object, offset), representation);
5266 } else if (representation.IsDouble()) {
5267 ASSERT(transition.is_null());
5268 ASSERT(access.IsInobject());
5269 ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
5277 SmiCheck check_needed = instr->hydrogen()->value()->IsHeapObject()
5280 ASSERT(!(representation.IsSmi() &&
5281 instr->value()->IsConstantOperand() &&
5283 if (representation.IsHeapObject() &&
5284 !instr->hydrogen()->value()->type().IsHeapObject()) {
5285 DeoptimizeIfSmi(value, instr->environment());
5291 if (!transition.is_null()) {
5293 Register new_map_value = ToRegister(instr->temp0());
5294 __ Mov(new_map_value, Operand(transition));
5296 if (instr->hydrogen()->NeedsWriteBarrierForMap()) {
5298 __ RecordWriteField(object,
5310 Register destination;
5311 if (access.IsInobject()) {
5312 destination = object;
5316 destination = temp0;
5319 if (representation.IsSmi() &&
5320 instr->hydrogen()->value()->representation().IsInteger32()) {
5325 __ AssertSmi(temp0);
5328 if (destination.Is(temp0)) {
5329 ASSERT(!access.IsInobject());
5339 if (instr->hydrogen()->NeedsWriteBarrier()) {
5340 __ RecordWriteField(destination,
5352 void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
5358 __ Mov(x2, Operand(instr->name()));
5360 CallCode(ic, RelocInfo::CODE_TARGET, instr);
5364 void LCodeGen::DoStringAdd(LStringAdd* instr) {
5368 StringAddStub stub(instr->hydrogen()->flags(),
5369 instr->hydrogen()->pretenure_flag());
5370 CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
5374 void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
5377 DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
5379 virtual void Generate() { codegen()->DoDeferredStringCharCodeAt(instr_); }
5380 virtual LInstruction* instr() { return instr_; }
5382 LStringCharCodeAt* instr_;
5385 DeferredStringCharCodeAt* deferred =
5386 new(zone()) DeferredStringCharCodeAt(this, instr);
5393 __ Bind(deferred->exit());
5398 Register string = ToRegister(instr->string());
5399 Register result = ToRegister(instr->result());
5406 PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
5414 CallRuntimeFromDeferred(Runtime::kHiddenStringCharCodeAt, 2, instr,
5418 __ StoreToSafepointRegisterSlot(x0, result);
5422 void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
5425 DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
5427 virtual void Generate() { codegen()->DoDeferredStringCharFromCode(instr_); }
5428 virtual LInstruction* instr() { return instr_; }
5430 LStringCharFromCode* instr_;
5433 DeferredStringCharFromCode* deferred =
5434 new(zone()) DeferredStringCharFromCode(this, instr);
5436 ASSERT(instr->hydrogen()->value()->representation().IsInteger32());
5438 Register result = ToRegister(instr->result());
5441 __ B(hi, deferred->entry());
5442 __ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex);
5445 __ CompareRoot(result, Heap::kUndefinedValueRootIndex);
5446 __ B(eq, deferred->entry());
5447 __ Bind(deferred->exit());
5452 Register char_code = ToRegister(instr->char_code());
5453 Register result = ToRegister(instr->result());
5460 PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
5461 __ SmiTag(char_code);
5463 CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr, instr->context());
5464 __ StoreToSafepointRegisterSlot(x0, result);
5468 void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
5473 CallCode(ic, RelocInfo::CODE_TARGET, instr);
5476 Condition condition = TokenToCondition(op, false);
5478 EmitCompareAndBranch(instr, condition, x0, 0);
5482 void LCodeGen::DoSubI(LSubI* instr) {
5488 __ Subs(result, left, right);
5489 DeoptimizeIf(vs, instr->environment());
5491 __ Sub(result, left, right);
5496 void LCodeGen::DoSubS(LSubS* instr) {
5498 Register result = ToRegister(instr->result());
5500 Operand right = ToOperand(instr->right());
5502 __ Subs(result, left, right);
5503 DeoptimizeIf(vs, instr->environment());
5505 __ Sub(result, left, right);
5523 if (instr->truncating()) {
5524 Register output = ToRegister(instr->result());
5528 __ JumpIfNotRoot(scratch1, Heap::kHeapNumberMapRootIndex, &check_bools);
5531 __ TruncateHeapNumberToI(output, input);
5534 __ Bind(&check_bools);
5536 Register true_root = output;
5537 Register false_root = scratch1;
5538 __ LoadTrueFalseRoots(true_root, false_root);
5539 __ Cmp(input, true_root);
5540 __ Cset(output, eq);
5546 DeoptimizeIfNotRoot(input, Heap::kUndefinedValueRootIndex,
5547 instr->environment());
5554 DeoptimizeIfNotRoot(scratch1, Heap::kHeapNumberMapRootIndex,
5555 instr->environment());
5560 __ TryConvertDoubleToInt32(output, dbl_scratch1, dbl_scratch2);
5561 DeoptimizeIf(ne, instr->environment());
5566 __ Fmov(scratch1, dbl_scratch1);
5567 DeoptimizeIfNegative(scratch1, instr->environment());
5574 void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
5577 DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
5579 virtual void Generate() {
5580 codegen()->DoDeferredTaggedToI(instr_, instr_->value(), instr_->temp1(),
5584 virtual LInstruction* instr() { return instr_; }
5590 Register output = ToRegister(instr->result());
5592 if (instr->hydrogen()->value()->representation().IsSmi()) {
5593 __ SmiUntag(output, input);
5595 DeferredTaggedToI* deferred = new(zone()) DeferredTaggedToI(this, instr);
5597 __ JumpIfNotSmi(input, deferred->entry());
5598 __ SmiUntag(output, input);
5599 __ Bind(deferred->exit());
5604 void LCodeGen::DoThisFunction(LThisFunction* instr) {
5605 Register result = ToRegister(instr->result());
5610 void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
5614 CallRuntime(Runtime::kToFastProperties, 1, instr);
5618 void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
5626 int literal_offset =
5628 __ LoadObject(x7, instr->hydrogen()->literals());
5630 __ JumpIfNotRoot(x1, Heap::kUndefinedValueRootIndex, &materialized);
5634 __ Mov(x12, Operand(Smi::FromInt(instr->hydrogen()->literal_index())));
5635 __ Mov(x11, Operand(instr->hydrogen()->pattern()));
5636 __ Mov(x10, Operand(instr->hydrogen()->flags()));
5637 __ Push(x7, x12, x11, x10);
5638 CallRuntime(Runtime::kHiddenMaterializeRegExpLiteral, 4, instr);
5641 __ Bind(&materialized);
5643 Label allocated, runtime_allocate;
5645 __ Allocate(size, x0, x10, x11, &runtime_allocate, TAG_OBJECT);
5648 __ Bind(&runtime_allocate);
5651 CallRuntime(Runtime::kHiddenAllocateInNewSpace, 1, instr);
5654 __ Bind(&allocated);
5656 __ CopyFields(x0, x1, CPURegList(x10, x11, x12), size / kPointerSize);
5660 void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
5661 Register object = ToRegister(instr->object());
5664 Handle<Map> from_map = instr->original_map();
5665 Handle<Map> to_map = instr->transitioned_map();
5669 Label not_applicable;
5673 Register new_map = ToRegister(instr->temp2());
5674 __ Mov(new_map, Operand(to_map));
5681 PushSafepointRegistersScope scope(
5682 this, Safepoint::kWithRegistersAndDoubles);
5684 __ Mov(x1, Operand(to_map));
5685 bool is_js_array = from_map->instance_type() == JS_ARRAY_TYPE;
5686 TransitionElementsKindStub stub(from_kind, to_kind, is_js_array);
5688 RecordSafepointWithRegistersAndDoubles(
5689 instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
5691 __ Bind(&not_applicable);
5695 void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
5696 Register object = ToRegister(instr->object());
5700 Label no_memento_found;
5701 __ JumpIfJSArrayHasAllocationMemento(object, temp1, temp2, &no_memento_found);
5702 Deoptimize(instr->environment());
5703 __ Bind(&no_memento_found);
5707 void LCodeGen::DoTruncateDoubleToIntOrSmi(LTruncateDoubleToIntOrSmi* instr) {
5709 Register result = ToRegister(instr->result());
5710 __ TruncateDoubleToI(result, input);
5711 if (instr->tag_result()) {
5712 __ SmiTag(result, result);
5717 void LCodeGen::DoTypeof(LTypeof* instr) {
5720 CallRuntime(Runtime::kTypeof, 1, instr);
5724 void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
5725 Handle<String> type_name = instr->type_literal();
5726 Label* true_label = instr->TrueLabel(chunk_);
5727 Label* false_label = instr->FalseLabel(chunk_);
5730 if (type_name->Equals(heap()->number_string())) {
5734 __ JumpIfSmi(value, true_label);
5736 __ CompareRoot(map, Heap::kHeapNumberMapRootIndex);
5737 EmitBranch(instr, eq);
5739 } else if (type_name->Equals(heap()->string_string())) {
5742 Register scratch = ToRegister(instr->temp2());
5744 __ JumpIfSmi(value, false_label);
5745 __ JumpIfObjectType(
5750 } else if (type_name->Equals(heap()->symbol_string())) {
5753 Register scratch = ToRegister(instr->temp2());
5755 __ JumpIfSmi(value, false_label);
5757 EmitBranch(instr, eq);
5759 } else if (type_name->Equals(heap()->boolean_string())) {
5760 __ JumpIfRoot(value, Heap::kTrueValueRootIndex, true_label);
5761 __ CompareRoot(value, Heap::kFalseValueRootIndex);
5762 EmitBranch(instr, eq);
5764 } else if (FLAG_harmony_typeof && type_name->Equals(heap()->null_string())) {
5765 __ CompareRoot(value, Heap::kNullValueRootIndex);
5766 EmitBranch(instr, eq);
5768 } else if (type_name->Equals(heap()->undefined_string())) {
5770 Register scratch = ToRegister(instr->temp1());
5772 __ JumpIfRoot(value, Heap::kUndefinedValueRootIndex, true_label);
5773 __ JumpIfSmi(value, false_label);
5779 } else if (type_name->Equals(heap()->function_string())) {
5784 __ JumpIfSmi(value, false_label);
5789 } else if (type_name->Equals(heap()->object_string())) {
5792 Register scratch = ToRegister(instr->temp2());
5794 __ JumpIfSmi(value, false_label);
5795 if (!FLAG_harmony_typeof) {
5796 __ JumpIfRoot(value, Heap::kNullValueRootIndex, true_label);
5798 __ JumpIfObjectType(value, map, scratch,
5801 __ B(gt, false_label);
5812 void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
5817 void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
5818 Register object = ToRegister(instr->value());
5823 DeoptimizeIf(ne, instr->environment());
5827 void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
5828 Register receiver = ToRegister(instr->receiver());
5829 Register function = ToRegister(instr->function());
5830 Register result = ToRegister(instr->result());
5835 Label global_object, done, deopt;
5837 if (!instr->hydrogen()->known_function()) {
5853 __ JumpIfRoot(receiver, Heap::kNullValueRootIndex, &global_object);
5854 __ JumpIfRoot(receiver, Heap::kUndefinedValueRootIndex, &global_object);
5857 __ JumpIfSmi(receiver, &deopt);
5859 __ Mov(result, receiver);
5864 Deoptimize(instr->environment());
5866 __ Bind(&global_object);
5875 void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
5876 Register object = ToRegister(instr->object());
5878 Register result = ToRegister(instr->result());
5880 __ AssertSmi(index);
5882 Label out_of_object, done;
5884 __ B(lt, &out_of_object);
5892 __ Bind(&out_of_object);
static Handle< Code > initialize_stub(Isolate *isolate, StrictMode strict_mode)
enable upcoming ES6 features enable harmony block scoping enable harmony enable harmony proxies enable harmony generators enable harmony numeric enable harmony string enable harmony math functions harmony_scoping harmony_symbols harmony_collections harmony_iteration harmony_strings harmony_scoping harmony_maths tracks arrays with only smi values Optimize object Array DOM strings and string pretenure call new trace pretenuring decisions of HAllocate instructions track fields with only smi values track fields with heap values track_fields track_fields Enables optimizations which favor memory size over execution speed use string slices optimization filter maximum number of GVN fix point iterations use function inlining use allocation folding eliminate write barriers targeting allocations in optimized code maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining crankshaft harvests type feedback from stub cache trace check elimination phase hydrogen tracing filter NULL
static DwVfpRegister FromAllocationIndex(int index)
static const int kLengthOffset
void FinishCode(Handle< Code > code)
static const int kHashFieldOffset
virtual void EmitInverted(Label *label) const
static const int kBitFieldOffset
void DoDeferredStringCharFromCode(LStringCharFromCode *instr)
void DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal *instr, Label *map_check)
const intptr_t kSmiTagMask
static const int kCodeEntryOffset
static const int kPrototypeOrInitialMapOffset
static const int kValueOffset
enable upcoming ES6 features enable harmony block scoping enable harmony enable harmony proxies enable harmony generators enable harmony numeric enable harmony string enable harmony math functions harmony_scoping harmony_symbols harmony_collections harmony_iteration harmony_strings harmony_scoping harmony_maths tracks arrays with only smi values Optimize object Array DOM strings and string pretenure call new trace pretenuring decisions of HAllocate instructions track fields with only smi values track fields with heap values track_fields track_fields Enables optimizations which favor memory size over execution speed use string slices optimization filter maximum number of GVN fix point iterations use function inlining use allocation folding eliminate write barriers targeting allocations in optimized code maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining crankshaft harvests type feedback from stub cache trace check elimination phase hydrogen tracing filter trace hydrogen to given file name trace inlining decisions trace store elimination trace all use positions trace global value numbering trace hydrogen escape analysis trace the tracking of allocation sites trace map generalization environment for every instruction deoptimize every n garbage collections put a break point before deoptimizing deoptimize uncommon cases use on stack replacement trace array bounds check elimination perform array index dehoisting use load elimination use store elimination use constant folding eliminate unreachable code number of stress runs when picking a function to watch for shared function not JSFunction itself flushes the cache of optimized code for closures on every GC functions with arguments object maximum number of escape analysis fix point iterations allow uint32 values on optimize frames if they are used only in safe operations track concurrent recompilation artificial compilation delay in ms concurrent on stack replacement do not emit check maps for constant values that have a leaf map
static const int kEnumCacheOffset
const uint32_t kTwoByteStringTag
static const int kCallSizeWithRelocation
int StackSlotOffset(int index)
const LowDwVfpRegister d0
virtual void BeforeCall(int call_size) const
RegisterType type() const
static Smi * FromInt(int value)
Smi * ToSmi(LConstantOperand *op) const
static TypeFeedbackId None()
bool NeedsEagerFrame() const
SafepointGenerator(LCodeGen *codegen, LPointerMap *pointers, Safepoint::DeoptMode mode)
static Handle< Code > GetUninitialized(Isolate *isolate, Token::Value op)
static const int kDataOffset
BranchIfHeapNumber(LCodeGen *codegen, const Register &value)
bool IsSmi(LConstantOperand *op) const
static Handle< T > cast(Handle< S > that)
static const int kGlobalReceiverOffset
static Representation Integer32()
virtual void EmitInverted(Label *label) const
BranchIfNonZeroNumber(LCodeGen *codegen, const FPRegister &value, const FPRegister &scratch)
virtual void Emit(Label *label) const
static const unsigned int kContainsCachedArrayIndexMask
int MaskToBit(uint64_t mask)
AllocationSiteOverrideMode
static void EmitNotInlined(MacroAssembler *masm)
#define ASSERT(condition)
virtual void Emit(Label *label) const
void DoDeferredStringCharCodeAt(LStringCharCodeAt *instr)
static const int kContextOffset
const int kPointerSizeLog2
static const int kInObjectFieldCount
const uint32_t kStringRepresentationMask
virtual void EmitInverted(Label *label) const
static const int kCallerFPOffset
PerThreadAssertScopeDebugOnly< DEFERRED_HANDLE_DEREFERENCE_ASSERT, true > AllowDeferredHandleDereference
static Handle< Code > initialize_stub(Isolate *isolate, ExtraICState extra_state)
static const int kInstanceClassNameOffset
int WhichPowerOf2(uint32_t x)
bool IsSimpleMapChangeTransition(ElementsKind from_kind, ElementsKind to_kind)
Variable * parameter(int index) const
static Operand UntagSmiAndScale(Register smi, int scale)
bool IsInteger32Constant(LConstantOperand *op) const
static const int kContextOffset
virtual void Emit(Label *label) const
Condition ReverseConditionForCmp(Condition cond)
void DoDeferredStackCheck(LStackCheck *instr)
const uint64_t kSmiShiftMask
int LookupDestination(int block_id) const
const unsigned kWRegSizeInBits
Operand ToOperand(LOperand *op)
MemOperand UntagSmiFieldMemOperand(Register object, int offset)
MemOperand UntagSmiMemOperand(Register object, int offset)
const uint64_t kHoleNanInt64
int32_t WhichPowerOf2Abs(int32_t x)
enable upcoming ES6 features enable harmony block scoping enable harmony enable harmony proxies enable harmony generators enable harmony numeric enable harmony string enable harmony math functions harmony_scoping harmony_symbols harmony_collections harmony_iteration harmony_strings harmony_scoping harmony_maths tracks arrays with only smi values Optimize object Array DOM strings and string pretenure call new trace pretenuring decisions of HAllocate instructions track fields with only smi values track fields with heap values track_fields track_fields Enables optimizations which favor memory size over execution speed use string slices optimization filter maximum number of GVN fix point iterations use function inlining use allocation folding eliminate write barriers targeting allocations in optimized code maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining crankshaft harvests type feedback from stub cache trace check elimination phase hydrogen tracing filter trace hydrogen to given file name trace inlining decisions trace store elimination trace all use positions trace global value numbering trace hydrogen escape analysis trace the tracking of allocation sites trace map generalization environment for every instruction deoptimize every n garbage collections put a break point before deoptimizing deoptimize uncommon cases use on stack replacement trace array bounds check elimination perform array index dehoisting use load elimination use store elimination use constant folding eliminate unreachable code number of stress runs when picking a function to watch for shared function not JSFunction itself flushes the cache of optimized code for closures on every GC functions with arguments object maximum number of escape analysis fix point iterations allow uint32 values on optimize frames if they are used only in safe operations track concurrent recompilation artificial compilation delay in ms concurrent on stack replacement do not emit check maps for constant values that have a leaf deoptimize the optimized code if the layout of the maps changes number of stack frames inspected by the profiler percentage of ICs that must have type info to allow optimization extra verbose compilation tracing generate extra emit comments in code disassembly enable use of SSE3 instructions if available enable use of CMOV instruction if available enable use of VFP3 instructions if available enable use of NEON instructions if enable use of SDIV and UDIV instructions if enable loading bit constant by means of movw movt instruction enable unaligned accesses for enable use of d16 d31 registers on ARM this requires VFP3 force all emitted branches to be in long mode(MIPS only)") DEFINE_string(expose_natives_as
DwVfpRegister DoubleRegister
enable upcoming ES6 features enable harmony block scoping enable harmony enable harmony proxies enable harmony generators enable harmony numeric enable harmony string enable harmony math functions harmony_scoping harmony_symbols harmony_collections harmony_iteration harmony_strings harmony_scoping harmony_maths tracks arrays with only smi values Optimize object size
friend class LEnvironment
static const int kLengthOffset
STATIC_ASSERT(sizeof(CPURegister)==sizeof(Register))
static const int kValueOffset
enable upcoming ES6 features enable harmony block scoping enable harmony enable harmony proxies enable harmony generators enable harmony numeric enable harmony string enable harmony math functions harmony_scoping harmony_symbols harmony_collections harmony_iteration harmony_strings harmony_scoping harmony_maths tracks arrays with only smi values Optimize object Array DOM strings and string pretenure call new trace pretenuring decisions of HAllocate instructions track fields with only smi values track fields with heap values track_fields track_fields Enables optimizations which favor memory size over execution speed use string slices optimization filter maximum number of GVN fix point iterations use function inlining use allocation folding eliminate write barriers targeting allocations in optimized code maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining crankshaft harvests type feedback from stub cache trace check elimination phase hydrogen tracing filter trace hydrogen to given file name trace inlining decisions trace store elimination trace all use positions trace global value numbering trace hydrogen escape analysis trace the tracking of allocation sites trace map generalization environment for every instruction deoptimize every n garbage collections put a break point before deoptimizing deoptimize uncommon cases use on stack replacement trace array bounds check elimination perform array index dehoisting use load elimination use store elimination use constant folding eliminate unreachable code number of stress runs when picking a function to watch for shared function not JSFunction itself flushes the cache of optimized code for closures on every GC functions with arguments object maximum number of escape analysis fix point iterations allow uint32 values on optimize frames if they are used only in safe operations track concurrent recompilation artificial compilation delay in ms concurrent on stack replacement do not emit check maps for constant values that have a leaf deoptimize the optimized code if the layout of the maps changes number of stack frames inspected by the profiler percentage of ICs that must have type info to allow optimization extra verbose compilation tracing generate extra emit comments in code disassembly enable use of SSE3 instructions if available enable use of CMOV instruction if available enable use of VFP3 instructions if available enable use of NEON instructions if enable use of SDIV and UDIV instructions if enable loading bit constant by means of movw movt instruction enable unaligned accesses for enable use of d16 d31 registers on ARM this requires VFP3 force all emitted branches to be in long expose natives in global object expose freeBuffer extension expose gc extension under the specified name expose externalize string extension number of stack frames to capture disable builtin natives files print name of functions for which code is generated use random jit cookie to mask large constants trace lazy optimization use adaptive optimizations always try to OSR functions trace optimize function deoptimization minimum length for automatic enable preparsing maximum number of optimization attempts before giving up cache prototype transitions trace debugging JSON request response trace out of bounds accesses to external arrays trace_js_array_abuse automatically set the debug break flag when debugger commands are in the queue abort by crashing maximum length of function source code printed in a 
stack trace max size of the new max size of the old max size of executable always perform global GCs print one trace line following each garbage collection do not print trace line after scavenger collection print statistics of the maximum memory committed for the heap in only print modified registers Don t break for ASM_UNIMPLEMENTED_BREAK macros print stack trace when an illegal exception is thrown randomize hashes to avoid predictable hash Fixed seed to use to hash property Print the time it takes to deserialize the snapshot testing_bool_flag testing_int_flag string flag tmp file in which to serialize heap Print the time it takes to lazily compile hydrogen code stubs concurrent_recompilation concurrent_sweeping Print usage including flags
bool IsFastPackedElementsKind(ElementsKind kind)
Condition InvertCondition(Condition cond)
static const int kDontAdaptArgumentsSentinel
void DoDeferredNumberTagD(LNumberTagD *instr)
static uint32_t update(uint32_tprevious, AllocationSpacevalue)
static Address GetDeoptimizationEntry(Isolate *isolate, int id, BailoutType type, GetEntryMode mode=ENSURE_ENTRY_CODE)
BranchOnCondition(LCodeGen *codegen, Condition cond)
void check(i::Vector< const uint8_t > string)
const unsigned kInstructionSize
enable upcoming ES6 features enable harmony block scoping enable harmony enable harmony proxies enable harmony generators enable harmony numeric enable harmony string enable harmony math functions harmony_scoping harmony_symbols harmony_collections harmony_iteration harmony_strings harmony_scoping harmony_maths tracks arrays with only smi values Optimize object Array shift
static void MaybeCallEntryHook(MacroAssembler *masm)
DwVfpRegister ToDoubleRegister(LOperand *op) const
static FPRegister FromAllocationIndex(unsigned int index)
void DoDeferredAllocate(LAllocate *instr)
static int GetDeoptimizationId(Isolate *isolate, Address addr, BailoutType type)
static const int kMaxRegularHeapObjectSize
enable upcoming ES6 features enable harmony block scoping enable harmony enable harmony proxies enable harmony generators enable harmony numeric enable harmony string enable harmony math functions harmony_scoping harmony_symbols harmony_collections harmony_iteration harmony_strings harmony_scoping harmony_maths tracks arrays with only smi values Optimize object Array DOM strings and string pretenure call new trace pretenuring decisions of HAllocate instructions track fields with only smi values track fields with heap values track_fields track_fields Enables optimizations which favor memory size over execution speed use string slices optimization filter maximum number of GVN fix point iterations use function inlining use allocation folding eliminate write barriers targeting allocations in optimized code maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining crankshaft harvests type feedback from stub cache trace check elimination phase hydrogen tracing filter trace hydrogen to given file name trace inlining decisions trace store elimination trace all use positions trace global value numbering trace hydrogen escape analysis trace the tracking of allocation sites trace map generalization environment for every instruction deoptimize every n garbage collections put a break point before deoptimizing deoptimize uncommon cases use on stack replacement trace array bounds check elimination perform array index dehoisting use load elimination use store elimination use constant folding eliminate unreachable code number of stress runs when picking a function to watch for shared function not JSFunction itself flushes the cache of optimized code for closures on every GC functions with arguments object maximum number of escape analysis fix point iterations allow uint32 values on optimize frames if they are used only in safe operations track concurrent recompilation artificial compilation delay in ms concurrent on stack replacement do not emit check maps for constant values that have a leaf deoptimize the optimized code if the layout of the maps changes number of stack frames inspected by the profiler percentage of ICs that must have type info to allow optimization extra verbose compilation tracing generate extra code(assertions) for debugging") DEFINE_bool(code_comments
static Register FromAllocationIndex(int index)
static const int kCallerSPOffset
static const int kCacheStampOffset
bool IsFixedTypedArrayElementsKind(ElementsKind kind)
static const int kPropertiesOffset
int32_t ToInteger32(LConstantOperand *op) const
int num_parameters() const
static const int kMarkerOffset
bool IsFastSmiElementsKind(ElementsKind kind)
static int OffsetOfElementAt(int index)
static void Generate(MacroAssembler *masm, Register string, Register index, Register result, Label *call_runtime)
Handle< Object > ToHandle(LConstantOperand *op) const
static const int kHeaderSize
static const int kArrayIndexValueBits
virtual void Emit(Label *label) const
void DoDeferredInstanceMigration(LCheckMaps *instr, Register object)
double ToDouble(LConstantOperand *op) const
int ElementsKindToShiftSize(ElementsKind elements_kind)
Vector< const char > CStrVector(const char *data)
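A standalone sketch (not V8 source) of a CStrVector-style helper: it wraps a NUL-terminated C string in a small (data, length) view so callers do not have to pass raw char*/length pairs separately. Vector here is a minimal stand-in for the real template.

#include <cstring>
#include <cstdio>

namespace sketch {

template <typename T>
struct Vector {
  const T* start;
  int length;
};

Vector<const char> CStrVector(const char* data) {
  // Length is measured once at wrap time; the view does not own the data.
  return Vector<const char>{data, static_cast<int>(std::strlen(data))};
}

}  // namespace sketch

int main() {
  sketch::Vector<const char> v = sketch::CStrVector("deopt reason");
  std::printf("%.*s (%d chars)\n", v.length, v.start, v.length);
  return 0;
}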
static int OffsetOfElementAt(int index)
static int SizeFor(int length)
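The two array-layout helpers above (OffsetOfElementAt and SizeFor) lend themselves to a worked example. A minimal standalone sketch, not V8 source, of how they are typically computed for a FixedArray-style layout; the header and pointer sizes below are assumptions chosen for a 64-bit build, not values taken from this file.

#include <cstdio>

namespace sketch {

const int kPointerSize = 8;            // assumption: 64-bit tagged pointers
const int kFixedArrayHeaderSize = 16;  // assumption: map pointer + length word

// Byte offset of element |index| from the start of the array object.
int OffsetOfElementAt(int index) {
  return kFixedArrayHeaderSize + index * kPointerSize;
}

// Total object size, in bytes, for an array holding |length| elements.
int SizeFor(int length) {
  return OffsetOfElementAt(length);
}

}  // namespace sketch

int main() {
  std::printf("element 3 at offset %d, array of 4 is %d bytes\n",
              sketch::OffsetOfElementAt(3), sketch::SizeFor(4));
  return 0;
}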
Operand ToOperand32U(LOperand *op)
bool NeedsDeferredFrame() const
static const int kHeaderSize
void Load(const v8::FunctionCallbackInfo< v8::Value > &args)
static const int kMapOffset
static const int kValueOffset
static const int kFixedFrameSizeFromFp
Operand ToOperand32I(LOperand *op)
virtual ~SafepointGenerator()
virtual void EmitInverted(Label *label) const
MemOperand FieldMemOperand(Register object, int offset)
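A standalone sketch, not V8 source, of what a FieldMemOperand-style helper does: heap-object pointers are tagged, so a field access subtracts the tag (assumed to be 1 here) from the field's declared offset. MemOperand is mimicked by a plain struct.

#include <cstdint>
#include <cstdio>

namespace sketch {

const int kHeapObjectTag = 1;  // assumption: low-bit tagging of heap pointers

struct MemOperand {
  int base_reg_code;
  int64_t offset;
};

MemOperand FieldMemOperand(int object_reg_code, int offset) {
  // The declared field offset includes the tag, so remove it to get the
  // real displacement from the tagged pointer held in the base register.
  return MemOperand{object_reg_code, offset - kHeapObjectTag};
}

}  // namespace sketch

int main() {
  sketch::MemOperand op = sketch::FieldMemOperand(/*x0*/ 0, /*kValueOffset*/ 8);
  std::printf("base x%d, displacement %lld\n", op.base_reg_code,
              static_cast<long long>(op.offset));
  return 0;
}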
virtual void AfterCall() const
static const int kHasNonInstancePrototype
void WriteTranslation(LEnvironment *environment, Translation *translation)
static const int kFunctionOffset
static const int kNotDeoptimizationEntry
LinkRegisterStatus GetLinkRegisterState() const
static const int kHeaderSize
static AllocationSiteMode GetMode(ElementsKind boilerplate_elements_kind)
#define ASSERT_EQ(v1, v2)
kInstanceClassNameOffset, kNeedsAccessCheckBit, kRemovePrototypeBit, kIsExpressionBit, kAllowLazyCompilation, kUsesArguments, formal_parameter_count
static const int kConstructorOffset
const uint32_t kOneByteStringTag
static double canonical_not_the_hole_nan_as_double()
CompareAndBranch(LCodeGen *codegen, Condition cond, const Register &lhs, const Operand &rhs)
static const int kIsUndetectable
static const int kHeaderSize
Register ToRegister(LOperand *op) const
void Add(const T &element, AllocationPolicy allocator=AllocationPolicy())
void DoDeferredTaggedToI(LTaggedToI *instr)
static const int kPrototypeOffset
Register ToRegister32(LOperand *op) const
DEFINE_bool(harmony_strings, ...)
static const int kHashShift
#define RUNTIME_ENTRY(name, nargs, ressize)
const LowDwVfpRegister d1
static void EmitMathExp(MacroAssembler *masm, DwVfpRegister input, DwVfpRegister result, DwVfpRegister double_scratch1, DwVfpRegister double_scratch2, Register temp1, Register temp2, Register temp3)
MemOperand ToMemOperand(LOperand *op) const
bool IsNextEmittedBlock(int block_id) const
MemOperand ContextMemOperand(Register context, int index)
virtual void EmitInverted(Label *label) const
static const int kCompilerHintsOffset
LCodeGen(LChunk *chunk, MacroAssembler *assembler, CompilationInfo *info)
static const int kSharedFunctionInfoOffset
#define ASM_UNIMPLEMENTED_BREAK(message)
BranchType InvertBranchType(BranchType type)
static const int kMaxValue
friend class SafepointGenerator
virtual void EmitInverted(Label *label) const
ElementsKind GetHoleyElementsKind(ElementsKind packed_kind)
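A standalone sketch of the idea behind GetHoleyElementsKind(): every packed fast elements kind has a holey sibling that admits holes, and the helper maps one to the other. The enum below is a simplified stand-in, not the real ElementsKind numbering.

#include <cstdio>

namespace sketch {

enum ElementsKind {
  FAST_SMI_ELEMENTS,
  FAST_HOLEY_SMI_ELEMENTS,
  FAST_ELEMENTS,
  FAST_HOLEY_ELEMENTS,
  FAST_DOUBLE_ELEMENTS,
  FAST_HOLEY_DOUBLE_ELEMENTS
};

ElementsKind GetHoleyElementsKind(ElementsKind packed_kind) {
  switch (packed_kind) {
    case FAST_SMI_ELEMENTS:    return FAST_HOLEY_SMI_ELEMENTS;
    case FAST_ELEMENTS:        return FAST_HOLEY_ELEMENTS;
    case FAST_DOUBLE_ELEMENTS: return FAST_HOLEY_DOUBLE_ELEMENTS;
    default:                   return packed_kind;  // already a holey kind
  }
}

}  // namespace sketch

int main() {
  std::printf("%d\n", sketch::GetHoleyElementsKind(sketch::FAST_SMI_ELEMENTS));
  return 0;
}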
static const int32_t kMaxOneByteCharCode
bool EvalComparison(Token::Value op, double op1, double op2)
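A standalone sketch of an EvalComparison-style constant-folding helper: it applies a relational token to two doubles, which also yields the expected NaN behaviour (every comparison involving NaN is false except inequality). The Token enum here is a stand-in covering only the operators used in the sketch.

#include <cstdio>

namespace sketch {

enum class Token { EQ, NE, LT, GT, LTE, GTE };

bool EvalComparison(Token op, double op1, double op2) {
  switch (op) {
    case Token::EQ:  return op1 == op2;
    case Token::NE:  return op1 != op2;
    case Token::LT:  return op1 < op2;
    case Token::GT:  return op1 > op2;
    case Token::LTE: return op1 <= op2;
    case Token::GTE: return op1 >= op2;
  }
  return false;  // unreachable; keeps the compiler happy
}

}  // namespace sketch

int main() {
  std::printf("%d %d\n",
              sketch::EvalComparison(sketch::Token::LT, 1.0, 2.0),
              sketch::EvalComparison(sketch::Token::EQ, 0.0, -0.0));
  return 0;
}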
MemOperand GlobalObjectMemOperand()
static uint32_t encode(bool value)
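encode(bool value) has the shape of a BitField-style packer, which stores a flag in a fixed bit range of a 32-bit word. A standalone sketch under that assumption; the bit position and field name below are illustrative, not taken from this file.

#include <cstdint>
#include <cstdio>

namespace sketch {

template <class T, int shift, int size>
struct BitField {
  static constexpr uint32_t kMask = ((1u << size) - 1u) << shift;

  // Place |value| into the field's bit range of a packed word.
  static uint32_t encode(T value) {
    return static_cast<uint32_t>(value) << shift;
  }

  // Extract the field's value back out of a packed word.
  static T decode(uint32_t packed) {
    return static_cast<T>((packed & kMask) >> shift);
  }
};

// Hypothetical flag living in bit 3 of some packed hints word.
typedef BitField<bool, 3, 1> IsUndetectableField;

}  // namespace sketch

int main() {
  uint32_t bits = sketch::IsUndetectableField::encode(true);
  std::printf("packed: 0x%x, decoded: %d\n", bits,
              sketch::IsUndetectableField::decode(bits));
  return 0;
}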
TestAndBranch(LCodeGen *codegen, Condition cond, const Register &value, uint64_t mask)
const uint32_t kStringEncodingMask
BranchIfRoot(LCodeGen *codegen, const Register &value, Heap::RootListIndex index)
virtual void Emit(Label *label) const
bool AreAliased(const CPURegister &reg1, const CPURegister &reg2, const CPURegister &reg3=NoReg, const CPURegister &reg4=NoReg, const CPURegister &reg5=NoReg, const CPURegister &reg6=NoReg, const CPURegister &reg7=NoReg, const CPURegister &reg8=NoReg)
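A standalone sketch of an AreAliased-style check: it reports whether any two valid registers in a list share a register code, ignoring entries equal to a NoReg sentinel (mirroring how the trailing optional parameters default out). CPURegister here is a stand-in struct, not the real type.

#include <cstdio>

namespace sketch {

struct CPURegister {
  int code;  // -1 means "no register"
  bool IsValid() const { return code >= 0; }
};

const CPURegister NoReg = {-1};

bool AreAliased(const CPURegister regs[], int count) {
  for (int i = 0; i < count; ++i) {
    if (!regs[i].IsValid()) continue;
    for (int j = i + 1; j < count; ++j) {
      if (regs[j].IsValid() && regs[i].code == regs[j].code) return true;
    }
  }
  return false;
}

}  // namespace sketch

int main() {
  sketch::CPURegister regs[] = {{0}, {1}, {1}, sketch::NoReg};
  std::printf("aliased: %d\n", sketch::AreAliased(regs, 4));  // prints 1
  return 0;
}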
virtual void Emit(Label *label) const
static const int kInstanceTypeOffset
friend class LDeferredCode