class SafepointGenerator : public CallWrapper {
 public:
  SafepointGenerator(LCodeGen* codegen,
                     LPointerMap* pointers,
                     Safepoint::DeoptMode mode)
      : codegen_(codegen),
        pointers_(pointers),
        deopt_mode_(mode) { }
  virtual ~SafepointGenerator() { }

  virtual void BeforeCall(int call_size) const { }

  virtual void AfterCall() const {
    codegen_->RecordSafepoint(pointers_, deopt_mode_);
  }

 private:
  LCodeGen* codegen_;
  LPointerMap* pointers_;
  Safepoint::DeoptMode deopt_mode_;
};

bool LCodeGen::GenerateCode() {
  HPhase phase("Z_Code generation", chunk());
  CpuFeatures::Scope scope1(VFP3);
  CpuFeatures::Scope scope2(ARMv7);

  CodeStub::GenerateFPStubs();

  return GeneratePrologue() &&
         GenerateBody() &&
         GenerateDeferredCode() &&
         GenerateDeoptJumpTable() &&
         GenerateSafepointTable();
}

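// The five Generate* phases run in exactly this order: the deferred code
// and the deoptimization jump table are emitted after the instruction body
// that branches into them, and the safepoint table can only be emitted once
// every safepoint position is known, so it comes last.
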
void LCodeGen::FinishCode(Handle<Code> code) {
  code->set_stack_slots(GetStackSlotCount());
  code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
  PopulateDeoptimizationData(code);
}


void LCodeGen::Abort(const char* format, ...) {
  if (FLAG_trace_bailout) {
    SmartArrayPointer<char> name(
        info()->shared_info()->DebugName()->ToCString());
    PrintF("Aborting LCodeGen in @\"%s\": ", *name);
    va_list arguments;
    va_start(arguments, format);
    OS::VPrint(format, arguments);
    va_end(arguments);
    PrintF("\n");
  }
  status_ = ABORTED;
}


void LCodeGen::Comment(const char* format, ...) {
  if (!FLAG_code_comments) return;
  char buffer[4 * KB];
  StringBuilder builder(buffer, ARRAY_SIZE(buffer));
  va_list arguments;
  va_start(arguments, format);
  builder.AddFormattedList(format, arguments);
  va_end(arguments);

  // Copy the string before recording it in the assembler to avoid
  // issues when the stack-allocated buffer goes out of scope.
  size_t length = builder.position();
  Vector<char> copy = Vector<char>::New(length + 1);
  memcpy(copy.start(), builder.Finalize(), copy.length());
  masm()->RecordComment(copy.start());
}

bool LCodeGen::GeneratePrologue() {
#ifdef DEBUG
  if (strlen(FLAG_stop_at) > 0 &&
      info_->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
    __ stop("stop_at");
  }
#endif

  // Strict mode functions and builtins need to replace the receiver with
  // undefined when called as functions (without an explicit receiver
  // object). r5 is zero for method calls and non-zero for function calls.
  if (!info_->is_classic_mode() || info_->is_native()) {
    Label ok;
    __ cmp(r5, Operand(0));
    __ b(eq, &ok);
    int receiver_offset = scope()->num_parameters() * kPointerSize;
    __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
    __ str(r2, MemOperand(sp, receiver_offset));
    __ bind(&ok);
  }

  // Reserve space for the stack slots needed by the code.
  int slots = GetStackSlotCount();
  if (slots > 0 && FLAG_debug_code) {
    __ mov(r0, Operand(slots));
    // ... zap the reserved slots with a recognizable value ...
  }

  // Possibly allocate a local context.
  int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
  if (heap_slots > 0) {
    Comment(";;; Allocate local context");
    if (heap_slots <= FastNewContextStub::kMaximumSlots) {
      FastNewContextStub stub(heap_slots);
      __ CallStub(&stub);
    } else {
      __ CallRuntime(Runtime::kNewFunctionContext, 1);
    }
    RecordSafepoint(Safepoint::kNoLazyDeopt);
    // Copy any necessary parameters into the context.
    int num_parameters = scope()->num_parameters();
    for (int i = 0; i < num_parameters; i++) {
      Variable* var = scope()->parameter(i);
      if (var->IsContextSlot()) {
        // Load the parameter from the stack, store it in the context
        // slot, and update the write barrier for the store.
        MemOperand target = ContextOperand(cp, var->index());
        // ...
        __ str(r0, target);
        __ RecordWriteContextSlot(
            cp, target.offset(), r0, r3, kLRHasBeenSaved, kDontSaveFPRegs);
      }
    }
    Comment(";;; End allocate local context");
  }

  // Trace the call.
  if (FLAG_trace) {
    __ CallRuntime(Runtime::kTraceEnter, 0);
  }
  return !is_aborted();
}

bool LCodeGen::GenerateBody() {
  bool emit_instructions = true;
  for (current_instruction_ = 0;
       !is_aborted() && current_instruction_ < instructions_->length();
       current_instruction_++) {
    LInstruction* instr = instructions_->at(current_instruction_);
    if (instr->IsLabel()) {
      LLabel* label = LLabel::cast(instr);
      emit_instructions = !label->HasReplacement();
    }
    if (emit_instructions) {
      Comment(";;; @%d: %s.", current_instruction_, instr->Mnemonic());
      instr->CompileToNative(this);
    }
  }
  EnsureSpaceForLazyDeopt();
  return !is_aborted();
}


bool LCodeGen::GenerateDeferredCode() {
  if (deferred_.length() > 0) {
    for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
      LDeferredCode* code = deferred_[i];
      __ bind(code->entry());
      Comment(";;; Deferred code @%d: %s.",
              code->instruction_index(),
              code->instr()->Mnemonic());
      code->Generate();
      __ jmp(code->exit());
    }
  }

  // Force constant pool emission at the end of the deferred code to make
  // sure that no constant pools are emitted after.
  masm()->CheckConstPool(true, false);

  return !is_aborted();
}

bool LCodeGen::GenerateDeoptJumpTable() {
  // Check that the jump table is accessible from everywhere in the function
  // code, i.e. that offsets to the table can be encoded in the 24-bit signed
  // immediate of a branch instruction. Each entry in the jump table
  // generates one instruction and inlines one 32-bit data word after it.
  if (!is_int24((masm()->pc_offset() / Assembler::kInstrSize) +
      deopt_jump_table_.length() * 2)) {
    Abort("Generated code is too large");
  }

  // Block the constant pool emission during the jump table emission.
  __ BlockConstPoolFor(deopt_jump_table_.length());
  __ RecordComment("[ Deoptimisation jump table");
  Label table_start;
  __ bind(&table_start);
  for (int i = 0; i < deopt_jump_table_.length(); i++) {
    __ bind(&deopt_jump_table_[i].label);
    __ ldr(pc, MemOperand(pc, Assembler::kInstrSize - Assembler::kPcLoadDelta));
    __ dd(reinterpret_cast<uint32_t>(deopt_jump_table_[i].address));
  }
  ASSERT(masm()->InstructionsGeneratedSince(&table_start) ==
      deopt_jump_table_.length() * 2);
  __ RecordComment("]");

  // The deoptimization jump table is the last part of the instruction
  // sequence. Mark the generated code as done unless we bailed out.
  if (!is_aborted()) status_ = DONE;
  return !is_aborted();
}

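// Each jump table entry is exactly two words: a load through the pc that
// jumps via the 32-bit deoptimizer entry address, which `dd` stores
// immediately after it (pc reads as the current instruction + 8 on ARM, so
// the kInstrSize - kPcLoadDelta offset addresses the word after the load).
// That fixed two-word shape is what both the is_int24() size check and the
// InstructionsGeneratedSince() assert above rely on.
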
bool LCodeGen::GenerateSafepointTable() {
  safepoints_.Emit(masm(), GetStackSlotCount());
  return !is_aborted();
}


Register LCodeGen::EmitLoadRegister(LOperand* op, Register scratch) {
  if (op->IsRegister()) {
    return ToRegister(op->index());
  } else if (op->IsConstantOperand()) {
    LConstantOperand* const_op = LConstantOperand::cast(op);
    Handle<Object> literal = chunk_->LookupLiteral(const_op);
    Representation r = chunk_->LookupLiteralRepresentation(const_op);
    if (r.IsInteger32()) {
      ASSERT(literal->IsNumber());
      __ mov(scratch, Operand(static_cast<int32_t>(literal->Number())));
    } else if (r.IsDouble()) {
      Abort("EmitLoadRegister: Unsupported double immediate.");
    } else {
      ASSERT(r.IsTagged());
      if (literal->IsSmi()) {
        __ mov(scratch, Operand(literal));
      } else {
        __ LoadHeapObject(scratch, Handle<HeapObject>::cast(literal));
      }
    }
    return scratch;
  } else if (op->IsStackSlot() || op->IsArgument()) {
    __ ldr(scratch, ToMemOperand(op));
    return scratch;
  }
  UNREACHABLE();
  return scratch;
}


DoubleRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
  ASSERT(op->IsDoubleRegister());
  return ToDoubleRegister(op->index());
}

DoubleRegister LCodeGen::EmitLoadDoubleRegister(LOperand* op,
                                                SwVfpRegister flt_scratch,
                                                DoubleRegister dbl_scratch) {
  if (op->IsDoubleRegister()) {
    return ToDoubleRegister(op->index());
  } else if (op->IsConstantOperand()) {
    LConstantOperand* const_op = LConstantOperand::cast(op);
    Handle<Object> literal = chunk_->LookupLiteral(const_op);
    Representation r = chunk_->LookupLiteralRepresentation(const_op);
    if (r.IsInteger32()) {
      ASSERT(literal->IsNumber());
      __ mov(ip, Operand(static_cast<int32_t>(literal->Number())));
      __ vmov(flt_scratch, ip);
      __ vcvt_f64_s32(dbl_scratch, flt_scratch);
      return dbl_scratch;
    } else if (r.IsDouble()) {
      Abort("unsupported double immediate");
    } else if (r.IsTagged()) {
      Abort("unsupported tagged immediate");
    }
  } else if (op->IsStackSlot() || op->IsArgument()) {
    MemOperand mem_op = ToMemOperand(op);
    __ vldr(dbl_scratch, mem_op.rn(), mem_op.offset());
    return dbl_scratch;
  }
  UNREACHABLE();
  return dbl_scratch;
}

Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
  Handle<Object> literal = chunk_->LookupLiteral(op);
  ASSERT(chunk_->LookupLiteralRepresentation(op).IsTagged());
  return literal;
}


bool LCodeGen::IsInteger32(LConstantOperand* op) const {
  return chunk_->LookupLiteralRepresentation(op).IsInteger32();
}


int LCodeGen::ToInteger32(LConstantOperand* op) const {
  Handle<Object> value = chunk_->LookupLiteral(op);
  ASSERT(chunk_->LookupLiteralRepresentation(op).IsInteger32());
  ASSERT(static_cast<double>(static_cast<int32_t>(value->Number())) ==
      value->Number());
  return static_cast<int32_t>(value->Number());
}


double LCodeGen::ToDouble(LConstantOperand* op) const {
  Handle<Object> value = chunk_->LookupLiteral(op);
  return value->Number();
}


Operand LCodeGen::ToOperand(LOperand* op) {
  if (op->IsConstantOperand()) {
    LConstantOperand* const_op = LConstantOperand::cast(op);
    Handle<Object> literal = chunk_->LookupLiteral(const_op);
    Representation r = chunk_->LookupLiteralRepresentation(const_op);
    if (r.IsInteger32()) {
      ASSERT(literal->IsNumber());
      return Operand(static_cast<int32_t>(literal->Number()));
    } else if (r.IsDouble()) {
      Abort("ToOperand Unsupported double immediate.");
    }
    ASSERT(r.IsTagged());
    return Operand(literal);
  } else if (op->IsRegister()) {
    return Operand(ToRegister(op));
  } else if (op->IsDoubleRegister()) {
    Abort("ToOperand IsDoubleRegister unimplemented");
    return Operand(0);
  }
  // Stack slots not implemented, use ToMemOperand instead.
  UNREACHABLE();
  return Operand(0);
}


MemOperand LCodeGen::ToMemOperand(LOperand* op) const {
  ASSERT(!op->IsRegister());
  ASSERT(!op->IsDoubleRegister());
  ASSERT(op->IsStackSlot() || op->IsDoubleStackSlot());
  int index = op->index();
  if (index >= 0) {
    // Local or spill slot. Skip the frame pointer, function, and
    // context in the fixed part of the frame.
    return MemOperand(fp, -(index + 3) * kPointerSize);
  } else {
    // Incoming parameter. Skip the return address.
    return MemOperand(fp, -(index - 1) * kPointerSize);
  }
}


MemOperand LCodeGen::ToHighMemOperand(LOperand* op) const {
  ASSERT(op->IsDoubleStackSlot());
  int index = op->index();
  if (index >= 0) {
    // Local or spill slot. Skip the frame pointer, function, context,
    // and the first word of the double in the fixed part of the frame.
    return MemOperand(fp, -(index + 3) * kPointerSize + kPointerSize);
  } else {
    // Incoming parameter. Skip the return address and the first word
    // of the double.
    return MemOperand(fp, -(index - 1) * kPointerSize + kPointerSize);
  }
}

void LCodeGen::WriteTranslation(LEnvironment* environment,
                                Translation* translation) {
  if (environment == NULL) return;

  // The translation includes one command per value in the environment.
  int translation_size = environment->values()->length();
  // The output frame height does not include the parameters.
  int height = translation_size - environment->parameter_count();

  WriteTranslation(environment->outer(), translation);
  int closure_id = DefineDeoptimizationLiteral(environment->closure());
  switch (environment->frame_type()) {
    case JS_FUNCTION:
      translation->BeginJSFrame(environment->ast_id(), closure_id, height);
      break;
    case JS_CONSTRUCT:
      translation->BeginConstructStubFrame(closure_id, translation_size);
      break;
    case ARGUMENTS_ADAPTOR:
      translation->BeginArgumentsAdaptorFrame(closure_id, translation_size);
      break;
  }
  for (int i = 0; i < translation_size; ++i) {
    LOperand* value = environment->values()->at(i);
    // spilled_registers_ and spilled_double_registers_ are either
    // both NULL or both set.
    if (environment->spilled_registers() != NULL && value != NULL) {
      if (value->IsRegister() &&
          environment->spilled_registers()[value->index()] != NULL) {
        translation->MarkDuplicate();
        AddToTranslation(translation,
                         environment->spilled_registers()[value->index()],
                         environment->HasTaggedValueAt(i));
      } else if (
          value->IsDoubleRegister() &&
          environment->spilled_double_registers()[value->index()] != NULL) {
        translation->MarkDuplicate();
        AddToTranslation(
            translation,
            environment->spilled_double_registers()[value->index()],
            false);
      }
    }

    AddToTranslation(translation, value, environment->HasTaggedValueAt(i));
  }
}

void LCodeGen::AddToTranslation(Translation* translation,
                                LOperand* op,
                                bool is_tagged) {
  if (op == NULL) {
    // TODO(twuerthinger): Introduce marker operands to indicate that this
    // value is not present and must be reconstructed from the deoptimizer.
    // Currently this is only used for the arguments object.
    translation->StoreArgumentsObject();
  } else if (op->IsStackSlot()) {
    if (is_tagged) {
      translation->StoreStackSlot(op->index());
    } else {
      translation->StoreInt32StackSlot(op->index());
    }
  } else if (op->IsDoubleStackSlot()) {
    translation->StoreDoubleStackSlot(op->index());
  } else if (op->IsArgument()) {
    ASSERT(is_tagged);
    int src_index = GetStackSlotCount() + op->index();
    translation->StoreStackSlot(src_index);
  } else if (op->IsRegister()) {
    Register reg = ToRegister(op);
    if (is_tagged) {
      translation->StoreRegister(reg);
    } else {
      translation->StoreInt32Register(reg);
    }
  } else if (op->IsDoubleRegister()) {
    DoubleRegister reg = ToDoubleRegister(op);
    translation->StoreDoubleRegister(reg);
  } else if (op->IsConstantOperand()) {
    Handle<Object> literal = chunk()->LookupLiteral(LConstantOperand::cast(op));
    int src_index = DefineDeoptimizationLiteral(literal);
    translation->StoreLiteral(src_index);
  } else {
    UNREACHABLE();
  }
}

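// A translation is the recipe the deoptimizer uses to rebuild unoptimized
// stack frames: for every slot of the target frame it records where the
// current value lives (register, stack slot, or deoptimization literal)
// and whether it is tagged or a raw int32/double that must be re-boxed.
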
void LCodeGen::CallCode(Handle<Code> code,
                        RelocInfo::Mode mode,
                        LInstruction* instr) {
  CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT);
}


void LCodeGen::CallCodeGeneric(Handle<Code> code,
                               RelocInfo::Mode mode,
                               LInstruction* instr,
                               SafepointMode safepoint_mode) {
  ASSERT(instr != NULL);
  LPointerMap* pointers = instr->pointer_map();
  RecordPosition(pointers->position());
  __ Call(code, mode);
  RecordSafepointWithLazyDeopt(instr, safepoint_mode);
}


void LCodeGen::CallRuntime(const Runtime::Function* function,
                           int num_arguments,
                           LInstruction* instr) {
  ASSERT(instr != NULL);
  LPointerMap* pointers = instr->pointer_map();
  ASSERT(pointers != NULL);
  RecordPosition(pointers->position());

  __ CallRuntime(function, num_arguments);
  RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
}


void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
                                       int argc,
                                       LInstruction* instr) {
  __ CallRuntimeSaveDoubles(id);
  RecordSafepointWithRegisters(
      instr->pointer_map(), argc, Safepoint::kNoLazyDeopt);
}

void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
                                                    Safepoint::DeoptMode mode) {
  if (!environment->HasBeenRegistered()) {
    // Count the total number of frames and the number of JS frames in the
    // environment chain.
    int frame_count = 0;
    int jsframe_count = 0;
    for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
      ++frame_count;
      if (e->frame_type() == JS_FUNCTION) {
        ++jsframe_count;
      }
    }
    Translation translation(&translations_, frame_count, jsframe_count,
                            zone());
    WriteTranslation(environment, &translation);
    int deoptimization_index = deoptimizations_.length();
    int pc_offset = masm()->pc_offset();
    environment->Register(deoptimization_index,
                          translation.index(),
                          (mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
    deoptimizations_.Add(environment, zone());
  }
}

void LCodeGen::DeoptimizeIf(Condition cc, LEnvironment* environment) {
  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
  ASSERT(environment->HasBeenRegistered());
  int id = environment->deoptimization_index();
  Address entry = Deoptimizer::GetDeoptimizationEntry(id, Deoptimizer::EAGER);
  if (entry == NULL) {
    Abort("bailout was not prepared");
    return;
  }

  ASSERT(FLAG_deopt_every_n_times < 2);  // Other values not supported on ARM.

  if (FLAG_deopt_every_n_times == 1 &&
      info_->shared_info()->opt_count() == id) {
    Deoptimizer::DeoptimizeFunction(*info_->closure());
    return;
  }

  if (FLAG_trap_on_deopt) __ stop("trap_on_deopt", cc);

  if (cc == al) {
    __ Jump(entry, RelocInfo::RUNTIME_ENTRY);
  } else {
    // We often have several deopts to the same entry, reuse the last
    // jump entry if this is the case.
    if (deopt_jump_table_.is_empty() ||
        (deopt_jump_table_.last().address != entry)) {
      deopt_jump_table_.Add(JumpTableEntry(entry), zone());
    }
    __ b(cc, &deopt_jump_table_.last().label);
  }
}

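// Conditional deopts do not branch to the deoptimizer entry directly: the
// entry address does not fit in a conditional branch, so each distinct
// entry gets a slot in the jump table emitted by GenerateDeoptJumpTable()
// and the code branches to that slot instead. Consecutive deopts to the
// same entry share a single slot.
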
void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
  int length = deoptimizations_.length();
  if (length == 0) return;
  Handle<DeoptimizationInputData> data =
      factory()->NewDeoptimizationInputData(length, TENURED);

  Handle<ByteArray> translations = translations_.CreateByteArray();
  data->SetTranslationByteArray(*translations);
  data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));

  Handle<FixedArray> literals =
      factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
  for (int i = 0; i < deoptimization_literals_.length(); i++) {
    literals->set(i, *deoptimization_literals_[i]);
  }
  data->SetLiteralArray(*literals);

  data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id()));
  data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_));

  // Populate the deoptimization entries.
  for (int i = 0; i < length; i++) {
    LEnvironment* env = deoptimizations_[i];
    data->SetAstId(i, Smi::FromInt(env->ast_id()));
    data->SetTranslationIndex(i, Smi::FromInt(env->translation_index()));
    data->SetArgumentsStackHeight(i,
                                  Smi::FromInt(env->arguments_stack_height()));
    data->SetPc(i, Smi::FromInt(env->pc_offset()));
  }
  code->set_deoptimization_data(*data);
}


int LCodeGen::DefineDeoptimizationLiteral(Handle<Object> literal) {
  int result = deoptimization_literals_.length();
  for (int i = 0; i < deoptimization_literals_.length(); ++i) {
    if (deoptimization_literals_[i].is_identical_to(literal)) return i;
  }
  deoptimization_literals_.Add(literal, zone());
  return result;
}


void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
  ASSERT(deoptimization_literals_.length() == 0);

  const ZoneList<Handle<JSFunction> >* inlined_closures =
      chunk()->inlined_closures();

  for (int i = 0, length = inlined_closures->length();
       i < length;
       i++) {
    DefineDeoptimizationLiteral(inlined_closures->at(i));
  }

  inlined_function_count_ = deoptimization_literals_.length();
}

void LCodeGen::RecordSafepointWithLazyDeopt(
    LInstruction* instr, SafepointMode safepoint_mode) {
  if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
    RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt);
  } else {
    ASSERT(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
    RecordSafepointWithRegisters(
        instr->pointer_map(), 0, Safepoint::kLazyDeopt);
  }
}


void LCodeGen::RecordSafepoint(
    LPointerMap* pointers,
    Safepoint::Kind kind,
    int arguments,
    Safepoint::DeoptMode deopt_mode) {
  ASSERT(expected_safepoint_kind_ == kind);

  const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();
  Safepoint safepoint = safepoints_.DefineSafepoint(masm(),
      kind, arguments, deopt_mode);
  for (int i = 0; i < operands->length(); i++) {
    LOperand* pointer = operands->at(i);
    if (pointer->IsStackSlot()) {
      safepoint.DefinePointerSlot(pointer->index(), zone());
    } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
      safepoint.DefinePointerRegister(ToRegister(pointer), zone());
    }
  }
  if (kind & Safepoint::kWithRegisters) {
    // Register cp always contains a pointer to the context.
    safepoint.DefinePointerRegister(cp, zone());
  }
}

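// A safepoint records, for one call site, which stack slots (and, for call
// sequences that save them, which registers) hold tagged pointers, so the
// GC can find and update them while the optimized frame is on the stack.
// The kind must match expected_safepoint_kind_, which the
// PushSafepointRegistersScope helpers set up around the call.
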
void LCodeGen::RecordSafepoint(LPointerMap* pointers,
                               Safepoint::DeoptMode deopt_mode) {
  RecordSafepoint(pointers, Safepoint::kSimple, 0, deopt_mode);
}


void LCodeGen::RecordSafepoint(Safepoint::DeoptMode deopt_mode) {
  LPointerMap empty_pointers(RelocInfo::kNoPosition, zone());
  RecordSafepoint(&empty_pointers, deopt_mode);
}


void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
                                            int arguments,
                                            Safepoint::DeoptMode deopt_mode) {
  RecordSafepoint(
      pointers, Safepoint::kWithRegisters, arguments, deopt_mode);
}


void LCodeGen::RecordSafepointWithRegistersAndDoubles(
    LPointerMap* pointers,
    int arguments,
    Safepoint::DeoptMode deopt_mode) {
  RecordSafepoint(
      pointers, Safepoint::kWithRegistersAndDoubles, arguments, deopt_mode);
}


void LCodeGen::RecordPosition(int position) {
  if (position == RelocInfo::kNoPosition) return;
  masm()->positions_recorder()->RecordPosition(position);
}


void LCodeGen::DoLabel(LLabel* label) {
  if (label->is_loop_header()) {
    Comment(";;; B%d - LOOP entry", label->block_id());
  } else {
    Comment(";;; B%d", label->block_id());
  }
  __ bind(label->label());
  current_block_ = label->block_id();
  DoGap(label);
}


void LCodeGen::DoParallelMove(LParallelMove* move) {
  resolver_.Resolve(move);
}


void LCodeGen::DoGap(LGap* gap) {
  for (int i = LGap::FIRST_INNER_POSITION;
       i <= LGap::LAST_INNER_POSITION;
       i++) {
    LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i);
    LParallelMove* move = gap->GetParallelMove(inner_pos);
    if (move != NULL) DoParallelMove(move);
  }
}


void LCodeGen::DoInstructionGap(LInstructionGap* instr) {
  DoGap(instr);
}


void LCodeGen::DoParameter(LParameter* instr) {
  // Nothing to do.
}

void LCodeGen::DoCallStub(LCallStub* instr) {
  ASSERT(ToRegister(instr->result()).is(r0));
  switch (instr->hydrogen()->major_key()) {
    case CodeStub::RegExpConstructResult: {
      RegExpConstructResultStub stub;
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
      break;
    }
    case CodeStub::RegExpExec: {
      RegExpExecStub stub;
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
      break;
    }
    case CodeStub::SubString: {
      SubStringStub stub;
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
      break;
    }
    case CodeStub::NumberToString: {
      NumberToStringStub stub;
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
      break;
    }
    case CodeStub::StringAdd: {
      StringAddStub stub(NO_STRING_ADD_FLAGS);
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
      break;
    }
    case CodeStub::StringCompare: {
      StringCompareStub stub;
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
      break;
    }
    case CodeStub::TranscendentalCache: {
      TranscendentalCacheStub stub(instr->transcendental_type(),
                                   TranscendentalCacheStub::TAGGED);
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
      break;
    }
    default:
      UNREACHABLE();
  }
}


void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
  // Record the address of the first unknown OSR value as the place to enter.
  if (osr_pc_offset_ == -1) osr_pc_offset_ = masm()->pc_offset();
}

void LCodeGen::DoModI(LModI* instr) {
  if (instr->hydrogen()->HasPowerOf2Divisor()) {
    Register dividend = ToRegister(instr->InputAt(0));
    Register result = ToRegister(instr->result());

    int32_t divisor =
        HConstant::cast(instr->hydrogen()->right())->Integer32Value();
    if (divisor < 0) divisor = -divisor;

    Label positive_dividend, done;
    __ cmp(dividend, Operand(0));
    __ b(pl, &positive_dividend);
    __ rsb(result, dividend, Operand(0));
    __ and_(result, result, Operand(divisor - 1), SetCC);
    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
      DeoptimizeIf(eq, instr->environment());
    }
    __ rsb(result, result, Operand(0));
    __ b(&done);
    __ bind(&positive_dividend);
    __ and_(result, dividend, Operand(divisor - 1));
    __ bind(&done);
    return;
  }

  // These registers hold untagged 32-bit values.
  Register left = ToRegister(instr->InputAt(0));
  Register right = ToRegister(instr->InputAt(1));
  Register result = ToRegister(instr->result());

  Register scratch = scratch0();
  Register scratch2 = ToRegister(instr->TempAt(0));
  DwVfpRegister dividend = ToDoubleRegister(instr->TempAt(1));
  DwVfpRegister divisor = ToDoubleRegister(instr->TempAt(2));
  DwVfpRegister quotient = double_scratch0();

  ASSERT(!dividend.is(divisor));
  ASSERT(!dividend.is(quotient));
  ASSERT(!divisor.is(quotient));
  ASSERT(!scratch.is(left));
  ASSERT(!scratch.is(right));
  ASSERT(!scratch.is(result));

  Label done, vfp_modulo, both_positive, right_negative;

  // Check for x % 0.
  if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
    __ cmp(right, Operand(0));
    DeoptimizeIf(eq, instr->environment());
  }

  __ Move(result, left);

  // (0 % x) must yield 0 (if x is finite, which is the case here).
  __ cmp(left, Operand(0));
  __ b(eq, &done);
  // Preload right in a VFP register.
  __ vmov(divisor.low(), right);
  __ b(lt, &vfp_modulo);

  __ cmp(left, Operand(right));
  __ b(lt, &done);

  // Check for (positive) power of two on the right hand side.
  __ JumpIfNotPowerOfTwoOrZeroAndNeg(right,
                                     scratch,
                                     &right_negative,
                                     &both_positive);
  // Perform modulo operation (scratch contains right - 1).
  __ and_(result, scratch, Operand(left));
  __ b(&done);

  __ bind(&right_negative);
  // Negate right. The sign of the divisor does not matter.
  __ rsb(right, right, Operand(0));

  __ bind(&both_positive);
  const int kUnfolds = 3;
  // Try a few subtractions of the right hand side before falling back to
  // the VFP path.
  __ mov(scratch, left);
  for (int i = 0; i < kUnfolds; i++) {
    // Check if the left hand side is smaller than the right hand side.
    __ cmp(scratch, Operand(right));
    // If yes, it is the remainder; move it to the result and exit.
    __ mov(result, scratch, LeaveCC, lt);
    __ b(lt, &done);
    if (i < kUnfolds - 1) __ sub(scratch, scratch, right);
  }

  __ bind(&vfp_modulo);
  // Load the arguments in VFP registers, convert to doubles, and compute
  // left - trunc(left / right) * right.
  __ vmov(dividend.low(), left);
  __ vcvt_f64_s32(dividend, dividend.low());
  __ vcvt_f64_s32(divisor, divisor.low());

  // We do not care about the sign of the divisor.
  __ vabs(divisor, divisor);
  __ vdiv(quotient, dividend, divisor);
  __ vcvt_s32_f64(quotient.low(), quotient);
  __ vcvt_f64_s32(quotient, quotient.low());

  // Compute the remainder.
  DwVfpRegister double_scratch = dividend;
  __ vmul(double_scratch, divisor, quotient);
  __ vcvt_s32_f64(double_scratch.low(), double_scratch);
  __ vmov(scratch, double_scratch.low());

  if (!instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    __ sub(result, left, scratch);
  } else {
    Label ok;
    // Check for -0.
    __ sub(scratch2, left, scratch, SetCC);
    __ b(ne, &ok);
    __ cmp(left, Operand(0));
    DeoptimizeIf(mi, instr->environment());
    __ bind(&ok);
    // Load the result and we are done.
    __ mov(result, scratch2);
  }

  __ bind(&done);
}

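// DoModI above picks among three strategies: a power-of-2 divisor reduces
// to a bit mask (with a sign fix-up and a -0 bailout for negative
// dividends), small nonnegative operands try kUnfolds repeated
// subtractions, and everything else converts to doubles and computes
// left - trunc(left / right) * right in VFP registers.
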
void LCodeGen::EmitSignedIntegerDivisionByConstant(
    Register result,
    Register dividend,
    int32_t divisor,
    Register remainder,
    Register scratch,
    LEnvironment* environment) {
  ASSERT(!AreAliased(dividend, scratch, ip));
  ASSERT(LChunkBuilder::HasMagicNumberForDivisor(divisor));

  uint32_t divisor_abs = abs(divisor);

  int32_t power_of_2_factor =
      CompilerIntrinsics::CountTrailingZeros(divisor_abs);

  switch (divisor_abs) {
    case 0:
      DeoptimizeIf(al, environment);
      return;

    case 1:
      if (divisor > 0) {
        __ Move(result, dividend);
      } else {
        __ rsb(result, dividend, Operand(0), SetCC);
        DeoptimizeIf(vs, environment);
      }
      // Compute the remainder.
      __ mov(remainder, Operand(0));
      return;

    default:
      if (IsPowerOf2(divisor_abs)) {
        // Branch and condition free code for integer division by a power
        // of two.
        int32_t power = WhichPowerOf2(divisor_abs);
        if (power > 1) {
          __ mov(scratch, Operand(dividend, ASR, power - 1));
        }
        __ add(scratch, dividend, Operand(scratch, LSR, 32 - power));
        __ mov(result, Operand(scratch, ASR, power));
        // Negate if necessary. We do not need to check for overflow
        // because the case '-1' is handled separately.
        if (divisor < 0) {
          __ rsb(result, result, Operand(0));
        }
        // Compute the remainder.
        if (divisor > 0) {
          __ sub(remainder, dividend, Operand(result, LSL, power));
        } else {
          __ add(remainder, dividend, Operand(result, LSL, power));
        }
        return;
      } else {
        // Use magic numbers for a few specific divisors.
        // Details and proofs can be found in:
        // - Hacker's Delight, Henry S. Warren, Jr.
        // - The PowerPC Compiler Writer's Guide
        // and probably many others.
        //
        // We handle
        //   <divisor with magic numbers> * <power of 2>
        // but not
        //   <divisor with magic numbers> * <other divisor with magic numbers>
        DivMagicNumbers magic_numbers =
            DivMagicNumberFor(divisor_abs >> power_of_2_factor);
        const int32_t M = magic_numbers.M;
        const int32_t s = magic_numbers.s + power_of_2_factor;

        __ mov(ip, Operand(M));
        __ smull(ip, scratch, dividend, ip);
        if (M < 0) {
          __ add(scratch, scratch, Operand(dividend));
        }
        if (s > 0) {
          __ mov(scratch, Operand(scratch, ASR, s));
        }
        __ add(result, scratch, Operand(dividend, LSR, 31));
        if (divisor < 0) __ rsb(result, result, Operand(0));
        // Compute the remainder.
        __ mov(ip, Operand(divisor));
        // This sequence could be replaced with 'mls' when it is
        // implemented.
        __ mul(scratch, result, ip);
        __ sub(remainder, dividend, scratch);
      }
  }
}

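// Worked example of the magic-number path above, using the standard
// Hacker's Delight constants (assumed here) for divisor 5:
// M = 0x66666667 and s = 1, so for a dividend n the sequence computes
//   smull  ->  scratch = (n * M) >> 32          (high word of the product)
//   asr    ->  scratch = scratch >> 1           (shift by s)
//   add    ->  result  = scratch + (n >>> 31)   (add 1 for negative n)
// e.g. n = 10: high word = 4, 4 >> 1 = 2, sign bit 0, so 10 / 5 = 2.
// The remainder is then n - result * 5.
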
void LCodeGen::DoDivI(LDivI* instr) {
  class DeferredDivI: public LDeferredCode {
   public:
    DeferredDivI(LCodeGen* codegen, LDivI* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() {
      codegen()->DoDeferredBinaryOpStub(instr_, Token::DIV);
    }
    virtual LInstruction* instr() { return instr_; }
   private:
    LDivI* instr_;
  };

  const Register left = ToRegister(instr->InputAt(0));
  const Register right = ToRegister(instr->InputAt(1));
  const Register scratch = scratch0();
  const Register result = ToRegister(instr->result());

  // Check for x / 0.
  if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
    __ cmp(right, Operand(0));
    DeoptimizeIf(eq, instr->environment());
  }

  // Check for (0 / -x) that will produce negative zero.
  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    Label left_not_zero;
    __ cmp(left, Operand(0));
    __ b(ne, &left_not_zero);
    __ cmp(right, Operand(0));
    DeoptimizeIf(mi, instr->environment());
    __ bind(&left_not_zero);
  }

  // Check for (-kMinInt / -1).
  if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
    Label left_not_min_int;
    __ cmp(left, Operand(kMinInt));
    __ b(ne, &left_not_min_int);
    __ cmp(right, Operand(-1));
    DeoptimizeIf(eq, instr->environment());
    __ bind(&left_not_min_int);
  }

  Label done, deoptimize;
  // Test for a few common cases first.
  __ cmp(right, Operand(1));
  __ mov(result, left, LeaveCC, eq);
  __ b(eq, &done);

  __ cmp(right, Operand(2));
  __ tst(left, Operand(1), eq);
  __ mov(result, Operand(left, ASR, 1), LeaveCC, eq);
  __ b(eq, &done);

  __ cmp(right, Operand(4));
  __ tst(left, Operand(3), eq);
  __ mov(result, Operand(left, ASR, 2), LeaveCC, eq);
  __ b(eq, &done);

  // Call the stub. The numbers in r0 and r1 have to be tagged to Smis.
  // If the result in the remainder register is not a Smi, deoptimize.
  DeferredDivI* deferred = new(zone()) DeferredDivI(this, instr);

  __ TrySmiTag(left, &deoptimize, scratch);
  __ TrySmiTag(right, &deoptimize, scratch);

  __ b(al, deferred->entry());
  __ bind(deferred->exit());

  // If the result is a Smi, untag it; else deoptimize.
  __ JumpIfNotSmi(result, &deoptimize);
  __ SmiUntag(result);
  __ b(&done);

  __ bind(&deoptimize);
  DeoptimizeIf(al, instr->environment());
  __ bind(&done);
}

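// The divide-by-1/2/4 fast paths above apply only when the division is
// exact: the tst with (divisor - 1) executes under the eq condition from
// the preceding cmp and clears eq again if any remainder bit is set, so
// inexact divisions fall through to the deferred stub, which returns a
// tagged number and forces a deopt if the quotient is not a Smi.
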
void LCodeGen::DoMathFloorOfDiv(LMathFloorOfDiv* instr) {
  const Register result = ToRegister(instr->result());
  const Register left = ToRegister(instr->InputAt(0));
  const Register remainder = ToRegister(instr->TempAt(0));
  const Register scratch = scratch0();

  // We only optimize this for division by constants, because the standard
  // integer division routine is usually slower than transitioning to VFP.
  ASSERT(instr->InputAt(1)->IsConstantOperand());
  int32_t divisor = ToInteger32(LConstantOperand::cast(instr->InputAt(1)));
  if (divisor < 0) {
    __ cmp(left, Operand(0));
    DeoptimizeIf(eq, instr->environment());
  }
  EmitSignedIntegerDivisionByConstant(result,
                                      left,
                                      divisor,
                                      remainder,
                                      scratch,
                                      instr->environment());
  // We performed a truncating division. Correct the result if necessary.
  __ cmp(remainder, Operand(0));
  __ teq(remainder, Operand(divisor), ne);
  __ sub(result, result, Operand(1), LeaveCC, mi);
}


template<class T>
void LCodeGen::DoDeferredBinaryOpStub(LTemplateInstruction<1, 2, T>* instr,
                                      Token::Value op) {
  Register left = ToRegister(instr->InputAt(0));
  Register right = ToRegister(instr->InputAt(1));

  PushSafepointRegistersScope scope(this, Safepoint::kWithRegistersAndDoubles);
  // Move left to r1 and right to r0 for the stub call.
  if (left.is(r1)) {
    __ Move(r0, right);
  } else if (left.is(r0) && right.is(r1)) {
    __ Swap(r0, r1, r2);
  } else if (left.is(r0)) {
    ASSERT(!right.is(r1));
    __ mov(r1, r0);
    __ mov(r0, right);
  } else {
    ASSERT(!left.is(r0) && !right.is(r0));
    __ mov(r0, right);
    __ mov(r1, left);
  }
  BinaryOpStub stub(op, OVERWRITE_LEFT);
  __ CallStub(&stub);
  RecordSafepointWithRegistersAndDoubles(instr->pointer_map(),
                                         0,
                                         Safepoint::kNoLazyDeopt);
  // Overwrite the stored value of r0 with the result of the stub.
  __ StoreToSafepointRegistersAndDoublesSlot(r0, r0);
}

void LCodeGen::DoMulI(LMulI* instr) {
  Register scratch = scratch0();
  Register result = ToRegister(instr->result());
  // Note that result may alias left.
  Register left = ToRegister(instr->InputAt(0));
  LOperand* right_op = instr->InputAt(1);

  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
  bool bailout_on_minus_zero =
      instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero);

  if (right_op->IsConstantOperand() && !can_overflow) {
    // Use optimized code for specific constants.
    int32_t constant = ToInteger32(LConstantOperand::cast(right_op));

    if (bailout_on_minus_zero && (constant < 0)) {
      // The case of a null constant is handled separately.
      // If the constant is negative and left is null, the result is -0.
      __ cmp(left, Operand(0));
      DeoptimizeIf(eq, instr->environment());
    }

    switch (constant) {
      case -1:
        __ rsb(result, left, Operand(0));
        break;
      case 0:
        if (bailout_on_minus_zero) {
          // If left is strictly negative and the constant is null, the
          // result is -0. Deoptimize if required, otherwise return 0.
          __ cmp(left, Operand(0));
          DeoptimizeIf(mi, instr->environment());
        }
        __ mov(result, Operand(0));
        break;
      case 1:
        __ Move(result, left);
        break;
      default:
        // Multiplying by powers of two and powers of two plus or minus
        // one can be done faster with shifted operands.
        // For other constants we emit standard code.
        int32_t mask = constant >> 31;
        uint32_t constant_abs = (constant + mask) ^ mask;

        if (IsPowerOf2(constant_abs)) {
          int32_t shift = WhichPowerOf2(constant_abs);
          __ mov(result, Operand(left, LSL, shift));
          if (constant < 0) __ rsb(result, result, Operand(0));
        } else if (IsPowerOf2(constant_abs - 1)) {
          int32_t shift = WhichPowerOf2(constant_abs - 1);
          __ add(result, left, Operand(left, LSL, shift));
          if (constant < 0) __ rsb(result, result, Operand(0));
        } else if (IsPowerOf2(constant_abs + 1)) {
          int32_t shift = WhichPowerOf2(constant_abs + 1);
          __ rsb(result, left, Operand(left, LSL, shift));
          if (constant < 0) __ rsb(result, result, Operand(0));
        } else {
          // Generate standard code.
          __ mov(ip, Operand(constant));
          __ mul(result, left, ip);
        }
    }
  } else {
    Register right = EmitLoadRegister(right_op, scratch);
    if (bailout_on_minus_zero) {
      __ orr(ToRegister(instr->TempAt(0)), left, right);
    }

    if (can_overflow) {
      // scratch:result = left * right.
      __ smull(result, scratch, left, right);
      __ cmp(scratch, Operand(result, ASR, 31));
      DeoptimizeIf(ne, instr->environment());
    } else {
      __ mul(result, left, right);
    }

    if (bailout_on_minus_zero) {
      // Bail out if the result is supposed to be negative zero.
      Label done;
      __ cmp(result, Operand(0));
      __ b(ne, &done);
      __ cmp(ToRegister(instr->TempAt(0)), Operand(0));
      DeoptimizeIf(mi, instr->environment());
      __ bind(&done);
    }
  }
}

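// Two tricks worth noting in DoMulI: multiplication by a constant c is
// strength-reduced to a single shift, add-with-shift, or rsb-with-shift
// whenever |c|, |c| - 1, or |c| + 1 is a power of two, and the overflow
// check for the general case compares the high word from smull against
// the sign extension of the low word (scratch == result >> 31 exactly
// when the product fits in 32 bits).
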
void LCodeGen::DoBitI(LBitI* instr) {
  LOperand* left_op = instr->InputAt(0);
  LOperand* right_op = instr->InputAt(1);
  ASSERT(left_op->IsRegister());
  Register left = ToRegister(left_op);
  Register result = ToRegister(instr->result());
  Operand right(no_reg);

  if (right_op->IsStackSlot() || right_op->IsArgument()) {
    right = Operand(EmitLoadRegister(right_op, ip));
  } else {
    ASSERT(right_op->IsRegister() || right_op->IsConstantOperand());
    right = ToOperand(right_op);
  }

  switch (instr->op()) {
    case Token::BIT_AND:
      __ and_(result, left, right);
      break;
    case Token::BIT_OR:
      __ orr(result, left, right);
      break;
    case Token::BIT_XOR:
      __ eor(result, left, right);
      break;
    default:
      UNREACHABLE();
      break;
  }
}

void LCodeGen::DoShiftI(LShiftI* instr) {
  // Both 'left' and 'right' are "used at start" (see LCodeGen::DoShift),
  // so the result may alias either of them.
  LOperand* right_op = instr->InputAt(1);
  Register left = ToRegister(instr->InputAt(0));
  Register result = ToRegister(instr->result());
  Register scratch = scratch0();
  if (right_op->IsRegister()) {
    // Mask the right_op operand.
    __ and_(scratch, ToRegister(right_op), Operand(0x1F));
    switch (instr->op()) {
      case Token::SAR:
        __ mov(result, Operand(left, ASR, scratch));
        break;
      case Token::SHR:
        if (instr->can_deopt()) {
          __ mov(result, Operand(left, LSR, scratch), SetCC);
          DeoptimizeIf(mi, instr->environment());
        } else {
          __ mov(result, Operand(left, LSR, scratch));
        }
        break;
      case Token::SHL:
        __ mov(result, Operand(left, LSL, scratch));
        break;
      default:
        UNREACHABLE();
        break;
    }
  } else {
    // Mask the right_op operand.
    int value = ToInteger32(LConstantOperand::cast(right_op));
    uint8_t shift_count = static_cast<uint8_t>(value & 0x1F);
    switch (instr->op()) {
      case Token::SAR:
        if (shift_count != 0) {
          __ mov(result, Operand(left, ASR, shift_count));
        } else {
          __ Move(result, left);
        }
        break;
      case Token::SHR:
        if (shift_count != 0) {
          __ mov(result, Operand(left, LSR, shift_count));
        } else {
          if (instr->can_deopt()) {
            __ tst(left, Operand(0x80000000));
            DeoptimizeIf(ne, instr->environment());
          }
          __ Move(result, left);
        }
        break;
      case Token::SHL:
        if (shift_count != 0) {
          __ mov(result, Operand(left, LSL, shift_count));
        } else {
          __ Move(result, left);
        }
        break;
      default:
        UNREACHABLE();
        break;
    }
  }
}

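// The SHR deopt paths above exist because JavaScript's >>> produces an
// unsigned result: if the effective shift count is zero and the sign bit
// of the input is set, the untagged int32 result would fall outside the
// signed range, so the code deoptimizes instead of returning a negative
// number.
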
void LCodeGen::DoSubI(LSubI* instr) {
  LOperand* left = instr->InputAt(0);
  LOperand* right = instr->InputAt(1);
  LOperand* result = instr->result();
  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
  SBit set_cond = can_overflow ? SetCC : LeaveCC;

  if (right->IsStackSlot() || right->IsArgument()) {
    Register right_reg = EmitLoadRegister(right, ip);
    __ sub(ToRegister(result), ToRegister(left), Operand(right_reg), set_cond);
  } else {
    ASSERT(right->IsRegister() || right->IsConstantOperand());
    __ sub(ToRegister(result), ToRegister(left), ToOperand(right), set_cond);
  }

  if (can_overflow) {
    DeoptimizeIf(vs, instr->environment());
  }
}


void LCodeGen::DoConstantI(LConstantI* instr) {
  ASSERT(instr->result()->IsRegister());
  __ mov(ToRegister(instr->result()), Operand(instr->value()));
}


void LCodeGen::DoConstantD(LConstantD* instr) {
  ASSERT(instr->result()->IsDoubleRegister());
  DwVfpRegister result = ToDoubleRegister(instr->result());
  double v = instr->value();
  __ Vmov(result, v);
}


void LCodeGen::DoConstantT(LConstantT* instr) {
  Handle<Object> value = instr->value();
  if (value->IsSmi()) {
    __ mov(ToRegister(instr->result()), Operand(value));
  } else {
    __ LoadHeapObject(ToRegister(instr->result()),
                      Handle<HeapObject>::cast(value));
  }
}


void LCodeGen::DoJSArrayLength(LJSArrayLength* instr) {
  Register result = ToRegister(instr->result());
  Register array = ToRegister(instr->InputAt(0));
  __ ldr(result, FieldMemOperand(array, JSArray::kLengthOffset));
}


void LCodeGen::DoFixedArrayBaseLength(LFixedArrayBaseLength* instr) {
  Register result = ToRegister(instr->result());
  Register array = ToRegister(instr->InputAt(0));
  __ ldr(result, FieldMemOperand(array, FixedArrayBase::kLengthOffset));
}


void LCodeGen::DoElementsKind(LElementsKind* instr) {
  Register result = ToRegister(instr->result());
  Register input = ToRegister(instr->InputAt(0));

  // Load map into |result|.
  __ ldr(result, FieldMemOperand(input, HeapObject::kMapOffset));
  // Retrieve elements_kind from bit field 2 of the map.
  __ ldrb(result, FieldMemOperand(result, Map::kBitField2Offset));
  __ ubfx(result, result, Map::kElementsKindShift, Map::kElementsKindBitCount);
}


void LCodeGen::DoValueOf(LValueOf* instr) {
  Register input = ToRegister(instr->InputAt(0));
  Register result = ToRegister(instr->result());
  Register map = ToRegister(instr->TempAt(0));
  Label done;

  // If the object is a smi return the object.
  __ tst(input, Operand(kSmiTagMask));
  __ Move(result, input, eq);
  __ b(eq, &done);

  // If the object is not a value type, return the object.
  __ CompareObjectType(input, map, map, JS_VALUE_TYPE);
  __ Move(result, input, ne);
  __ b(ne, &done);
  __ ldr(result, FieldMemOperand(input, JSValue::kValueOffset));

  __ bind(&done);
}

void LCodeGen::DoDateField(LDateField* instr) {
  Register object = ToRegister(instr->InputAt(0));
  Register result = ToRegister(instr->result());
  Register scratch = ToRegister(instr->TempAt(0));
  Smi* index = instr->index();
  Label runtime, done;
  ASSERT(object.is(result));
  ASSERT(!scratch.is(scratch0()));
  ASSERT(!scratch.is(object));

  if (FLAG_debug_code) {
    __ AbortIfSmi(object);
    __ CompareObjectType(object, scratch, scratch, JS_DATE_TYPE);
    __ Assert(eq, "Trying to get date field from non-date.");
  }

  if (index->value() == 0) {
    __ ldr(result, FieldMemOperand(object, JSDate::kValueOffset));
  } else {
    if (index->value() < JSDate::kFirstUncachedField) {
      ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
      __ mov(scratch, Operand(stamp));
      __ ldr(scratch, MemOperand(scratch));
      __ ldr(scratch0(), FieldMemOperand(object, JSDate::kCacheStampOffset));
      __ cmp(scratch, scratch0());
      __ b(ne, &runtime);
      __ ldr(result, FieldMemOperand(object, JSDate::kValueOffset +
                                             kPointerSize * index->value()));
      __ jmp(&done);
    }
    __ bind(&runtime);
    __ PrepareCallCFunction(2, scratch);
    __ mov(r1, Operand(index));
    __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
    __ bind(&done);
  }
}


void LCodeGen::DoBitNotI(LBitNotI* instr) {
  Register input = ToRegister(instr->InputAt(0));
  Register result = ToRegister(instr->result());
  __ mvn(result, Operand(input));
}


void LCodeGen::DoThrow(LThrow* instr) {
  Register input_reg = EmitLoadRegister(instr->InputAt(0), ip);
  __ push(input_reg);
  CallRuntime(Runtime::kThrow, 1, instr);

  if (FLAG_debug_code) {
    __ stop("Unreachable code.");
  }
}

void LCodeGen::DoAddI(LAddI* instr) {
  LOperand* left = instr->InputAt(0);
  LOperand* right = instr->InputAt(1);
  LOperand* result = instr->result();
  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
  SBit set_cond = can_overflow ? SetCC : LeaveCC;

  if (right->IsStackSlot() || right->IsArgument()) {
    Register right_reg = EmitLoadRegister(right, ip);
    __ add(ToRegister(result), ToRegister(left), Operand(right_reg), set_cond);
  } else {
    ASSERT(right->IsRegister() || right->IsConstantOperand());
    __ add(ToRegister(result), ToRegister(left), ToOperand(right), set_cond);
  }

  if (can_overflow) {
    DeoptimizeIf(vs, instr->environment());
  }
}


void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
  DoubleRegister left = ToDoubleRegister(instr->InputAt(0));
  DoubleRegister right = ToDoubleRegister(instr->InputAt(1));
  DoubleRegister result = ToDoubleRegister(instr->result());
  switch (instr->op()) {
    case Token::ADD:
      __ vadd(result, left, right);
      break;
    case Token::SUB:
      __ vsub(result, left, right);
      break;
    case Token::MUL:
      __ vmul(result, left, right);
      break;
    case Token::DIV:
      __ vdiv(result, left, right);
      break;
    case Token::MOD: {
      // Save r0-r3 on the stack.
      __ stm(db_w, sp, r0.bit() | r1.bit() | r2.bit() | r3.bit());

      __ PrepareCallCFunction(0, 2, scratch0());
      __ SetCallCDoubleArguments(left, right);
      __ CallCFunction(
          ExternalReference::double_fp_operation(Token::MOD, isolate()),
          0, 2);
      // Move the result in the double result register.
      __ GetCFunctionDoubleResult(result);

      // Restore r0-r3.
      __ ldm(ia_w, sp, r0.bit() | r1.bit() | r2.bit() | r3.bit());
      break;
    }
    default:
      UNREACHABLE();
      break;
  }
}


void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
  BinaryOpStub stub(instr->op(), NO_OVERWRITE);
  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
  __ nop();  // Signals no inlined code.
}


int LCodeGen::GetNextEmittedBlock(int block) {
  for (int i = block + 1; i < graph()->blocks()->length(); ++i) {
    LLabel* label = chunk_->GetLabel(i);
    if (!label->HasReplacement()) return i;
  }
  return -1;
}

void LCodeGen::EmitBranch(int left_block, int right_block, Condition cc) {
  int next_block = GetNextEmittedBlock(current_block_);
  right_block = chunk_->LookupDestination(right_block);
  left_block = chunk_->LookupDestination(left_block);

  if (right_block == left_block) {
    EmitGoto(left_block);
  } else if (left_block == next_block) {
    __ b(NegateCondition(cc), chunk_->GetAssemblyLabel(right_block));
  } else if (right_block == next_block) {
    __ b(cc, chunk_->GetAssemblyLabel(left_block));
  } else {
    __ b(cc, chunk_->GetAssemblyLabel(left_block));
    __ b(chunk_->GetAssemblyLabel(right_block));
  }
}

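// EmitBranch never emits a branch to the block laid out next: when one of
// the two targets is the fall-through block it emits a single conditional
// branch (negating the condition if necessary), and only falls back to a
// branch-plus-unconditional-jump pair when neither target is adjacent.
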
void LCodeGen::DoBranch(LBranch* instr) {
  int true_block = chunk_->LookupDestination(instr->true_block_id());
  int false_block = chunk_->LookupDestination(instr->false_block_id());

  Representation r = instr->hydrogen()->value()->representation();
  if (r.IsInteger32()) {
    Register reg = ToRegister(instr->InputAt(0));
    __ cmp(reg, Operand(0));
    EmitBranch(true_block, false_block, ne);
  } else if (r.IsDouble()) {
    DoubleRegister reg = ToDoubleRegister(instr->InputAt(0));
    Register scratch = scratch0();

    // Test the double value. Zero and NaN are false.
    __ VFPCompareAndLoadFlags(reg, 0.0, scratch);
    __ tst(scratch, Operand(kVFPZConditionFlagBit | kVFPVConditionFlagBit));
    EmitBranch(true_block, false_block, eq);
  } else {
    ASSERT(r.IsTagged());
    Register reg = ToRegister(instr->InputAt(0));
    HType type = instr->hydrogen()->value()->type();
    if (type.IsBoolean()) {
      __ CompareRoot(reg, Heap::kTrueValueRootIndex);
      EmitBranch(true_block, false_block, eq);
    } else if (type.IsSmi()) {
      __ cmp(reg, Operand(0));
      EmitBranch(true_block, false_block, ne);
    } else {
      Label* true_label = chunk_->GetAssemblyLabel(true_block);
      Label* false_label = chunk_->GetAssemblyLabel(false_block);

      ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types();

      if (expected.Contains(ToBooleanStub::UNDEFINED)) {
        // undefined -> false.
        __ CompareRoot(reg, Heap::kUndefinedValueRootIndex);
        __ b(eq, false_label);
      }
      if (expected.Contains(ToBooleanStub::BOOLEAN)) {
        // Boolean -> its value.
        __ CompareRoot(reg, Heap::kTrueValueRootIndex);
        __ b(eq, true_label);
        __ CompareRoot(reg, Heap::kFalseValueRootIndex);
        __ b(eq, false_label);
      }
      if (expected.Contains(ToBooleanStub::NULL_TYPE)) {
        // 'null' -> false.
        __ CompareRoot(reg, Heap::kNullValueRootIndex);
        __ b(eq, false_label);
      }

      if (expected.Contains(ToBooleanStub::SMI)) {
        // Smis: 0 -> false, all other -> true.
        __ cmp(reg, Operand(0));
        __ b(eq, false_label);
        __ JumpIfSmi(reg, true_label);
      } else if (expected.NeedsMap()) {
        // If we need a map later and have a Smi -> deopt.
        __ tst(reg, Operand(kSmiTagMask));
        DeoptimizeIf(eq, instr->environment());
      }

      const Register map = scratch0();
      if (expected.NeedsMap()) {
        __ ldr(map, FieldMemOperand(reg, HeapObject::kMapOffset));

        if (expected.CanBeUndetectable()) {
          // Undetectable -> false.
          __ ldrb(ip, FieldMemOperand(map, Map::kBitFieldOffset));
          __ tst(ip, Operand(1 << Map::kIsUndetectable));
          __ b(ne, false_label);
        }
      }

      if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) {
        // Spec object -> true.
        __ CompareInstanceType(map, ip, FIRST_SPEC_OBJECT_TYPE);
        __ b(ge, true_label);
      }

      if (expected.Contains(ToBooleanStub::STRING)) {
        // String value -> false iff empty.
        Label not_string;
        __ CompareInstanceType(map, ip, FIRST_NONSTRING_TYPE);
        __ b(ge, &not_string);
        __ ldr(ip, FieldMemOperand(reg, String::kLengthOffset));
        __ cmp(ip, Operand(0));
        __ b(ne, true_label);
        __ b(false_label);
        __ bind(&not_string);
      }

      if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
        // Heap number -> false iff +0, -0, or NaN.
        DoubleRegister dbl_scratch = double_scratch0();
        Label not_heap_number;
        __ CompareRoot(map, Heap::kHeapNumberMapRootIndex);
        __ b(ne, &not_heap_number);
        __ vldr(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset));
        __ VFPCompareAndSetFlags(dbl_scratch, 0.0);
        __ b(vs, false_label);  // NaN -> false.
        __ b(eq, false_label);  // +0, -0 -> false.
        __ bind(&not_heap_number);
      }

      // We've seen something for the first time -> deopt.
      DeoptimizeIf(al, instr->environment());
    }
  }
}

void LCodeGen::EmitGoto(int block) {
  block = chunk_->LookupDestination(block);
  int next_block = GetNextEmittedBlock(current_block_);
  if (block != next_block) {
    __ jmp(chunk_->GetAssemblyLabel(block));
  }
}


void LCodeGen::DoGoto(LGoto* instr) {
  EmitGoto(instr->block_id());
}


Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
  Condition cond = kNoCondition;
  switch (op) {
    case Token::EQ:
    case Token::EQ_STRICT:
      cond = eq;
      break;
    case Token::LT:
      cond = is_unsigned ? lo : lt;
      break;
    case Token::GT:
      cond = is_unsigned ? hi : gt;
      break;
    case Token::LTE:
      cond = is_unsigned ? ls : le;
      break;
    case Token::GTE:
      cond = is_unsigned ? hs : ge;
      break;
    case Token::IN:
    case Token::INSTANCEOF:
    default:
      UNREACHABLE();
  }
  return cond;
}


void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) {
  LOperand* left = instr->InputAt(0);
  LOperand* right = instr->InputAt(1);
  int false_block = chunk_->LookupDestination(instr->false_block_id());
  int true_block = chunk_->LookupDestination(instr->true_block_id());
  Condition cond = TokenToCondition(instr->op(), false);

  if (left->IsConstantOperand() && right->IsConstantOperand()) {
    // We can statically evaluate the comparison.
    double left_val = ToDouble(LConstantOperand::cast(left));
    double right_val = ToDouble(LConstantOperand::cast(right));
    int next_block =
        EvalComparison(instr->op(), left_val, right_val) ? true_block
                                                         : false_block;
    EmitGoto(next_block);
  } else {
    if (instr->is_double()) {
      // Compare left and right as doubles and load the resulting flags
      // into the normal status register.
      __ VFPCompareAndSetFlags(ToDoubleRegister(left), ToDoubleRegister(right));
      // If a NaN is involved, i.e. the result is unordered (V set),
      // jump to the false block label.
      __ b(vs, chunk_->GetAssemblyLabel(false_block));
    } else {
      if (right->IsConstantOperand()) {
        __ cmp(ToRegister(left),
               Operand(ToInteger32(LConstantOperand::cast(right))));
      } else if (left->IsConstantOperand()) {
        __ cmp(ToRegister(right),
               Operand(ToInteger32(LConstantOperand::cast(left))));
        // We transposed the operands. Reverse the condition.
        cond = ReverseCondition(cond);
      } else {
        __ cmp(ToRegister(left), ToRegister(right));
      }
    }
    EmitBranch(true_block, false_block, cond);
  }
}

void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
  Register left = ToRegister(instr->InputAt(0));
  Register right = ToRegister(instr->InputAt(1));
  int false_block = chunk_->LookupDestination(instr->false_block_id());
  int true_block = chunk_->LookupDestination(instr->true_block_id());

  __ cmp(left, Operand(right));
  EmitBranch(true_block, false_block, eq);
}


void LCodeGen::DoCmpConstantEqAndBranch(LCmpConstantEqAndBranch* instr) {
  Register left = ToRegister(instr->InputAt(0));
  int true_block = chunk_->LookupDestination(instr->true_block_id());
  int false_block = chunk_->LookupDestination(instr->false_block_id());

  __ cmp(left, Operand(instr->hydrogen()->right()));
  EmitBranch(true_block, false_block, eq);
}


void LCodeGen::DoIsNilAndBranch(LIsNilAndBranch* instr) {
  Register scratch = scratch0();
  Register reg = ToRegister(instr->InputAt(0));
  int false_block = chunk_->LookupDestination(instr->false_block_id());

  // If the expression is known to be untagged or a smi, then it's definitely
  // not null, and it can't be an undetectable object.
  if (instr->hydrogen()->representation().IsSpecialization() ||
      instr->hydrogen()->type().IsSmi()) {
    EmitGoto(false_block);
    return;
  }

  int true_block = chunk_->LookupDestination(instr->true_block_id());
  Heap::RootListIndex nil_value = instr->nil() == kNullValue ?
      Heap::kNullValueRootIndex :
      Heap::kUndefinedValueRootIndex;
  __ LoadRoot(ip, nil_value);
  __ cmp(reg, ip);
  if (instr->kind() == kStrictEquality) {
    EmitBranch(true_block, false_block, eq);
  } else {
    Heap::RootListIndex other_nil_value = instr->nil() == kNullValue ?
        Heap::kUndefinedValueRootIndex :
        Heap::kNullValueRootIndex;
    Label* true_label = chunk_->GetAssemblyLabel(true_block);
    Label* false_label = chunk_->GetAssemblyLabel(false_block);
    __ b(eq, true_label);
    __ LoadRoot(ip, other_nil_value);
    __ cmp(reg, ip);
    __ b(eq, true_label);
    __ JumpIfSmi(reg, false_label);
    // Check for undetectable objects by looking in the bit field in
    // the map. The object has already been smi checked.
    __ ldr(scratch, FieldMemOperand(reg, HeapObject::kMapOffset));
    __ ldrb(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
    __ tst(scratch, Operand(1 << Map::kIsUndetectable));
    EmitBranch(true_block, false_block, ne);
  }
}

Condition LCodeGen::EmitIsObject(Register input,
                                 Register temp1,
                                 Label* is_not_object,
                                 Label* is_object) {
  Register temp2 = scratch0();
  __ JumpIfSmi(input, is_not_object);

  __ LoadRoot(temp2, Heap::kNullValueRootIndex);
  __ cmp(input, temp2);
  __ b(eq, is_object);

  // Load map.
  __ ldr(temp1, FieldMemOperand(input, HeapObject::kMapOffset));
  // Undetectable objects behave like undefined.
  __ ldrb(temp2, FieldMemOperand(temp1, Map::kBitFieldOffset));
  __ tst(temp2, Operand(1 << Map::kIsUndetectable));
  __ b(ne, is_not_object);

  // Load instance type and check that it is in the object type range.
  __ ldrb(temp2, FieldMemOperand(temp1, Map::kInstanceTypeOffset));
  __ cmp(temp2, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
  __ b(lt, is_not_object);
  __ cmp(temp2, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
  return le;
}


void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
  Register reg = ToRegister(instr->InputAt(0));
  Register temp1 = ToRegister(instr->TempAt(0));

  int true_block = chunk_->LookupDestination(instr->true_block_id());
  int false_block = chunk_->LookupDestination(instr->false_block_id());
  Label* true_label = chunk_->GetAssemblyLabel(true_block);
  Label* false_label = chunk_->GetAssemblyLabel(false_block);

  Condition true_cond =
      EmitIsObject(reg, temp1, false_label, true_label);

  EmitBranch(true_block, false_block, true_cond);
}


Condition LCodeGen::EmitIsString(Register input,
                                 Register temp1,
                                 Label* is_not_string) {
  __ JumpIfSmi(input, is_not_string);
  __ CompareObjectType(input, temp1, temp1, FIRST_NONSTRING_TYPE);

  return lt;
}


void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
  Register reg = ToRegister(instr->InputAt(0));
  Register temp1 = ToRegister(instr->TempAt(0));

  int true_block = chunk_->LookupDestination(instr->true_block_id());
  int false_block = chunk_->LookupDestination(instr->false_block_id());
  Label* false_label = chunk_->GetAssemblyLabel(false_block);

  Condition true_cond =
      EmitIsString(reg, temp1, false_label);

  EmitBranch(true_block, false_block, true_cond);
}


void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
  int true_block = chunk_->LookupDestination(instr->true_block_id());
  int false_block = chunk_->LookupDestination(instr->false_block_id());

  Register input_reg = EmitLoadRegister(instr->InputAt(0), ip);
  __ tst(input_reg, Operand(kSmiTagMask));
  EmitBranch(true_block, false_block, eq);
}


void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
  Register input = ToRegister(instr->InputAt(0));
  Register temp = ToRegister(instr->TempAt(0));

  int true_block = chunk_->LookupDestination(instr->true_block_id());
  int false_block = chunk_->LookupDestination(instr->false_block_id());

  __ JumpIfSmi(input, chunk_->GetAssemblyLabel(false_block));
  __ ldr(temp, FieldMemOperand(input, HeapObject::kMapOffset));
  __ ldrb(temp, FieldMemOperand(temp, Map::kBitFieldOffset));
  __ tst(temp, Operand(1 << Map::kIsUndetectable));
  EmitBranch(true_block, false_block, ne);
}

static Condition ComputeCompareCondition(Token::Value op) {
  switch (op) {
    case Token::EQ_STRICT:
    case Token::EQ:
      return eq;
    case Token::LT:
      return lt;
    case Token::GT:
      return gt;
    case Token::LTE:
      return le;
    case Token::GTE:
      return ge;
    default:
      UNREACHABLE();
      return kNoCondition;
  }
}


void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
  Token::Value op = instr->op();
  int true_block = chunk_->LookupDestination(instr->true_block_id());
  int false_block = chunk_->LookupDestination(instr->false_block_id());

  Handle<Code> ic = CompareIC::GetUninitialized(op);
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
  __ cmp(r0, Operand(0));  // This instruction also signals no smi code inlined.

  Condition condition = ComputeCompareCondition(op);

  EmitBranch(true_block, false_block, condition);
}


static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
  InstanceType from = instr->from();
  InstanceType to = instr->to();
  if (from == FIRST_TYPE) return to;
  ASSERT(from == to || to == LAST_TYPE);
  return from;
}


static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
  InstanceType from = instr->from();
  InstanceType to = instr->to();
  if (from == to) return eq;
  if (to == LAST_TYPE) return hs;
  if (from == FIRST_TYPE) return ls;
  UNREACHABLE();
  return eq;
}


void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
  Register scratch = scratch0();
  Register input = ToRegister(instr->InputAt(0));

  int true_block = chunk_->LookupDestination(instr->true_block_id());
  int false_block = chunk_->LookupDestination(instr->false_block_id());

  Label* false_label = chunk_->GetAssemblyLabel(false_block);

  __ JumpIfSmi(input, false_label);

  __ CompareObjectType(input, scratch, scratch, TestType(instr->hydrogen()));
  EmitBranch(true_block, false_block, BranchCondition(instr->hydrogen()));
}


void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
  Register input = ToRegister(instr->InputAt(0));
  Register result = ToRegister(instr->result());

  if (FLAG_debug_code) {
    __ AbortIfNotString(input);
  }

  __ ldr(result, FieldMemOperand(input, String::kHashFieldOffset));
  __ IndexFromHash(result, result);
}


void LCodeGen::DoHasCachedArrayIndexAndBranch(
    LHasCachedArrayIndexAndBranch* instr) {
  Register input = ToRegister(instr->InputAt(0));
  Register scratch = scratch0();

  int true_block = chunk_->LookupDestination(instr->true_block_id());
  int false_block = chunk_->LookupDestination(instr->false_block_id());

  __ ldr(scratch, FieldMemOperand(input, String::kHashFieldOffset));
  __ tst(scratch, Operand(String::kContainsCachedArrayIndexMask));
  EmitBranch(true_block, false_block, eq);
}

void LCodeGen::EmitClassOfTest(Label* is_true,
                               Label* is_false,
                               Handle<String> class_name,
                               Register input,
                               Register temp,
                               Register temp2) {
  ASSERT(!input.is(temp));
  ASSERT(!input.is(temp2));
  ASSERT(!temp.is(temp2));

  __ JumpIfSmi(input, is_false);

  if (class_name->IsEqualTo(CStrVector("Function"))) {
    // ... test for function types ...
  } else {
    // ... test for the non-function object type range ...
  }

  if (class_name->IsEqualTo(CStrVector("Object"))) {
    // ... objects whose constructor is not a function answer "Object" ...
  }

  // ... load the class name of the constructor into temp and end with the
  // answer in the flags.
  __ cmp(temp, Operand(class_name));
}


void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
  Register input = ToRegister(instr->InputAt(0));
  Register temp = scratch0();
  Register temp2 = ToRegister(instr->TempAt(0));
  Handle<String> class_name = instr->hydrogen()->class_name();

  int true_block = chunk_->LookupDestination(instr->true_block_id());
  int false_block = chunk_->LookupDestination(instr->false_block_id());

  Label* true_label = chunk_->GetAssemblyLabel(true_block);
  Label* false_label = chunk_->GetAssemblyLabel(false_block);

  EmitClassOfTest(true_label, false_label, class_name, input, temp, temp2);

  EmitBranch(true_block, false_block, eq);
}


void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
  Register reg = ToRegister(instr->InputAt(0));
  Register temp = ToRegister(instr->TempAt(0));
  int true_block = instr->true_block_id();
  int false_block = instr->false_block_id();

  __ ldr(temp, FieldMemOperand(reg, HeapObject::kMapOffset));
  __ cmp(temp, Operand(instr->map()));
  EmitBranch(true_block, false_block, eq);
}


void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
  InstanceofStub stub(InstanceofStub::kArgsInRegisters);
  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);

  __ cmp(r0, Operand(0));
  __ mov(r0, Operand(factory()->false_value()), LeaveCC, ne);
  __ mov(r0, Operand(factory()->true_value()), LeaveCC, eq);
}

void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
  class DeferredInstanceOfKnownGlobal: public LDeferredCode {
   public:
    DeferredInstanceOfKnownGlobal(LCodeGen* codegen,
                                  LInstanceOfKnownGlobal* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() {
      codegen()->DoDeferredInstanceOfKnownGlobal(instr_, &map_check_);
    }
    virtual LInstruction* instr() { return instr_; }
    Label* map_check() { return &map_check_; }
   private:
    LInstanceOfKnownGlobal* instr_;
    Label map_check_;
  };

  DeferredInstanceOfKnownGlobal* deferred;
  deferred = new(zone()) DeferredInstanceOfKnownGlobal(this, instr);

  Label done, false_result;
  Register object = ToRegister(instr->InputAt(0));
  Register temp = ToRegister(instr->TempAt(0));
  Register result = ToRegister(instr->result());

  // A Smi is not instance of anything.
  __ JumpIfSmi(object, &false_result);

  // This is the inlined call site instanceof cache. The two occurrences of
  // the hole value will be patched to the last map/result pair generated by
  // the instanceof stub.
  Label cache_miss;
  Register map = temp;
  __ ldr(map, FieldMemOperand(object, HeapObject::kMapOffset));
  __ bind(deferred->map_check());  // Label for calculating code patching.
  // We use Factory::the_hole_value() on purpose instead of loading from the
  // root array to force relocation, to be able to patch the code with the
  // cached map later.
  Handle<JSGlobalPropertyCell> cell =
      factory()->NewJSGlobalPropertyCell(factory()->the_hole_value());
  __ mov(ip, Operand(Handle<Object>(cell)));
  __ ldr(ip, FieldMemOperand(ip, JSGlobalPropertyCell::kValueOffset));
  __ cmp(map, Operand(ip));
  __ b(ne, &cache_miss);
  // We use Factory::the_hole_value() on purpose instead of loading from the
  // root array to force relocation, to be able to patch the code with true
  // or false later.
  __ mov(result, Operand(factory()->the_hole_value()));
  __ b(&done);

  // The inlined call site cache did not match. Check for null and string
  // before calling the deferred code.
  __ bind(&cache_miss);
  // Null is not instance of anything.
  __ LoadRoot(ip, Heap::kNullValueRootIndex);
  __ cmp(object, Operand(ip));
  __ b(eq, &false_result);

  // String values are not instances of anything.
  Condition is_string = masm_->IsObjectStringType(object, temp);
  __ b(is_string, &false_result);

  // Go to the deferred code.
  __ b(deferred->entry());

  __ bind(&false_result);
  __ LoadRoot(result, Heap::kFalseValueRootIndex);

  // Here result has either true or false. The deferred code also produces
  // a true or false object.
  __ bind(deferred->exit());
  __ bind(&done);
}

void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
                                               Label* map_check) {
  Register result = ToRegister(instr->result());

  InstanceofStub::Flags flags = InstanceofStub::kNoFlags;
  flags = static_cast<InstanceofStub::Flags>(
      flags | InstanceofStub::kArgsInRegisters);
  flags = static_cast<InstanceofStub::Flags>(
      flags | InstanceofStub::kCallSiteInlineCheck);
  flags = static_cast<InstanceofStub::Flags>(
      flags | InstanceofStub::kReturnTrueFalseObject);
  InstanceofStub stub(flags);

  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);

  // Get the temp register reserved by the instruction. Its safepoint slot
  // is used to communicate the offset to the location of the map check.
  Register temp = ToRegister(instr->TempAt(0));
  __ LoadHeapObject(InstanceofStub::right(), instr->function());
  static const int kAdditionalDelta = 4;
  int delta = masm_->InstructionsGeneratedSince(map_check) + kAdditionalDelta;
  Label before_push_delta;
  __ bind(&before_push_delta);
  __ BlockConstPoolFor(kAdditionalDelta);
  __ mov(temp, Operand(delta * kPointerSize));
  __ StoreToSafepointRegisterSlot(temp, temp);
  CallCodeGeneric(stub.GetCode(),
                  RelocInfo::CODE_TARGET,
                  instr,
                  RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
  LEnvironment* env = instr->GetDeferredLazyDeoptimizationEnvironment();
  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
  // Put the result value into the result register slot and restore all
  // registers.
  __ StoreToSafepointRegisterSlot(result, result);
}

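// DoInstanceOfKnownGlobal and its deferred path cooperate through code
// patching: the map_check label marks an inlined map/result cache whose
// the-hole placeholders the InstanceofStub later overwrites with the
// cached map and the true/false answer, and the delta stored in the temp
// register's safepoint slot tells the stub how far back that patch site is.
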
void LCodeGen::DoCmpT(LCmpT* instr) {
  Token::Value op = instr->op();

  Handle<Code> ic = CompareIC::GetUninitialized(op);
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
  __ cmp(r0, Operand(0));  // This instruction also signals no smi code inlined.

  Condition condition = ComputeCompareCondition(op);
  __ LoadRoot(ToRegister(instr->result()),
              Heap::kTrueValueRootIndex,
              condition);
  __ LoadRoot(ToRegister(instr->result()),
              Heap::kFalseValueRootIndex,
              NegateCondition(condition));
}


void LCodeGen::DoReturn(LReturn* instr) {
  if (FLAG_trace) {
    // Push the return value on the stack as the parameter.
    // Runtime::TraceExit returns its parameter in r0.
    __ push(r0);
    __ CallRuntime(Runtime::kTraceExit, 1);
  }
  int32_t sp_delta = (GetParameterCount() + 1) * kPointerSize;
  __ mov(sp, fp);
  __ ldm(ia_w, sp, fp.bit() | lr.bit());
  __ add(sp, sp, Operand(sp_delta));
  __ Jump(lr);
}


void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
  Register result = ToRegister(instr->result());
  __ mov(ip, Operand(Handle<Object>(instr->hydrogen()->cell())));
  __ ldr(result, FieldMemOperand(ip, JSGlobalPropertyCell::kValueOffset));
  if (instr->hydrogen()->RequiresHoleCheck()) {
    __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
    __ cmp(result, ip);
    DeoptimizeIf(eq, instr->environment());
  }
}


void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
  __ mov(r2, Operand(instr->name()));
  RelocInfo::Mode mode = instr->for_typeof() ? RelocInfo::CODE_TARGET
                                             : RelocInfo::CODE_TARGET_CONTEXT;
  Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
  CallCode(ic, mode, instr);
}

void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
  Register value = ToRegister(instr->value());
  Register cell = scratch0();

  // Load the cell.
  __ mov(cell, Operand(instr->hydrogen()->cell()));

  // If the cell we are storing to contains the hole it could have been
  // deleted from the property dictionary. In that case, we need to update
  // the property details in the property dictionary to mark it as no
  // longer deleted.
  if (instr->hydrogen()->RequiresHoleCheck()) {
    // We use a temp to check the payload.
    Register payload = ToRegister(instr->TempAt(0));
    __ ldr(payload, FieldMemOperand(cell, JSGlobalPropertyCell::kValueOffset));
    __ CompareRoot(payload, Heap::kTheHoleValueRootIndex);
    DeoptimizeIf(eq, instr->environment());
  }

  // Store the value.
  __ str(value, FieldMemOperand(cell, JSGlobalPropertyCell::kValueOffset));
  // Cells are always rescanned, so no write barrier here.
}


void LCodeGen::DoStoreGlobalGeneric(LStoreGlobalGeneric* instr) {
  __ mov(r2, Operand(instr->name()));
  Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
      ? isolate()->builtins()->StoreIC_Initialize_Strict()
      : isolate()->builtins()->StoreIC_Initialize();
  CallCode(ic, RelocInfo::CODE_TARGET_CONTEXT, instr);
}


void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
  Register context = ToRegister(instr->context());
  Register result = ToRegister(instr->result());
  __ ldr(result, ContextOperand(context, instr->slot_index()));
  if (instr->hydrogen()->RequiresHoleCheck()) {
    __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
    __ cmp(result, ip);
    if (instr->hydrogen()->DeoptimizesOnHole()) {
      DeoptimizeIf(eq, instr->environment());
    } else {
      __ mov(result, Operand(factory()->undefined_value()), LeaveCC, eq);
    }
  }
}


void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
  Register context = ToRegister(instr->context());
  Register value = ToRegister(instr->value());
  Register scratch = scratch0();
  MemOperand target = ContextOperand(context, instr->slot_index());

  Label skip_assignment;

  if (instr->hydrogen()->RequiresHoleCheck()) {
    __ ldr(scratch, target);
    __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
    __ cmp(scratch, ip);
    if (instr->hydrogen()->DeoptimizesOnHole()) {
      DeoptimizeIf(eq, instr->environment());
    } else {
      __ b(ne, &skip_assignment);
    }
  }

  __ str(value, target);
  if (instr->hydrogen()->NeedsWriteBarrier()) {
    HType type = instr->hydrogen()->value()->type();
    SmiCheck check_needed =
        type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
    __ RecordWriteContextSlot(context,
                              target.offset(),
                              value,
                              scratch,
                              kLRHasBeenSaved,
                              kDontSaveFPRegs,
                              EMIT_REMEMBERED_SET,
                              check_needed);
  }

  __ bind(&skip_assignment);
}

void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
  Register object = ToRegister(instr->InputAt(0));
  Register result = ToRegister(instr->result());
  if (instr->hydrogen()->is_in_object()) {
    __ ldr(result, FieldMemOperand(object, instr->hydrogen()->offset()));
  } else {
    __ ldr(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
    __ ldr(result, FieldMemOperand(result, instr->hydrogen()->offset()));
  }
}


void LCodeGen::EmitLoadFieldOrConstantFunction(Register result,
                                               Register object,
                                               Handle<Map> type,
                                               Handle<String> name,
                                               LEnvironment* env) {
  LookupResult lookup(isolate());
  type->LookupInDescriptors(NULL, *name, &lookup);
  ASSERT(lookup.IsFound() || lookup.IsCacheable());
  if (lookup.IsFound() && lookup.type() == FIELD) {
    int index = lookup.GetLocalFieldIndexFromMap(*type);
    int offset = index * kPointerSize;
    if (index < 0) {
      // Negative property indices are in-object properties, indexed
      // from the end of the fixed part of the object.
      __ ldr(result, FieldMemOperand(object, offset + type->instance_size()));
    } else {
      // Non-negative property indices are in the properties array.
      __ ldr(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
      __ ldr(result, FieldMemOperand(result, offset + FixedArray::kHeaderSize));
    }
  } else if (lookup.IsFound() && lookup.type() == CONSTANT_FUNCTION) {
    Handle<JSFunction> function(lookup.GetConstantFunctionFromMap(*type));
    __ LoadHeapObject(result, function);
  } else {
    // Negative lookup. Check prototypes.
    Heap* heap = type->GetHeap();
    HeapObject* current = HeapObject::cast((*type)->prototype());
    while (current != heap->null_value()) {
      Handle<HeapObject> link(current);
      __ LoadHeapObject(result, link);
      __ ldr(result, FieldMemOperand(result, HeapObject::kMapOffset));
      __ cmp(result, Operand(Handle<Map>(JSObject::cast(current)->map())));
      DeoptimizeIf(ne, env);
      current = HeapObject::cast(current->map()->prototype());
    }
    __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
  }
}


void LCodeGen::DoLoadNamedFieldPolymorphic(LLoadNamedFieldPolymorphic* instr) {
  Register object = ToRegister(instr->object());
  Register result = ToRegister(instr->result());
  Register object_map = scratch0();

  int map_count = instr->hydrogen()->types()->length();
  bool need_generic = instr->hydrogen()->need_generic();

  if (map_count == 0 && !need_generic) {
    DeoptimizeIf(al, instr->environment());
    return;
  }
  Handle<String> name = instr->hydrogen()->name();
  Label done;
  __ ldr(object_map, FieldMemOperand(object, HeapObject::kMapOffset));
  for (int i = 0; i < map_count; ++i) {
    bool last = (i == map_count - 1);
    Handle<Map> map = instr->hydrogen()->types()->at(i);
    Label check_passed;
    __ CompareMap(object_map, map, &check_passed, ALLOW_ELEMENT_TRANSITION_MAPS);
    if (last && !need_generic) {
      DeoptimizeIf(ne, instr->environment());
      __ bind(&check_passed);
      EmitLoadFieldOrConstantFunction(
          result, object, map, name, instr->environment());
    } else {
      Label next;
      __ b(ne, &next);
      __ bind(&check_passed);
      EmitLoadFieldOrConstantFunction(
          result, object, map, name, instr->environment());
      __ b(&done);
      __ bind(&next);
    }
  }
  if (need_generic) {
    __ mov(r2, Operand(name));
    Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
    CallCode(ic, RelocInfo::CODE_TARGET, instr);
  }
  __ bind(&done);
}

void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
  // Name is always in r2.
  __ mov(r2, Operand(instr->name()));
  Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
}


void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
  Register scratch = scratch0();
  Register function = ToRegister(instr->function());
  Register result = ToRegister(instr->result());

  // Check that the function really is a function. Load map into the
  // result register.
  __ CompareObjectType(function, result, scratch, JS_FUNCTION_TYPE);
  DeoptimizeIf(ne, instr->environment());

  // Make sure that the function has an instance prototype.
  Label non_instance;
  __ ldrb(scratch, FieldMemOperand(result, Map::kBitFieldOffset));
  __ tst(scratch, Operand(1 << Map::kHasNonInstancePrototype));
  __ b(ne, &non_instance);

  // Get the prototype or initial map from the function.
  __ ldr(result,
         FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));

  // Check that the function has a prototype or an initial map.
  __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
  __ cmp(result, ip);
  DeoptimizeIf(eq, instr->environment());

  // If the function does not have an initial map, we're done.
  Label done;
  __ CompareObjectType(result, scratch, scratch, MAP_TYPE);
  __ b(ne, &done);

  // Get the prototype from the initial map.
  __ ldr(result, FieldMemOperand(result, Map::kPrototypeOffset));
  __ jmp(&done);

  // Non-instance prototype: Fetch the prototype from the constructor field
  // in the initial map.
  __ bind(&non_instance);
  __ ldr(result, FieldMemOperand(result, Map::kConstructorOffset));

  // All done.
  __ bind(&done);
}


void LCodeGen::DoLoadElements(LLoadElements* instr) {
  Register result = ToRegister(instr->result());
  Register input = ToRegister(instr->InputAt(0));
  Register scratch = scratch0();

  __ ldr(result, FieldMemOperand(input, JSObject::kElementsOffset));
  if (FLAG_debug_code) {
    Label done, fail;
    __ ldr(scratch, FieldMemOperand(result, HeapObject::kMapOffset));
    __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
    __ cmp(scratch, ip);
    __ b(eq, &done);
    __ LoadRoot(ip, Heap::kFixedCOWArrayMapRootIndex);
    __ cmp(scratch, ip);
    __ b(eq, &done);
    // ... also accept external and fast-double element maps ...
    __ bind(&fail);
    __ Abort("Check for fast or external elements failed.");
    __ bind(&done);
  }
}


void LCodeGen::DoLoadExternalArrayPointer(
    LLoadExternalArrayPointer* instr) {
  Register to_reg = ToRegister(instr->result());
  Register from_reg = ToRegister(instr->InputAt(0));
  __ ldr(to_reg, FieldMemOperand(from_reg,
                                 ExternalArray::kExternalPointerOffset));
}

void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
  Register arguments = ToRegister(instr->arguments());
  Register length = ToRegister(instr->length());
  Register index = ToRegister(instr->index());
  Register result = ToRegister(instr->result());

  // Bailout index is not a valid argument index. Use unsigned check to get
  // the negative check for free.
  __ sub(length, length, index, SetCC);
  DeoptimizeIf(ls, instr->environment());

  // There are two words between the frame pointer and the last argument.
  // Subtracting from length accounts for one of them; add one more.
  __ add(length, length, Operand(1));
  __ ldr(result, MemOperand(arguments, length, LSL, kPointerSizeLog2));
}


void LCodeGen::DoLoadKeyedFastElement(LLoadKeyedFastElement* instr) {
  Register elements = ToRegister(instr->elements());
  Register key = EmitLoadRegister(instr->key(), scratch0());
  Register result = ToRegister(instr->result());
  Register scratch = scratch0();

  // Load the result.
  __ add(scratch, elements, Operand(key, LSL, kPointerSizeLog2));
  __ ldr(result, FieldMemOperand(scratch, FixedArray::kHeaderSize));

  // Check for the hole value.
  if (instr->hydrogen()->RequiresHoleCheck()) {
    if (instr->hydrogen()->elements_kind() == FAST_SMI_ONLY_ELEMENTS) {
      __ tst(result, Operand(kSmiTagMask));
      DeoptimizeIf(ne, instr->environment());
    } else {
      __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
      __ cmp(result, scratch);
      DeoptimizeIf(eq, instr->environment());
    }
  }
}


void LCodeGen::DoLoadKeyedFastDoubleElement(
    LLoadKeyedFastDoubleElement* instr) {
  Register elements = ToRegister(instr->elements());
  bool key_is_constant = instr->key()->IsConstantOperand();
  Register key = no_reg;
  DwVfpRegister result = ToDoubleRegister(instr->result());
  Register scratch = scratch0();

  int shift_size = kDoubleSizeLog2;
  int constant_key = 0;
  if (key_is_constant) {
    constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
    if (constant_key & 0xF0000000) {
      Abort("array index constant value too big.");
    }
  } else {
    key = ToRegister(instr->key());
  }

  Operand operand = key_is_constant
      ? Operand(((constant_key + instr->additional_index()) << shift_size) +
                FixedDoubleArray::kHeaderSize - kHeapObjectTag)
      : Operand(key, LSL, shift_size);
  __ add(elements, elements, operand);
  if (!key_is_constant) {
    __ add(elements, elements,
           Operand((FixedDoubleArray::kHeaderSize - kHeapObjectTag) +
                   (instr->additional_index() << shift_size)));
  }

  if (instr->hydrogen()->RequiresHoleCheck()) {
    __ ldr(scratch, MemOperand(elements, sizeof(kHoleNanLower32)));
    __ cmp(scratch, Operand(kHoleNanUpper32));
    DeoptimizeIf(eq, instr->environment());
  }

  __ vldr(result, elements, 0);
}

void LCodeGen::DoLoadKeyedSpecializedArrayElement(
    LLoadKeyedSpecializedArrayElement* instr) {
  Register external_pointer = ToRegister(instr->external_pointer());
  Register key = no_reg;
  ElementsKind elements_kind = instr->elements_kind();
  bool key_is_constant = instr->key()->IsConstantOperand();
  int constant_key = 0;
  if (key_is_constant) {
    constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
    if (constant_key & 0xF0000000) {
      Abort("array index constant value too big.");
    }
  } else {
    key = ToRegister(instr->key());
  }
  int shift_size = ElementsKindToShiftSize(elements_kind);
  int additional_offset = instr->additional_index() << shift_size;

  if (elements_kind == EXTERNAL_FLOAT_ELEMENTS ||
      elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
    CpuFeatures::Scope scope(VFP3);
    DwVfpRegister result = ToDoubleRegister(instr->result());
    Operand operand = key_is_constant
        ? Operand(constant_key << shift_size)
        : Operand(key, LSL, shift_size);
    __ add(scratch0(), external_pointer, operand);
    if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
      __ vldr(result.low(), scratch0(), additional_offset);
      __ vcvt_f64_f32(result, result.low());
    } else {  // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS
      __ vldr(result, scratch0(), additional_offset);
    }
  } else {
    Register result = ToRegister(instr->result());
    if (instr->additional_index() != 0 && !key_is_constant) {
      __ add(scratch0(), key, Operand(instr->additional_index()));
    }
    MemOperand mem_operand(key_is_constant
        ? MemOperand(external_pointer,
                     (constant_key << shift_size) + additional_offset)
        : (instr->additional_index() == 0
           ? MemOperand(external_pointer, key, LSL, shift_size)
           : MemOperand(external_pointer, scratch0(), LSL, shift_size)));
    switch (elements_kind) {
      case EXTERNAL_BYTE_ELEMENTS:
        __ ldrsb(result, mem_operand);
        break;
      case EXTERNAL_PIXEL_ELEMENTS:
      case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
        __ ldrb(result, mem_operand);
        break;
      case EXTERNAL_SHORT_ELEMENTS:
        __ ldrsh(result, mem_operand);
        break;
      case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
        __ ldrh(result, mem_operand);
        break;
      case EXTERNAL_INT_ELEMENTS:
        __ ldr(result, mem_operand);
        break;
      case EXTERNAL_UNSIGNED_INT_ELEMENTS:
        __ ldr(result, mem_operand);
        __ cmp(result, Operand(0x80000000));
        DeoptimizeIf(cs, instr->environment());
        break;
      default:
        UNREACHABLE();
        break;
    }
  }
}

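// For EXTERNAL_UNSIGNED_INT_ELEMENTS the loaded value is an unsigned
// 32-bit integer, but the untagged representation here is a signed int32:
// cmp result, #0x80000000 sets the carry flag exactly when the value has
// the top bit set, i.e. when it cannot be represented as a non-negative
// int32, so the code deoptimizes (cs) instead of producing a negative
// number.
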
void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
  Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
}


void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
  Register scratch = scratch0();
  Register result = ToRegister(instr->result());

  if (instr->hydrogen()->from_inlined()) {
    __ sub(result, sp, Operand(2 * kPointerSize));
  } else {
    // Check if the calling frame is an arguments adaptor frame.
    Label done, adapted;
    __ ldr(scratch, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
    __ ldr(result, MemOperand(scratch, StandardFrameConstants::kContextOffset));
    __ cmp(result, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));

    // Result is the frame pointer for the frame if not adapted and for the
    // real frame below the adaptor frame if adapted.
    __ mov(result, fp, LeaveCC, ne);
    __ mov(result, scratch, LeaveCC, eq);
  }
}


void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
  Register elem = ToRegister(instr->InputAt(0));
  Register result = ToRegister(instr->result());

  Label done;

  // If no arguments adaptor frame the number of arguments is fixed.
  __ cmp(fp, elem);
  __ mov(result, Operand(scope()->num_parameters()));
  __ b(eq, &done);

  // Arguments adaptor frame present. Get the argument length from there.
  __ ldr(result, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
  __ ldr(result,
         MemOperand(result, ArgumentsAdaptorFrameConstants::kLengthOffset));
  __ SmiUntag(result);

  // Argument length is in the result register.
  __ bind(&done);
}


void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
  Register receiver = ToRegister(instr->receiver());
  Register function = ToRegister(instr->function());
  Register scratch = scratch0();

  // If the receiver is null or undefined, we have to pass the global
  // object as a receiver to normal functions. Values have to be
  // passed unchanged to builtins and strict-mode functions.
  Label global_object, receiver_ok;

  // Do not transform the receiver to object for strict-mode functions.
  __ ldr(scratch,
         FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
  __ ldr(scratch,
         FieldMemOperand(scratch, SharedFunctionInfo::kCompilerHintsOffset));
  __ tst(scratch,
         Operand(1 << (SharedFunctionInfo::kStrictModeFunction + kSmiTagSize)));
  __ b(ne, &receiver_ok);

  // Do not transform the receiver to object for builtins.
  __ tst(scratch, Operand(1 << (SharedFunctionInfo::kNative + kSmiTagSize)));
  __ b(ne, &receiver_ok);

  // Normal function. Replace undefined or null with the global receiver.
  __ LoadRoot(scratch, Heap::kNullValueRootIndex);
  __ cmp(receiver, scratch);
  __ b(eq, &global_object);
  __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
  __ cmp(receiver, scratch);
  __ b(eq, &global_object);

  // Deoptimize if the receiver is not a JS object.
  __ tst(receiver, Operand(kSmiTagMask));
  DeoptimizeIf(eq, instr->environment());
  __ CompareObjectType(receiver, scratch, scratch, FIRST_SPEC_OBJECT_TYPE);
  DeoptimizeIf(lt, instr->environment());
  __ jmp(&receiver_ok);

  __ bind(&global_object);
  __ ldr(receiver, GlobalObjectOperand());
  __ ldr(receiver,
         FieldMemOperand(receiver, JSGlobalObject::kGlobalReceiverOffset));
  __ bind(&receiver_ok);
}

void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
  Register receiver = ToRegister(instr->receiver());
  Register function = ToRegister(instr->function());
  Register length = ToRegister(instr->length());
  Register elements = ToRegister(instr->elements());
  Register scratch = scratch0();

  // Copy the arguments to this function possibly from the
  // adaptor frame below it.
  const uint32_t kArgumentsLimit = 1 * KB;
  __ cmp(length, Operand(kArgumentsLimit));
  DeoptimizeIf(hi, instr->environment());

  // Push the receiver and use the register to keep the original
  // number of arguments.
  __ push(receiver);
  __ mov(receiver, length);
  // The arguments are at a one pointer size offset from elements.
  __ add(elements, elements, Operand(1 * kPointerSize));

  // Loop through the arguments pushing them onto the execution
  // stack.
  Label invoke, loop;
  // length is a small non-negative integer, due to the test above.
  __ cmp(length, Operand(0));
  __ b(eq, &invoke);
  __ bind(&loop);
  __ ldr(scratch, MemOperand(elements, length, LSL, 2));
  __ push(scratch);
  __ sub(length, length, Operand(1), SetCC);
  __ b(ne, &loop);

  __ bind(&invoke);
  ASSERT(instr->HasPointerMap());
  LPointerMap* pointers = instr->pointer_map();
  RecordPosition(pointers->position());
  SafepointGenerator safepoint_generator(
      this, pointers, Safepoint::kLazyDeopt);
  // The number of arguments is stored in receiver, as expected
  // by InvokeFunction.
  ParameterCount actual(receiver);
  __ InvokeFunction(function, actual, CALL_FUNCTION,
                    safepoint_generator, CALL_AS_METHOD);
  __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
}


void LCodeGen::DoPushArgument(LPushArgument* instr) {
  LOperand* argument = instr->InputAt(0);
  if (argument->IsDoubleRegister() || argument->IsDoubleStackSlot()) {
    Abort("DoPushArgument not implemented for double type.");
  } else {
    Register argument_reg = EmitLoadRegister(argument, ip);
    __ push(argument_reg);
  }
}


void LCodeGen::DoDrop(LDrop* instr) {
  __ Drop(instr->count());
}

3090 void LCodeGen::DoThisFunction(LThisFunction* instr) {
3091 Register result = ToRegister(instr->result());
3092 __ LoadHeapObject(result, instr->hydrogen()->closure());
3096 void LCodeGen::DoContext(LContext* instr) {
3097 Register result = ToRegister(instr->result());
3102 void LCodeGen::DoOuterContext(LOuterContext* instr) {
3103 Register context = ToRegister(instr->context());
3104 Register result = ToRegister(instr->result());
3110 void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
3112 __ LoadHeapObject(scratch0(), instr->hydrogen()->pairs());
3113 __ push(scratch0());
3114 __ mov(scratch0(), Operand(Smi::FromInt(instr->hydrogen()->flags())));
3115 __ push(scratch0());
3116 CallRuntime(Runtime::kDeclareGlobals, 3, instr);
3120 void LCodeGen::DoGlobalObject(LGlobalObject* instr) {
3121 Register result = ToRegister(instr->result());
3126 void LCodeGen::DoGlobalReceiver(LGlobalReceiver* instr) {
3127 Register global = ToRegister(instr->global());
3128 Register result = ToRegister(instr->result());
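// Calls a function known at compile time. When no arguments adaption is
// needed (the formal parameter count matches the arity), the call can go
// directly at the known target; otherwise it falls back to InvokeFunction.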
3133 void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
3135     LInstruction* instr,
3138 bool can_invoke_directly = !function->NeedsArgumentsAdaption() ||
3139     function->shared()->formal_parameter_count() == arity;
3141 LPointerMap* pointers = instr->pointer_map();
3142 RecordPosition(pointers->position());
3144 if (can_invoke_directly) {
3145 if (r1_state == R1_UNINITIALIZED) {
3146 __ LoadHeapObject(r1, function);
3150 bool change_context =
3151     (info()->closure()->context() != function->context()) ||
3152     scope()->contains_with() ||
3153     (scope()->num_heap_slots() > 0);
3154 if (change_context) {
3160 if (!function->NeedsArgumentsAdaption()) {
3161 __ mov(r0, Operand(arity));
3165 __ SetCallKind(r5, call_kind);
3170 RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
3172 SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
3173 ParameterCount count(arity);
3174 __ InvokeFunction(function, count, CALL_FUNCTION, generator, call_kind);
3182 void LCodeGen::DoCallConstantFunction(LCallConstantFunction* instr) {
3184 CallKnownFunction(instr->function(),
3192 void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr) {
3193 Register input = ToRegister(instr->InputAt(0));
3194 Register result = ToRegister(instr->result());
3195 Register scratch = scratch0();
3199 __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
3200 __ cmp(scratch, Operand(ip));
3201 DeoptimizeIf(ne, instr->environment());
3204 Register exponent = scratch0();
3211 __ Move(result, input);
3217 PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
3221 Register tmp1 = input.is(r1) ? r0 : r1;
3222 Register tmp2 = input.is(r2) ? r0 : r2;
3223 Register tmp3 = input.is(r3) ? r0 : r3;
3224 Register tmp4 = input.is(r4) ? r0 : r4;
3228 Label allocated, slow;
3229 __ LoadRoot(tmp4, Heap::kHeapNumberMapRootIndex);
3230 __ AllocateHeapNumber(tmp1, tmp2, tmp3, tmp4, &slow);
3236 CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
3238 if (!tmp1.is(r0)) __ mov(tmp1, Operand(r0));
3240 __ LoadFromSafepointRegisterSlot(input, input);
3243 __ bind(&allocated);
3251 __ StoreToSafepointRegisterSlot(tmp1, result);
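// Integer abs(): rsb negates negative inputs with SetCC, so negating
// kMinInt overflows and sets the V flag, which the vs deopt below catches.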
3258 void LCodeGen::EmitIntegerMathAbs(LUnaryMathOperation* instr) {
3259 Register input = ToRegister(instr->InputAt(0));
3260 Register result = ToRegister(instr->result());
3261 __ cmp(input, Operand(0));
3262 __ Move(result, input, pl);
3266 __ rsb(result, input, Operand(0), SetCC, mi);
3268 DeoptimizeIf(vs, instr->environment());
3272 void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) {
3274 class DeferredMathAbsTaggedHeapNumber: public LDeferredCode {
3276 DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen,
3277     LUnaryMathOperation* instr)
3278     : LDeferredCode(codegen), instr_(instr) { }
3279 virtual void Generate() {
3280     codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
3282 virtual LInstruction* instr() { return instr_; }
3284 LUnaryMathOperation* instr_;
3287 Representation r = instr->hydrogen()->value()->representation();
3289 DwVfpRegister input = ToDoubleRegister(instr->InputAt(0));
3290 DwVfpRegister result = ToDoubleRegister(instr->result());
3291 __ vabs(result, input);
3292 } else if (r.IsInteger32()) {
3293 EmitIntegerMathAbs(instr);
3296 DeferredMathAbsTaggedHeapNumber* deferred =
3297     new(zone()) DeferredMathAbsTaggedHeapNumber(this, instr);
3298 Register input = ToRegister(instr->InputAt(0));
3300 __ JumpIfNotSmi(input, deferred->entry());
3302 EmitIntegerMathAbs(instr);
3303 __ bind(deferred->exit());
3308 void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) {
3310 Register result = ToRegister(instr->result());
3311 SwVfpRegister single_scratch = double_scratch0().low();
3312 Register scratch1 = scratch0();
3313 Register scratch2 = ToRegister(instr->TempAt(0));
3320 DeoptimizeIf(ne, instr->environment());
3323 __ vmov(result, single_scratch);
3328 __ cmp(result, Operand(0));
3330 __ vmov(scratch1, input.high());
3332 DeoptimizeIf(ne, instr->environment());
3338 void LCodeGen::DoMathRound(LUnaryMathOperation* instr) {
3340 Register result = ToRegister(instr->result());
3341 Register scratch = scratch0();
3342 Label done, check_sign_on_zero;
3345 __ vmov(result, input.high());
3355 __ b(le, &check_sign_on_zero);
3363 DeoptimizeIf(ge, instr->environment());
3368 __ Vmov(double_scratch0(), 0.5);
3369 __ vadd(double_scratch0(), input, double_scratch0());
3373 __ vmov(result, double_scratch0().high());
3374 __ eor(result, result, Operand(scratch), SetCC);
3376 DeoptimizeIf(mi, instr->environment());
3383     double_scratch0().low(),
3387 DeoptimizeIf(ne, instr->environment());
3388 __ vmov(result, double_scratch0().low());
3392 __ cmp(result, Operand(0));
3394 __ bind(&check_sign_on_zero);
3395 __ vmov(scratch, input.high());
3397 DeoptimizeIf(ne, instr->environment());
3403 void LCodeGen::DoMathSqrt(LUnaryMathOperation* instr) {
3406 __ vsqrt(result, input);
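// Math.pow(x, 0.5): assuming temp holds -Infinity as in the elided setup,
// the compare special-cases pow(-Infinity, 0.5) == +Infinity, and adding
// kDoubleRegZero first converts a -0 input to +0 before vsqrt.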
3410 void LCodeGen::DoMathPowHalf(LUnaryMathOperation* instr) {
3420 __ VFPCompareAndSetFlags(input, temp);
3421 __ vneg(result, temp, eq);
3425 __ vadd(result, input, kDoubleRegZero);
3426 __ vsqrt(result, result);
3431 void LCodeGen::DoPower(LPower* instr) {
3432 Representation exponent_type = instr->hydrogen()->right()->representation();
3435 ASSERT(!instr->InputAt(1)->IsDoubleRegister() ||
3436     ToDoubleRegister(instr->InputAt(1)).is(d2));
3437 ASSERT(!instr->InputAt(1)->IsRegister() ||
3439 ASSERT(ToDoubleRegister(instr->InputAt(0)).is(d1));
3440 ASSERT(ToDoubleRegister(instr->result()).is(d3));
3442 if (exponent_type.IsTagged()) {
3444 __ JumpIfSmi(r2, &no_deopt);
3446 __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
3448 DeoptimizeIf(ne, instr->environment());
3452 } else if (exponent_type.IsInteger32()) {
3456 ASSERT(exponent_type.IsDouble());
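// MathRandom: the per-context seed is two 32-bit words; each call runs two
// 16-bit multiply-with-carry steps (multipliers 18273 and 36969 below) and
// then builds a double in [0, 1) from the resulting random bits.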
3463 void LCodeGen::DoRandom(LRandom* instr) {
3464 class DeferredDoRandom: public LDeferredCode {
3466 DeferredDoRandom(LCodeGen* codegen, LRandom* instr)
3467     : LDeferredCode(codegen), instr_(instr) { }
3468 virtual void Generate() { codegen()->DoDeferredRandom(instr_); }
3469 virtual LInstruction* instr() { return instr_; }
3474 DeferredDoRandom* deferred = new(zone()) DeferredDoRandom(this, instr);
3478 ASSERT(ToDoubleRegister(instr->result()).is(d7));
3481 static const int kSeedSize = sizeof(uint32_t);
3485 static const int kRandomSeedOffset =
3492 __ cmp(r1, Operand(0));
3493 __ b(eq, deferred->entry());
3500 __ and_(r3, r1, Operand(0xFFFF));
3501 __ mov(r4, Operand(18273));
3508 __ and_(r3, r0, Operand(0xFFFF));
3509 __ mov(r4, Operand(36969));
3516 __ and_(r0, r0, Operand(0x3FFFF));
3519 __ bind(deferred->exit());
3522 __ mov(r1, Operand(0x41000000));
3523 __ orr(r1, r1, Operand(0x300000));
3534 void LCodeGen::DoDeferredRandom(LRandom* instr) {
3535 __ PrepareCallCFunction(1, scratch0());
3536 __ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1);
3541 void LCodeGen::DoMathLog(LUnaryMathOperation* instr) {
3542 ASSERT(ToDoubleRegister(instr->result()).is(d2));
3545 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
3549 void LCodeGen::DoMathTan(LUnaryMathOperation* instr) {
3550 ASSERT(ToDoubleRegister(instr->result()).is(d2));
3553 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
3557 void LCodeGen::DoMathCos(LUnaryMathOperation* instr) {
3558 ASSERT(ToDoubleRegister(instr->result()).is(d2));
3561 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
3565 void LCodeGen::DoMathSin(LUnaryMathOperation* instr) {
3566 ASSERT(ToDoubleRegister(instr->result()).is(d2));
3569 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
3573 void LCodeGen::DoUnaryMathOperation(LUnaryMathOperation* instr) {
3574 switch (instr->op()) {
3588 DoMathPowHalf(instr);
3603 Abort("Unimplemented type of LUnaryMathOperation.");
3609 void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
3611 ASSERT(instr->HasPointerMap());
3613 if (instr->known_function().is_null()) {
3614 LPointerMap* pointers = instr->pointer_map();
3615 RecordPosition(pointers->position());
3616 SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
3617 ParameterCount count(instr->arity());
3621 CallKnownFunction(instr->known_function(),
3625 R1_CONTAINS_TARGET);
3630 void LCodeGen::DoCallKeyed(LCallKeyed* instr) {
3633 int arity = instr->arity();
3635 isolate()->stub_cache()->ComputeKeyedCallInitialize(arity);
3636 CallCode(ic, RelocInfo::CODE_TARGET, instr);
3641 void LCodeGen::DoCallNamed(LCallNamed* instr) {
3644 int arity = instr->arity();
3645 RelocInfo::Mode mode = RelocInfo::CODE_TARGET;
3647 isolate()->stub_cache()->ComputeCallInitialize(arity, mode);
3648 __ mov(r2, Operand(instr->name()));
3649 CallCode(ic, mode, instr);
3655 void LCodeGen::DoCallFunction(LCallFunction* instr) {
3659 int arity = instr->arity();
3661 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
3666 void LCodeGen::DoCallGlobal(LCallGlobal* instr) {
3669 int arity = instr->arity();
3670 RelocInfo::Mode mode = RelocInfo::CODE_TARGET_CONTEXT;
3672 isolate()->stub_cache()->ComputeCallInitialize(arity, mode);
3673 __ mov(r2, Operand(instr->name()));
3674 CallCode(ic, mode, instr);
3679 void LCodeGen::DoCallKnownGlobal(LCallKnownGlobal* instr) {
3681 CallKnownFunction(instr->target(),
3689 void LCodeGen::DoCallNew(LCallNew* instr) {
3694 __ mov(r0, Operand(instr->arity()));
3695 CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
3699 void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
3700 CallRuntime(instr->function(), instr->arity(), instr);
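// Named field stores need a write barrier (RecordWriteField) whenever the
// stored value may be a heap object, so the incremental marker and the
// generational GC see the new pointer; smi-only values skip the barrier.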
3704 void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
3705 Register object = ToRegister(instr->object());
3707 Register scratch = scratch0();
3708 int offset = instr->offset();
3710 ASSERT(!object.is(value));
3712 if (!instr->transition().is_null()) {
3713 __ mov(scratch, Operand(instr->transition()));
3715 if (instr->hydrogen()->NeedsWriteBarrierForMap()) {
3716 Register temp = ToRegister(instr->TempAt(0));
3718 __ RecordWriteField(object,
3730 HType type = instr->hydrogen()->value()->type();
3733 if (instr->is_in_object()) {
3735 if (instr->hydrogen()->NeedsWriteBarrier()) {
3737 __ RecordWriteField(object,
3749 if (instr->hydrogen()->NeedsWriteBarrier()) {
3752 __ RecordWriteField(scratch,
3765 void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
3770 __ mov(r2, Operand(instr->name()));
3771 Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
3772     ? isolate()->builtins()->StoreIC_Initialize_Strict()
3773     : isolate()->builtins()->StoreIC_Initialize();
3774 CallCode(ic, RelocInfo::CODE_TARGET, instr);
3778 void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
3780 DeoptimizeIf(hs, instr->environment());
3784 void LCodeGen::DoStoreKeyedFastElement(LStoreKeyedFastElement* instr) {
3786 Register elements = ToRegister(instr->object());
3787 Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) : no_reg;
3788 Register scratch = scratch0();
3791 if (instr->key()->IsConstantOperand()) {
3792 ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
3795 (ToInteger32(const_operand) + instr->additional_index()) * kPointerSize
3800 if (instr->additional_index() != 0) {
3808 if (instr->hydrogen()->NeedsWriteBarrier()) {
3809 HType type = instr->hydrogen()->value()->type();
3814 __ RecordWrite(elements,
3825 void LCodeGen::DoStoreKeyedFastDoubleElement(
3826 LStoreKeyedFastDoubleElement* instr) {
3827 DwVfpRegister value = ToDoubleRegister(instr->value());
3828 Register elements = ToRegister(instr->elements());
3830 Register scratch = scratch0();
3831 bool key_is_constant = instr->key()->IsConstantOperand();
3832 int constant_key = 0;
3836 if (key_is_constant) {
3838 if (constant_key & 0xF0000000) {
3839 Abort("array index constant value too big.");
3845 Operand operand = key_is_constant
3846     ? Operand((constant_key << shift_size) +
3848     : Operand(key, LSL, shift_size);
3849 __ add(scratch, elements, operand);
3850 if (!key_is_constant) {
3851 __ add(scratch, scratch,
3855 if (instr->NeedsCanonicalization()) {
3857 __ VFPCompareAndSetFlags(value, value);
3864 __ vstr(value, scratch, instr->additional_index() << shift_size);
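// Stores into external (typed) arrays: float32 elements are narrowed with
// vcvt_f32_f64 before the store, float64 elements use vstr directly, and
// integer elements pick strb/strh/str according to the element width.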
3868 void LCodeGen::DoStoreKeyedSpecializedArrayElement(
3869 LStoreKeyedSpecializedArrayElement* instr) {
3871 Register external_pointer = ToRegister(instr->external_pointer());
3874 bool key_is_constant = instr->key()->IsConstantOperand();
3875 int constant_key = 0;
3876 if (key_is_constant) {
3878 if (constant_key & 0xF0000000) {
3879 Abort("array index constant value too big.");
3885 int additional_offset = instr->additional_index() << shift_size;
3889 CpuFeatures::Scope scope(VFP3);
3890 DwVfpRegister value(ToDoubleRegister(instr->value()));
3891 Operand operand(key_is_constant ? Operand(constant_key << shift_size)
3892     : Operand(key, LSL, shift_size));
3893 __ add(scratch0(), external_pointer, operand);
3895 __ vcvt_f32_f64(double_scratch0().low(), value);
3896 __ vstr(double_scratch0().low(), scratch0(), additional_offset);
3898 __ vstr(value, scratch0(), additional_offset);
3902 if (instr->additional_index() != 0 && !key_is_constant) {
3903 __ add(scratch0(), key, Operand(instr->additional_index()));
3907     ((constant_key + instr->additional_index())
3909     : (instr->additional_index() == 0
3911     : MemOperand(external_pointer, scratch0(), LSL, shift_size)));
3912 switch (elements_kind) {
3916 __ strb(value, mem_operand);
3920 __ strh(value, mem_operand);
3924 __ str(value, mem_operand);
3943 void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
3948 Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
3949 ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
3950 : isolate()->builtins()->KeyedStoreIC_Initialize();
3951 CallCode(ic, RelocInfo::CODE_TARGET, instr);
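// Elements-kind transition: if the object's map equals from_map, the map is
// replaced with to_map; smi-to-double and double-to-object transitions call
// builtins that also convert the elements backing store.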
3955 void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
3956 Register object_reg = ToRegister(instr->object());
3957 Register new_map_reg = ToRegister(instr->new_map_reg());
3958 Register scratch = scratch0();
3960 Handle<Map> from_map = instr->original_map();
3961 Handle<Map> to_map = instr->transitioned_map();
3965 Label not_applicable;
3967 __ cmp(scratch, Operand(from_map));
3968 __ b(ne, &not_applicable);
3969 __ mov(new_map_reg, Operand(to_map));
3978 Register fixed_object_reg = ToRegister(instr->temp_reg());
3981 __ mov(fixed_object_reg, object_reg);
3982 CallCode(isolate()->builtins()->TransitionElementsSmiToDouble(),
3983     RelocInfo::CODE_TARGET, instr);
3986 Register fixed_object_reg = ToRegister(instr->temp_reg());
3989 __ mov(fixed_object_reg, object_reg);
3990 CallCode(isolate()->builtins()->TransitionElementsDoubleToObject(),
3991     RelocInfo::CODE_TARGET, instr);
3995 __ bind(&not_applicable);
3999 void LCodeGen::DoStringAdd(LStringAdd* instr) {
4003 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
4007 void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
4008 class DeferredStringCharCodeAt: public LDeferredCode {
4010 DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
4011     : LDeferredCode(codegen), instr_(instr) { }
4012 virtual void Generate() { codegen()->DoDeferredStringCharCodeAt(instr_); }
4013 virtual LInstruction* instr() { return instr_; }
4015 LStringCharCodeAt* instr_;
4018 DeferredStringCharCodeAt* deferred =
4019     new(zone()) DeferredStringCharCodeAt(this, instr);
4026 __ bind(deferred->exit());
4030 void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
4031 Register string = ToRegister(instr->string());
4032 Register result = ToRegister(instr->result());
4033 Register scratch = scratch0();
4038 __ mov(result, Operand(0));
4040 PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
4044 if (instr->index()->IsConstantOperand()) {
4053 CallRuntimeFromDeferred(Runtime::kStringCharCodeAt, 2, instr);
4054 if (FLAG_debug_code) {
4055 __ AbortIfNotSmi(r0);
4058 __ StoreToSafepointRegisterSlot(r0, result);
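// String.fromCharCode: character codes within the single-character cache
// range are looked up in the cache root; larger codes, or cache entries
// holding undefined, fall through to the deferred runtime call.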
4062 void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
4063 class DeferredStringCharFromCode: public LDeferredCode {
4065 DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
4066     : LDeferredCode(codegen), instr_(instr) { }
4067 virtual void Generate() { codegen()->DoDeferredStringCharFromCode(instr_); }
4068 virtual LInstruction* instr() { return instr_; }
4070 LStringCharFromCode* instr_;
4073 DeferredStringCharFromCode* deferred =
4074     new(zone()) DeferredStringCharFromCode(this, instr);
4076 ASSERT(instr->hydrogen()->value()->representation().IsInteger32());
4077 Register char_code = ToRegister(instr->char_code());
4078 Register result = ToRegister(instr->result());
4079 ASSERT(!char_code.is(result));
4082 __ b(hi, deferred->entry());
4083 __ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex);
4086 __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
4088 __ b(eq, deferred->entry());
4089 __ bind(deferred->exit());
4093 void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
4094 Register char_code = ToRegister(instr->char_code());
4095 Register result = ToRegister(instr->result());
4100 __ mov(result, Operand(0));
4102 PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
4103 __ SmiTag(char_code);
4105 CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr);
4106 __ StoreToSafepointRegisterSlot(r0, result);
4110 void LCodeGen::DoStringLength(LStringLength* instr) {
4111 Register string = ToRegister(instr->InputAt(0));
4112 Register result = ToRegister(instr->result());
4117 void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
4118 LOperand* input = instr->InputAt(0);
4119 ASSERT(input->IsRegister() || input->IsStackSlot());
4120 LOperand* output = instr->result();
4121 ASSERT(output->IsDoubleRegister());
4122 SwVfpRegister single_scratch = double_scratch0().low();
4123 if (input->IsStackSlot()) {
4124 Register scratch = scratch0();
4125 __ ldr(scratch, ToMemOperand(input));
4126 __ vmov(single_scratch, scratch);
4130 __ vcvt_f64_s32(ToDoubleRegister(output), single_scratch);
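// Tags an int32 as a smi; if the tagging shift overflows (vs), the deferred
// path recovers the original value (SmiUntag plus sign-bit flip), converts
// it to a double, and boxes it in a freshly allocated heap number.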
4134 void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
4135 class DeferredNumberTagI: public LDeferredCode {
4137 DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr)
4138     : LDeferredCode(codegen), instr_(instr) { }
4139 virtual void Generate() { codegen()->DoDeferredNumberTagI(instr_); }
4140 virtual LInstruction* instr() { return instr_; }
4142 LNumberTagI* instr_;
4145 Register src = ToRegister(instr->InputAt(0));
4148 DeferredNumberTagI* deferred = new(zone()) DeferredNumberTagI(this, instr);
4150 __ b(vs, deferred->entry());
4151 __ bind(deferred->exit());
4155 void LCodeGen::DoDeferredNumberTagI(LNumberTagI* instr) {
4157 Register src = ToRegister(instr->InputAt(0));
4160 SwVfpRegister flt_scratch = dbl_scratch.low();
4163 PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
4170 __ SmiUntag(src, dst);
4171 __ eor(src, src, Operand(0x80000000));
4173 __ vmov(flt_scratch, src);
4174 __ vcvt_f64_s32(dbl_scratch, flt_scratch);
4175 if (FLAG_inline_new) {
4176 __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
4177 __ AllocateHeapNumber(r5, r3, r4, r6, &slow);
4188 __ mov(ip, Operand(0));
4189 __ StoreToSafepointRegisterSlot(ip, dst);
4190 CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
4198 __ StoreToSafepointRegisterSlot(dst, dst);
4202 void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
4203 class DeferredNumberTagD: public LDeferredCode {
4205 DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
4206     : LDeferredCode(codegen), instr_(instr) { }
4207 virtual void Generate() { codegen()->DoDeferredNumberTagD(instr_); }
4208 virtual LInstruction* instr() { return instr_; }
4210 LNumberTagD* instr_;
4214 Register scratch = scratch0();
4216 Register temp1 = ToRegister(instr->TempAt(0));
4217 Register temp2 = ToRegister(instr->TempAt(1));
4219 DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr);
4220 if (FLAG_inline_new) {
4221 __ LoadRoot(scratch, Heap::kHeapNumberMapRootIndex);
4222 __ AllocateHeapNumber(reg, temp1, temp2, scratch, deferred->entry());
4224 __ jmp(deferred->entry());
4226 __ bind(deferred->exit());
4232 void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
4237 __ mov(reg, Operand(0));
4239 PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
4240 CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
4241 __ StoreToSafepointRegisterSlot(r0, reg);
4245 void LCodeGen::DoSmiTag(LSmiTag* instr) {
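// SmiUntag with SetCC shifts the smi tag bit into the carry flag, so carry
// set after the untag means the input was not a smi and must deoptimize.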
4251 void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
4252 Register input = ToRegister(instr->InputAt(0));
4253 Register result = ToRegister(instr->result());
4254 if (instr->needs_check()) {
4257 __ SmiUntag(result, input, SetCC);
4258 DeoptimizeIf(cs, instr->environment());
4260 __ SmiUntag(result, input);
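// Converts a tagged value to a double: smis via int-to-double conversion,
// heap numbers by loading the value; undefined either deoptimizes or yields
// NaN depending on deoptimize_on_undefined, and -0 may deoptimize as well.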
4265 void LCodeGen::EmitNumberUntagD(Register input_reg,
4267 bool deoptimize_on_undefined,
4268 bool deoptimize_on_minus_zero,
4269 LEnvironment* env) {
4270 Register scratch = scratch0();
4271 SwVfpRegister flt_scratch = double_scratch0().low();
4272 ASSERT(!result_reg.is(double_scratch0()));
4274 Label load_smi, heap_number, done;
4277 __ UntagAndJumpIfSmi(scratch, input_reg, &load_smi);
4281 __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
4282 __ cmp(scratch, Operand(ip));
4283 if (deoptimize_on_undefined) {
4284 DeoptimizeIf(ne, env);
4287 __ b(eq, &heap_number);
4289 __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
4290 __ cmp(input_reg, Operand(ip));
4291 DeoptimizeIf(ne, env);
4294 __ LoadRoot(ip, Heap::kNanValueRootIndex);
4299 __ bind(&heap_number);
4304 if (deoptimize_on_minus_zero) {
4305 __ vmov(ip, result_reg.low());
4306 __ cmp(ip, Operand(0));
4308 __ vmov(ip, result_reg.high());
4310 DeoptimizeIf(eq, env);
4317 __ vmov(flt_scratch, scratch);
4318 __ vcvt_f64_s32(result_reg, flt_scratch);
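// Slow path for tagged-to-int32: truncating conversions use EmitECMATruncate
// (ToInt32 semantics, with undefined mapping to 0); the non-truncating path
// deoptimizes unless the double converts exactly, with a separate -0 check.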
4323 void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
4324 Register input_reg = ToRegister(instr->InputAt(0));
4325 Register scratch1 = scratch0();
4326 Register scratch2 = ToRegister(instr->TempAt(0));
4327 DwVfpRegister double_scratch = double_scratch0();
4328 SwVfpRegister single_scratch = double_scratch.low();
4330 ASSERT(!scratch1.is(input_reg) && !scratch1.is(scratch2));
4331 ASSERT(!scratch2.is(input_reg) && !scratch2.is(scratch1));
4339 __ adc(input_reg, input_reg, Operand(input_reg));
4343 __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
4344 __ cmp(scratch1, Operand(ip));
4346 if (instr->truncating()) {
4347 Register scratch3 = ToRegister(instr->TempAt(1));
4348 DwVfpRegister double_scratch2 = ToDoubleRegister(instr->TempAt(2));
4349 ASSERT(!scratch3.is(input_reg) &&
4350     !scratch3.is(scratch1) &&
4351     !scratch3.is(scratch2));
4355 __ b(eq, &heap_number);
4358 __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
4359 __ cmp(input_reg, Operand(ip));
4360 DeoptimizeIf(ne, instr->environment());
4361 __ mov(input_reg, Operand(0));
4364 __ bind(&heap_number);
4368 __ EmitECMATruncate(input_reg,
4376 CpuFeatures::Scope scope(VFP3);
4378 DeoptimizeIf(ne, instr->environment());
4388 DeoptimizeIf(ne, instr->environment());
4390 __ vmov(input_reg, single_scratch);
4393 __ cmp(input_reg, Operand(0));
4395 __ vmov(scratch1, double_scratch.high());
4397 DeoptimizeIf(ne, instr->environment());
4404 void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
4405 class DeferredTaggedToI: public LDeferredCode {
4407 DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
4408     : LDeferredCode(codegen), instr_(instr) { }
4409 virtual void Generate() { codegen()->DoDeferredTaggedToI(instr_); }
4410 virtual LInstruction* instr() { return instr_; }
4415 LOperand* input = instr->InputAt(0);
4416 ASSERT(input->IsRegister());
4417 ASSERT(input->Equals(instr->result()));
4421 DeferredTaggedToI* deferred = new(zone()) DeferredTaggedToI(this, instr);
4425 __ SmiUntag(input_reg, SetCC);
4428 __ b(cs, deferred->entry());
4429 __ bind(deferred->exit());
4433 void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
4434 LOperand* input = instr->InputAt(0);
4435 ASSERT(input->IsRegister());
4436 LOperand* result = instr->result();
4437 ASSERT(result->IsDoubleRegister());
4442 EmitNumberUntagD(input_reg, result_reg,
4443 instr->hydrogen()->deoptimize_on_undefined(),
4444 instr->hydrogen()->deoptimize_on_minus_zero(),
4445 instr->environment());
4449 void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
4450 Register result_reg = ToRegister(instr->result());
4451 Register scratch1 = scratch0();
4452 Register scratch2 = ToRegister(instr->TempAt(0));
4453 DwVfpRegister double_input = ToDoubleRegister(instr->InputAt(0));
4454 SwVfpRegister single_scratch = double_scratch0().low();
4458 if (instr->truncating()) {
4459 Register scratch3 = ToRegister(instr->TempAt(1));
4460 __ EmitECMATruncate(result_reg,
4468 __ EmitVFPTruncate(rounding_mode,
4476 DeoptimizeIf(ne, instr->environment());
4478 __ vmov(result_reg, single_scratch);
4484 void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
4485 LOperand* input = instr->InputAt(0);
4487 DeoptimizeIf(ne, instr->environment());
4491 void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
4492 LOperand* input = instr->InputAt(0);
4494 DeoptimizeIf(eq, instr->environment());
4498 void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
4499 Register input = ToRegister(instr->InputAt(0));
4500 Register scratch = scratch0();
4505 if (instr->hydrogen()->is_interval_check()) {
4508 instr->hydrogen()->GetCheckInterval(&first, &last);
4510 __ cmp(scratch, Operand(first));
4513 if (first == last) {
4514 DeoptimizeIf(ne, instr->environment());
4516 DeoptimizeIf(lo, instr->environment());
4519 __ cmp(scratch, Operand(last));
4520 DeoptimizeIf(hi, instr->environment());
4526 instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);
4530 __ tst(scratch, Operand(mask));
4531 DeoptimizeIf(tag == 0 ? ne : eq, instr->environment());
4533 __ and_(scratch, scratch, Operand(mask));
4534 __ cmp(scratch, Operand(tag));
4535 DeoptimizeIf(ne, instr->environment());
4541 void LCodeGen::DoCheckFunction(LCheckFunction* instr) {
4543 Handle<JSFunction> target = instr->hydrogen()->target();
4544 if (isolate()->heap()->InNewSpace(*target)) {
4546 Handle<JSGlobalPropertyCell> cell =
4547 isolate()->factory()->NewJSGlobalPropertyCell(target);
4548 __ mov(ip, Operand(Handle<Object>(cell)));
4552 __ cmp(reg, Operand(target));
4554 DeoptimizeIf(ne, instr->environment());
4558 void LCodeGen::DoCheckMapCommon(Register reg,
4562 LEnvironment* env) {
4564 __ CompareMap(reg, scratch, map, &success, mode);
4565 DeoptimizeIf(ne, env);
4570 void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
4571 Register scratch = scratch0();
4572 LOperand* input = instr->InputAt(0);
4573 ASSERT(input->IsRegister());
4577 SmallMapList* map_set = instr->hydrogen()->map_set();
4578 for (int i = 0; i < map_set->length() - 1; i++) {
4579 Handle<Map> map = map_set->at(i);
4583 Handle<Map> map = map_set->last();
4589 void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
4591 Register result_reg = ToRegister(instr->result());
4593 __ ClampDoubleToUint8(result_reg, value_reg, temp_reg);
4597 void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
4598 Register unclamped_reg = ToRegister(instr->unclamped());
4599 Register result_reg = ToRegister(instr->result());
4600 __ ClampUint8(result_reg, unclamped_reg);
4604 void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
4605 Register scratch = scratch0();
4606 Register input_reg = ToRegister(instr->unclamped());
4607 Register result_reg = ToRegister(instr->result());
4609 Label is_smi, done, heap_number;
4612 __ UntagAndJumpIfSmi(result_reg, input_reg, &is_smi);
4616 __ cmp(scratch, Operand(factory()->heap_number_map()));
4617 __ b(eq, &heap_number);
4621 __ cmp(input_reg, Operand(factory()->undefined_value()));
4622 DeoptimizeIf(ne, instr->environment());
4623 __ mov(result_reg, Operand(0));
4627 __ bind(&heap_number);
4630 __ ClampDoubleToUint8(result_reg, double_scratch0(), temp_reg);
4635 __ ClampUint8(result_reg, result_reg);
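// Walks the prototype chain from instr->prototype() up to the holder,
// emitting one map check per intermediate object; a mismatch anywhere on
// the chain deoptimizes.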
4641 void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
4642 Register temp1 = ToRegister(instr->TempAt(0));
4643 Register temp2 = ToRegister(instr->TempAt(1));
4645 Handle<JSObject> holder = instr->holder();
4646 Handle<JSObject> current_prototype = instr->prototype();
4649 __ LoadHeapObject(temp1, current_prototype);
4652 while (!current_prototype.is_identical_to(holder)) {
4653 DoCheckMapCommon(temp1, temp2,
4654     Handle<Map>(current_prototype->map()),
4657     Handle<JSObject>(JSObject::cast(current_prototype->GetPrototype()));
4659 __ LoadHeapObject(temp1, current_prototype);
4663 DoCheckMapCommon(temp1, temp2,
4664     Handle<Map>(current_prototype->map()),
4666 DeoptimizeIf(ne, instr->environment());
4670 void LCodeGen::DoAllocateObject(LAllocateObject* instr) {
4671 class DeferredAllocateObject: public LDeferredCode {
4673 DeferredAllocateObject(LCodeGen* codegen, LAllocateObject* instr)
4674     : LDeferredCode(codegen), instr_(instr) { }
4675 virtual void Generate() { codegen()->DoDeferredAllocateObject(instr_); }
4676 virtual LInstruction* instr() { return instr_; }
4678 LAllocateObject* instr_;
4681 DeferredAllocateObject* deferred =
4682     new(zone()) DeferredAllocateObject(this, instr);
4684 Register result = ToRegister(instr->result());
4685 Register scratch = ToRegister(instr->TempAt(0));
4686 Register scratch2 = ToRegister(instr->TempAt(1));
4687 Handle<JSFunction> constructor = instr->hydrogen()->constructor();
4688 Handle<Map> initial_map(constructor->initial_map());
4689 int instance_size = initial_map->instance_size();
4690 ASSERT(initial_map->pre_allocated_property_fields() +
4691     initial_map->unused_property_fields() -
4692     initial_map->inobject_properties() == 0);
4697 ASSERT(!constructor->shared()->IsInobjectSlackTrackingInProgress());
4698 __ AllocateInNewSpace(instance_size,
4705 __ bind(deferred->exit());
4706 if (FLAG_debug_code) {
4707 Label is_in_new_space;
4708 __ JumpIfInNewSpace(result, scratch, &is_in_new_space);
4709 __ Abort("Allocated object is not in new-space");
4710 __ bind(&is_in_new_space);
4714 Register map = scratch;
4715 __ LoadHeapObject(map, constructor);
4721 __ LoadRoot(scratch, Heap::kEmptyFixedArrayRootIndex);
4724 if (initial_map->inobject_properties() != 0) {
4725 __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
4726 for (int i = 0; i < initial_map->inobject_properties(); i++) {
4734 void LCodeGen::DoDeferredAllocateObject(LAllocateObject* instr) {
4735 Register result = ToRegister(instr->result());
4736 Handle<JSFunction> constructor = instr->hydrogen()->constructor();
4737 Handle<Map> initial_map(constructor->initial_map());
4738 int instance_size = initial_map->instance_size();
4743 __ mov(result, Operand(0));
4745 PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
4748 CallRuntimeFromDeferred(Runtime::kAllocateInNewSpace, 1, instr);
4749 __ StoreToSafepointRegisterSlot(r0, result);
4753 void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
4754 Heap* heap = isolate()->heap();
4756 instr->hydrogen()->boilerplate_elements_kind();
4762     boilerplate_elements_kind, true)) {
4763 __ LoadHeapObject(r1, instr->hydrogen()->boilerplate_object());
4770 __ cmp(r2, Operand(boilerplate_elements_kind));
4771 DeoptimizeIf(ne, instr->environment());
4779 __ mov(r1, Operand(Handle<FixedArray>(heap->empty_fixed_array())));
4783 int length = instr->hydrogen()->length();
4784 if (instr->hydrogen()->IsCopyOnWrite()) {
4785 ASSERT(instr->hydrogen()->depth() == 1);
4788 FastCloneShallowArrayStub stub(mode, length);
4789 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
4790 } else if (instr->hydrogen()->depth() > 1) {
4791 CallRuntime(Runtime::kCreateArrayLiteral, 3, instr);
4793 CallRuntime(Runtime::kCreateArrayLiteralShallow, 3, instr);
4799 FastCloneShallowArrayStub stub(mode, length);
4800 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
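// Recursively copies a boilerplate object into pre-allocated memory:
// in-object fields first, then the elements store, bumping *offset so that
// nested objects land contiguously after their parent in the same block.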
4805 void LCodeGen::EmitDeepCopy(Handle<JSObject> object,
4813 Handle<FixedArrayBase> elements(object->elements());
4814 bool has_elements = elements->length() > 0 &&
4815     elements->map() != isolate()->heap()->fixed_cow_array_map();
4819 int object_offset = *offset;
4820 int object_size = object->map()->instance_size();
4821 int elements_offset = *offset + object_size;
4822 int elements_size = has_elements ? elements->Size() : 0;
4823 *offset += object_size + elements_size;
4826 ASSERT(object->properties()->length() == 0);
4827 int inobject_properties = object->map()->inobject_properties();
4828 int header_size = object_size - inobject_properties * kPointerSize;
4831 __ add(r2, result, Operand(elements_offset));
4839 for (int i = 0; i < inobject_properties; i++) {
4840 int total_offset = object_offset + object->GetInObjectPropertyOffset(i);
4841 Handle<Object> value = Handle<Object>(object->InObjectPropertyAt(i));
4842 if (value->IsJSObject()) {
4844 __ add(r2, result, Operand(*offset));
4846 __ LoadHeapObject(source, value_object);
4847 EmitDeepCopy(value_object, result, source, offset);
4848 } else if (value->IsHeapObject()) {
4849 __ LoadHeapObject(r2, Handle<HeapObject>::cast(value));
4852 __ mov(r2, Operand(value));
4859 __ LoadHeapObject(source, elements);
4866 int elements_length = has_elements ? elements->length() : 0;
4867 if (elements->IsFixedDoubleArray()) {
4868 Handle<FixedDoubleArray> double_array =
4870 for (int i = 0; i < elements_length; i++) {
4871 int64_t value = double_array->get_representation(i);
4873 int32_t value_low = value & 0xFFFFFFFF;
4874 int32_t value_high = value >> 32;
4877 __ mov(r2, Operand(value_low));
4879 __ mov(r2, Operand(value_high));
4882 } else if (elements->IsFixedArray()) {
4884 for (int i = 0; i < elements_length; i++) {
4886 Handle<Object> value(fast_elements->get(i));
4887 if (value->IsJSObject()) {
4889 __ add(r2, result, Operand(*offset));
4891 __ LoadHeapObject(source, value_object);
4892 EmitDeepCopy(value_object, result, source, offset);
4893 } else if (value->IsHeapObject()) {
4894 __ LoadHeapObject(r2, Handle<HeapObject>::cast(value));
4897 __ mov(r2, Operand(value));
4908 void LCodeGen::DoFastLiteral(LFastLiteral* instr) {
4909 int size = instr->hydrogen()->total_size();
4911 instr->hydrogen()->boilerplate()->GetElementsKind();
4917     boilerplate_elements_kind, true)) {
4918 __ LoadHeapObject(r1, instr->hydrogen()->boilerplate());
4925 __ cmp(r2, Operand(boilerplate_elements_kind));
4926 DeoptimizeIf(ne, instr->environment());
4931 Label allocated, runtime_allocate;
4935 __ bind(&runtime_allocate);
4938 CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
4940 __ bind(&allocated);
4942 __ LoadHeapObject(r1, instr->hydrogen()->boilerplate());
4943 EmitDeepCopy(instr->hydrogen()->boilerplate(), r0, r1, &offset);
4948 void LCodeGen::DoObjectLiteral(LObjectLiteral* instr) {
4949 Handle<FixedArray> literals(instr->environment()->closure()->literals());
4950 Handle<FixedArray> constant_properties =
4951 instr->hydrogen()->constant_properties();
4954 __ LoadHeapObject(r4, literals);
4956 __ mov(r2, Operand(constant_properties));
4957 int flags = instr->hydrogen()->fast_elements()
4964 int properties_count = constant_properties->length() / 2;
4965 if (instr->hydrogen()->depth() > 1) {
4966 CallRuntime(Runtime::kCreateObjectLiteral, 4, instr);
4969 CallRuntime(Runtime::kCreateObjectLiteralShallow, 4, instr);
4971 FastCloneShallowObjectStub stub(properties_count);
4972 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
4977 void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
4980 CallRuntime(Runtime::kToFastProperties, 1, instr);
4984 void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
4994 int literal_offset = FixedArray::kHeaderSize +
4997 __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
4999 __ b(ne, &materialized);
5004 __ mov(r5, Operand(instr->hydrogen()->pattern()));
5005 __ mov(r4, Operand(instr->hydrogen()->flags()));
5007 CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr);
5010 __ bind(&materialized);
5012 Label allocated, runtime_allocate;
5017 __ bind(&runtime_allocate);
5020 CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
5023 __ bind(&allocated);
5032 if ((size % (2 * kPointerSize)) != 0) {
5039 void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
5042 Handle<SharedFunctionInfo> shared_info = instr->shared_info();
5043 bool pretenure = instr->hydrogen()->pretenure();
5044 if (!pretenure && shared_info->num_literals() == 0) {
5045 FastNewClosureStub stub(shared_info->language_mode());
5046 __ mov(r1, Operand(shared_info));
5048 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
5050 __ mov(r2, Operand(shared_info));
5051 __ mov(r1, Operand(pretenure
5052     ? factory()->true_value()
5053     : factory()->false_value()));
5055 CallRuntime(Runtime::kNewClosure, 3, instr);
5060 void LCodeGen::DoTypeof(LTypeof* instr) {
5061 Register input = ToRegister(instr->InputAt(0));
5063 CallRuntime(Runtime::kTypeof, 1, instr);
5067 void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
5068 Register input = ToRegister(instr->InputAt(0));
5069 int true_block = chunk_->LookupDestination(instr->true_block_id());
5070 int false_block = chunk_->LookupDestination(instr->false_block_id());
5071 Label* true_label = chunk_->GetAssemblyLabel(true_block);
5072 Label* false_label = chunk_->GetAssemblyLabel(false_block);
5074 Condition final_branch_condition = EmitTypeofIs(true_label,
5077 instr->type_literal());
5079 EmitBranch(true_block, false_block, final_branch_condition);
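// Emits the compare sequence for a fused typeof comparison and returns the
// condition on which the true block should be taken, so the caller can
// branch on the flags without materializing a boolean result.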
5084 Condition LCodeGen::EmitTypeofIs(Label* true_label,
5087     Handle<String> type_name) {
5089 Register scratch = scratch0();
5090 if (type_name->Equals(heap()->number_symbol())) {
5091 __ JumpIfSmi(input, true_label);
5093 __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
5094 __ cmp(input, Operand(ip));
5095 final_branch_condition = eq;
5097 } else if (type_name->Equals(heap()->string_symbol())) {
5098 __ JumpIfSmi(input, false_label);
5100 __ b(ge, false_label);
5103 final_branch_condition = eq;
5105 } else if (type_name->Equals(heap()->boolean_symbol())) {
5106 __ CompareRoot(input, Heap::kTrueValueRootIndex);
5107 __ b(eq, true_label);
5108 __ CompareRoot(input, Heap::kFalseValueRootIndex);
5109 final_branch_condition = eq;
5111 } else if (FLAG_harmony_typeof && type_name->Equals(heap()->null_symbol())) {
5112 __ CompareRoot(input, Heap::kNullValueRootIndex);
5113 final_branch_condition = eq;
5115 } else if (type_name->Equals(heap()->undefined_symbol())) {
5116 __ CompareRoot(input, Heap::kUndefinedValueRootIndex);
5117 __ b(eq, true_label);
5118 __ JumpIfSmi(input, false_label);
5123 final_branch_condition = ne;
5125 } else if (type_name->Equals(heap()->function_symbol())) {
5127 __ JumpIfSmi(input, false_label);
5129 __ b(eq, true_label);
5131 final_branch_condition = eq;
5133 } else if (type_name->Equals(heap()->object_symbol())) {
5134 __ JumpIfSmi(input, false_label);
5135 if (!FLAG_harmony_typeof) {
5136 __ CompareRoot(input, Heap::kNullValueRootIndex);
5137 __ b(eq, true_label);
5139 __ CompareObjectType(input, input, scratch,
5141 __ b(lt, false_label);
5143 __ b(gt, false_label);
5147 final_branch_condition = eq;
5153 return final_branch_condition;
5157 void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
5158 Register temp1 = ToRegister(instr->TempAt(0));
5159 int true_block = chunk_->LookupDestination(instr->true_block_id());
5160 int false_block = chunk_->LookupDestination(instr->false_block_id());
5162 EmitIsConstructCall(temp1, scratch0());
5163 EmitBranch(true_block, false_block, eq);
5167 void LCodeGen::EmitIsConstructCall(Register temp1, Register temp2) {
5168 ASSERT(!temp1.is(temp2));
5173 Label check_frame_marker;
5176 __ b(ne, &check_frame_marker);
5180 __ bind(&check_frame_marker);
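// Lazy deoptimization patches a call over the code at the last recorded
// position; this emits padding whenever too little code has been generated
// since then, so patching cannot clobber the instructions that follow.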
5186 void LCodeGen::EnsureSpaceForLazyDeopt() {
5189 int current_pc = masm()->pc_offset();
5191 if (current_pc < last_lazy_deopt_pc_ + patch_size) {
5194 int padding_size = last_lazy_deopt_pc_ + patch_size - current_pc;
5196 while (padding_size > 0) {
5201 last_lazy_deopt_pc_ = masm()->pc_offset();
5205 void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
5206 EnsureSpaceForLazyDeopt();
5207 ASSERT(instr->HasEnvironment());
5208 LEnvironment* env = instr->environment();
5209 RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
5210 safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
5214 void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
5215 DeoptimizeIf(al, instr->environment());
5219 void LCodeGen::DoDeleteProperty(LDeleteProperty* instr) {
5220 Register object = ToRegister(instr->object());
5222 Register strict = scratch0();
5224 __ Push(object, key, strict);
5225 ASSERT(instr->HasPointerMap());
5226 LPointerMap* pointers = instr->pointer_map();
5227 RecordPosition(pointers->position());
5228 SafepointGenerator safepoint_generator(
5229     this, pointers, Safepoint::kLazyDeopt);
5230 __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION, safepoint_generator);
5234 void LCodeGen::DoIn(LIn* instr) {
5238 ASSERT(instr->HasPointerMap());
5239 LPointerMap* pointers = instr->pointer_map();
5240 RecordPosition(pointers->position());
5241 SafepointGenerator safepoint_generator(this, pointers, Safepoint::kLazyDeopt);
5246 void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
5247 PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
5248 __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
5249 RecordSafepointWithLazyDeopt(
5250 instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
5251 ASSERT(instr->HasEnvironment());
5252 LEnvironment* env = instr->environment();
5253 safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
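// Function-entry stack checks call the StackCheckStub when sp is below the
// stack limit; back-edge checks branch to deferred code instead, keeping
// the loop fast path to a compare and a conditional branch.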
5257 void LCodeGen::DoStackCheck(LStackCheck* instr) {
5258 class DeferredStackCheck: public LDeferredCode {
5260 DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
5261     : LDeferredCode(codegen), instr_(instr) { }
5262 virtual void Generate() { codegen()->DoDeferredStackCheck(instr_); }
5263 virtual LInstruction* instr() { return instr_; }
5265 LStackCheck* instr_;
5268 ASSERT(instr->HasEnvironment());
5269 LEnvironment* env = instr->environment();
5272 if (instr->hydrogen()->is_function_entry()) {
5275 __ LoadRoot(ip, Heap::kStackLimitRootIndex);
5278 StackCheckStub stub;
5279 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
5280 EnsureSpaceForLazyDeopt();
5282 RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
5283 safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
5285 ASSERT(instr->hydrogen()->is_backwards_branch());
5287 DeferredStackCheck* deferred_stack_check =
5288     new(zone()) DeferredStackCheck(this, instr);
5289 __ LoadRoot(ip, Heap::kStackLimitRootIndex);
5291 __ b(lo, deferred_stack_check->entry());
5292 EnsureSpaceForLazyDeopt();
5293 __ bind(instr->done_label());
5294 deferred_stack_check->SetExit(instr->done_label());
5295 RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
5303 void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
5307 LEnvironment* environment = instr->environment();
5308 environment->SetSpilledRegisters(instr->SpilledRegisterArray(),
5309 instr->SpilledDoubleRegisterArray());
5313 ASSERT(!environment->HasBeenRegistered());
5314 RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
5315 ASSERT(osr_pc_offset_ == -1);
5316 osr_pc_offset_ = masm()->pc_offset();
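// Fast for-in preparation: undefined and null receivers deoptimize, then
// CheckEnumCache falls back to the runtime when any object on the prototype
// chain lacks a usable enum cache; a non-map runtime result also deopts.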
5320 void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
5321 __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
5323 DeoptimizeIf(eq, instr->environment());
5325 Register null_value = r5;
5326 __ LoadRoot(null_value, Heap::kNullValueRootIndex);
5327 __ cmp(r0, null_value);
5328 DeoptimizeIf(eq, instr->environment());
5331 DeoptimizeIf(eq, instr->environment());
5335 DeoptimizeIf(le, instr->environment());
5337 Label use_cache, call_runtime;
5338 __ CheckEnumCache(null_value, &call_runtime);
5344 __ bind(&call_runtime);
5346 CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr);
5349 __ LoadRoot(ip, Heap::kMetaMapRootIndex);
5351 DeoptimizeIf(ne, instr->environment());
5352 __ bind(&use_cache);
5356 void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
5358 Register result = ToRegister(instr->result());
5359 __ LoadInstanceDescriptors(map, result);
5364 __ cmp(result, Operand(0));
5365 DeoptimizeIf(eq, instr->environment());
5369 void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
5370 Register object = ToRegister(instr->value());
5373 __ cmp(map, scratch0());
5374 DeoptimizeIf(ne, instr->environment());
5378 void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
5379 Register object = ToRegister(instr->object());
5381 Register result = ToRegister(instr->result());
5382 Register scratch = scratch0();
5384 Label out_of_object, done;
5385 __ cmp(index, Operand(0));
5386 __ b(lt, &out_of_object);
5394 __ bind(&out_of_object);
5399     FixedArray::kHeaderSize - kPointerSize));
0,"max size of the old generation (in Mbytes)") DEFINE_int(max_executable_size, 0,"max size of executable memory (in Mbytes)") DEFINE_bool(gc_global, false,"always perform global GCs") DEFINE_int(gc_interval,-1,"garbage collect after <n> allocations") DEFINE_bool(trace_gc, false,"print one trace line following each garbage collection") DEFINE_bool(trace_gc_nvp, false,"print one detailed trace line in name=value format ""after each garbage collection") DEFINE_bool(print_cumulative_gc_stat, false,"print cumulative GC statistics in name=value format on exit") DEFINE_bool(trace_gc_verbose, false,"print more details following each garbage collection") DEFINE_bool(trace_fragmentation, false,"report fragmentation for old pointer and data pages") DEFINE_bool(collect_maps, true,"garbage collect maps from which no objects can be reached") DEFINE_bool(flush_code, true,"flush code that we expect not to use again before full gc") DEFINE_bool(incremental_marking, true,"use incremental marking") DEFINE_bool(incremental_marking_steps, true,"do incremental marking steps") DEFINE_bool(trace_incremental_marking, false,"trace progress of the incremental marking") DEFINE_bool(use_idle_notification, true,"Use idle notification to reduce memory footprint.") DEFINE_bool(send_idle_notification, false,"Send idle notifcation between stress runs.") DEFINE_bool(use_ic, true,"use inline caching") DEFINE_bool(native_code_counters, false,"generate extra code for manipulating stats counters") DEFINE_bool(always_compact, false,"Perform compaction on every full GC") DEFINE_bool(lazy_sweeping, true,"Use lazy sweeping for old pointer and data spaces") DEFINE_bool(never_compact, false,"Never perform compaction on full GC - testing only") DEFINE_bool(compact_code_space, true,"Compact code space on full non-incremental collections") DEFINE_bool(cleanup_code_caches_at_gc, true,"Flush inline caches prior to mark compact collection and ""flush code caches in maps during mark compact cycle.") DEFINE_int(random_seed, 0,"Default seed for initializing random generator ""(0, the default, means to use system random).") DEFINE_bool(use_verbose_printer, true,"allows verbose printing") DEFINE_bool(allow_natives_syntax, false,"allow natives syntax") DEFINE_bool(trace_sim, false,"Trace simulator execution") DEFINE_bool(check_icache, false,"Check icache flushes in ARM and MIPS simulator") DEFINE_int(stop_sim_at, 0,"Simulator stop after x number of instructions") DEFINE_int(sim_stack_alignment, 8,"Stack alingment in bytes in simulator (4 or 8, 8 is default)") DEFINE_bool(trace_exception, false,"print stack trace when throwing exceptions") DEFINE_bool(preallocate_message_memory, false,"preallocate some memory to build stack traces.") DEFINE_bool(randomize_hashes, true,"randomize hashes to avoid predictable hash collisions ""(with snapshots this option cannot override the baked-in seed)") DEFINE_int(hash_seed, 0,"Fixed seed to use to hash property keys (0 means random)""(with snapshots this option cannot override the baked-in seed)") DEFINE_bool(preemption, false,"activate a 100ms timer that switches between V8 threads") DEFINE_bool(regexp_optimization, true,"generate optimized regexp code") DEFINE_bool(testing_bool_flag, true,"testing_bool_flag") DEFINE_int(testing_int_flag, 13,"testing_int_flag") DEFINE_float(testing_float_flag, 2.5,"float-flag") DEFINE_string(testing_string_flag,"Hello, world!","string-flag") DEFINE_int(testing_prng_seed, 42,"Seed used for threading test randomness") DEFINE_string(testing_serialization_file,"/tmp/serdes","file 
in which to serialize heap") DEFINE_bool(help, false,"Print usage message, including flags, on console") DEFINE_bool(dump_counters, false,"Dump counters on exit") DEFINE_string(map_counters,"","Map counters to a file") DEFINE_args(js_arguments, JSARGUMENTS_INIT,"Pass all remaining arguments to the script. Alias for \"--\".") DEFINE_bool(debug_compile_events, true,"Enable debugger compile events") DEFINE_bool(debug_script_collected_events, true,"Enable debugger script collected events") DEFINE_bool(gdbjit, false,"enable GDBJIT interface (disables compacting GC)") DEFINE_bool(gdbjit_full, false,"enable GDBJIT interface for all code objects") DEFINE_bool(gdbjit_dump, false,"dump elf objects with debug info to disk") DEFINE_string(gdbjit_dump_filter,"","dump only objects containing this substring") DEFINE_bool(force_marking_deque_overflows, false,"force overflows of marking deque by reducing it's size ""to 64 words") DEFINE_bool(stress_compaction, false,"stress the GC compactor to flush out bugs (implies ""--force_marking_deque_overflows)")#define FLAG DEFINE_bool(enable_slow_asserts, false,"enable asserts that are slow to execute") DEFINE_bool(trace_codegen, false,"print name of functions for which code is generated") DEFINE_bool(print_source, false,"pretty print source code") DEFINE_bool(print_builtin_source, false,"pretty print source code for builtins") DEFINE_bool(print_ast, false,"print source AST") DEFINE_bool(print_builtin_ast, false,"print source AST for builtins") DEFINE_string(stop_at,"","function name where to insert a breakpoint") DEFINE_bool(print_builtin_scopes, false,"print scopes for builtins") DEFINE_bool(print_scopes, false,"print scopes") DEFINE_bool(trace_contexts, false,"trace contexts operations") DEFINE_bool(gc_greedy, false,"perform GC prior to some allocations") DEFINE_bool(gc_verbose, false,"print stuff during garbage collection") DEFINE_bool(heap_stats, false,"report heap statistics before and after GC") DEFINE_bool(code_stats, false,"report code statistics after GC") DEFINE_bool(verify_heap, false,"verify heap pointers before and after GC") DEFINE_bool(print_handles, false,"report handles after GC") DEFINE_bool(print_global_handles, false,"report global handles after GC") DEFINE_bool(trace_ic, false,"trace inline cache state transitions") DEFINE_bool(print_interfaces, false,"print interfaces") DEFINE_bool(print_interface_details, false,"print interface inference details") DEFINE_int(print_interface_depth, 5,"depth for printing interfaces") DEFINE_bool(trace_normalization, false,"prints when objects are turned into dictionaries.") DEFINE_bool(trace_lazy, false,"trace lazy compilation") DEFINE_bool(collect_heap_spill_statistics, false,"report heap spill statistics along with heap_stats ""(requires heap_stats)") DEFINE_bool(trace_isolates, false,"trace isolate state changes") DEFINE_bool(log_state_changes, false,"Log state changes.") DEFINE_bool(regexp_possessive_quantifier, false,"enable possessive quantifier syntax for testing") DEFINE_bool(trace_regexp_bytecodes, false,"trace regexp bytecode execution") DEFINE_bool(trace_regexp_assembler, false,"trace regexp macro assembler calls.")#define FLAG DEFINE_bool(log, false,"Minimal logging (no API, code, GC, suspect, or handles samples).") DEFINE_bool(log_all, false,"Log all events to the log file.") DEFINE_bool(log_runtime, false,"Activate runtime system %Log call.") DEFINE_bool(log_api, false,"Log API events to the log file.") DEFINE_bool(log_code, false,"Log code events to the log file without profiling.") 