DeoptimizerData::DeoptimizerData() {
  eager_deoptimization_entry_code_ = NULL;
  lazy_deoptimization_entry_code_ = NULL;
  // ...
  deoptimizing_code_list_ = NULL;
#ifdef ENABLE_DEBUGGER_SUPPORT
  deoptimized_frame_info_ = NULL;
#endif
}
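// Note: the eager and lazy deoptimization entry tables live in raw
// MemoryChunks obtained from the memory allocator rather than in Code
// objects (see CreateCode() further down), so the destructor below has to
// hand them back to the allocator explicitly.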
DeoptimizerData::~DeoptimizerData() {
  if (eager_deoptimization_entry_code_ != NULL) {
    Isolate::Current()->memory_allocator()->Free(
        eager_deoptimization_entry_code_);
    eager_deoptimization_entry_code_ = NULL;
  }
  if (lazy_deoptimization_entry_code_ != NULL) {
    Isolate::Current()->memory_allocator()->Free(
        lazy_deoptimization_entry_code_);
    lazy_deoptimization_entry_code_ = NULL;
  }
}
#ifdef ENABLE_DEBUGGER_SUPPORT
void DeoptimizerData::Iterate(ObjectVisitor* v) {
  if (deoptimized_frame_info_ != NULL) {
    deoptimized_frame_info_->Iterate(v);
  }
}
#endif
// Deoptimizer::New (fragment):
  ASSERT(isolate == Isolate::Current());
  // ...

// Deoptimizer::Grab (fragment):
  ASSERT(isolate == Isolate::Current());
  // ...
  result->DeleteFrameDescriptions();
  // ...
int Deoptimizer::ConvertJSFrameIndexToFrameIndex(int jsframe_index) {
  if (jsframe_index == 0) return 0;
  // ...
  while (jsframe_index >= 0) {
    // ...
  }
  return frame_index - 1;
}
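// The conversion above is needed because output_ interleaves
// ARGUMENTS_ADAPTOR and CONSTRUCT frames between the JAVA_SCRIPT frames,
// so the i-th inlined JS frame is generally not output_[i] (see the
// frame-type checks in DebuggerInspectableFrame below).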
#ifdef ENABLE_DEBUGGER_SUPPORT
DeoptimizedFrameInfo* Deoptimizer::DebuggerInspectableFrame(
    JavaScriptFrame* frame,
    int jsframe_index,
    Isolate* isolate) {
  ASSERT(isolate == Isolate::Current());
  ASSERT(frame->is_optimized());
  // ... (fetch the function and optimized code from the frame)
  SafepointEntry safepoint_entry = code->GetSafepointEntry(frame->pc());
  int deoptimization_index = safepoint_entry.deoptimization_index();
  ASSERT(deoptimization_index != Safepoint::kNoDeoptimizationIndex);

  // Use the actual stack slots when calculating the fp-to-sp delta, adding
  // two for the function and context in the fixed part of the frame.
  unsigned stack_slots = code->stack_slots();
  unsigned fp_to_sp_delta = ((stack_slots + 2) * kPointerSize);

  // ... (a DEBUGGER-type Deoptimizer is constructed here, passing
  //      deoptimization_index as the bailout id, together with frame->pc(),
  //      fp_to_sp_delta, and code)
  Address tos = frame->fp() - fp_to_sp_delta;
  deoptimizer->FillInputFrame(tos, frame);
  // Calculate the output frames.
  // ...
  bool has_arguments_adaptor =
      frame_index > 0 &&
      deoptimizer->output_[frame_index - 1]->GetFrameType() ==
          StackFrame::ARGUMENTS_ADAPTOR;

  int construct_offset = has_arguments_adaptor ? 2 : 1;
  bool has_construct_stub =
      frame_index >= construct_offset &&
      deoptimizer->output_[frame_index - construct_offset]->GetFrameType() ==
          StackFrame::CONSTRUCT;

  DeoptimizedFrameInfo* info = new DeoptimizedFrameInfo(deoptimizer,
                                                        frame_index,
                                                        has_arguments_adaptor,
                                                        has_construct_stub);
  isolate->deoptimizer_data()->deoptimized_frame_info_ = info;

  // Get the "simulated" top and size of the frame to inspect.
  FrameDescription* parameters_frame =
      deoptimizer->output_[
          has_arguments_adaptor ? (frame_index - 1) : frame_index];

  uint32_t parameters_size = (info->parameters_count() + 1) * kPointerSize;
  // ... (parameters_top is computed from parameters_frame)

  uint32_t expressions_size = info->expression_count() * kPointerSize;
  Address expressions_top = reinterpret_cast<Address>(
      deoptimizer->output_[frame_index]->GetTop());

  // Done with the GC-unsafe frame descriptions; this re-enables allocation.
  deoptimizer->DeleteFrameDescriptions();

  // Allocate a heap number for each double belonging to this frame.
  deoptimizer->MaterializeHeapNumbersForDebuggerInspectableFrame(
      parameters_top, parameters_size, expressions_top, expressions_size,
      info);

  return info;
}
void Deoptimizer::DeleteDebuggerInspectableFrame(DeoptimizedFrameInfo* info,
                                                 Isolate* isolate) {
  ASSERT(isolate == Isolate::Current());
  ASSERT(isolate->deoptimizer_data()->deoptimized_frame_info_ == info);
  delete info;
  isolate->deoptimizer_data()->deoptimized_frame_info_ = NULL;
}
#endif
void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
                                                int count,
                                                BailoutType type) {
  TableEntryGenerator generator(masm, type, count);
  generator.Generate();
}
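// Sketch of what gets generated above: a table of kNumberOfEntries
// fixed-size stubs, each table_entry_size_ bytes long, where entry i records
// its id and jumps to a common deoptimization routine. The exact instruction
// sequence is architecture-specific (see the per-architecture
// TableEntryGenerator implementations).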
// DeoptimizingVisitor::EnterContext (fragment):
  virtual void EnterContext(Context* context) {
    if (FLAG_trace_deopt) {
      PrintF("[deoptimize context: %" V8PRIxPTR "]\n",
             reinterpret_cast<intptr_t>(context));
    }
  }

// Deoptimizer::DeoptimizeAll (fragment):
  if (FLAG_trace_deopt) {
    PrintF("[deoptimize all contexts]\n");
  }
void Deoptimizer::VisitAllOptimizedFunctionsForContext(
    Context* context, OptimizedFunctionVisitor* visitor) {
  // ...
  ASSERT(context->IsNativeContext());

  visitor->EnterContext(context);

  // Create a snapshot of the optimized functions list: visitors may remove
  // more than one link from the list at once.
  Object* element = context->OptimizedFunctionsListHead();
  while (!element->IsUndefined()) {
    JSFunction* element_function = JSFunction::cast(element);
    // ... (add element_function to the snapshot)
    element = element_function->next_function_link();
  }

  // Run through the snapshot of optimized functions and visit them.
  for (int i = 0; i < snapshot.length(); ++i) {
    visitor->VisitFunction(snapshot.at(i));
  }

  visitor->LeaveContext(context);
}
void Deoptimizer::VisitAllOptimizedFunctionsForGlobalObject(
    JSObject* object, OptimizedFunctionVisitor* visitor) {
  // ...
  if (object->IsJSGlobalProxy()) {
    Object* proto = object->GetPrototype();
    ASSERT(proto->IsJSGlobalObject());
    VisitAllOptimizedFunctionsForContext(
        GlobalObject::cast(proto)->native_context(), visitor);
  } else if (object->IsGlobalObject()) {
    VisitAllOptimizedFunctionsForContext(
        GlobalObject::cast(object)->native_context(), visitor);
  }
}
void Deoptimizer::VisitAllOptimizedFunctions(
    OptimizedFunctionVisitor* visitor) {
  // Run through the list of all native contexts and visit them.
  Object* context = Isolate::Current()->heap()->native_contexts_list();
  while (!context->IsUndefined()) {
    // GC can happen while a context is not fully initialized, so its global
    // field may still be undefined.
    if (!global->IsUndefined()) {
      VisitAllOptimizedFunctionsForGlobalObject(JSObject::cast(global),
                                                visitor);
    }
    // ... (advance via the next-context link)
  }
}
void Deoptimizer::HandleWeakDeoptimizedCode(
    v8::Persistent<v8::Value> obj, void* data) {
  DeoptimizingCodeListNode* node =
      reinterpret_cast<DeoptimizingCodeListNode*>(data);
  RemoveDeoptimizingCode(*node->code());
#ifdef DEBUG
  node = Isolate::Current()->deoptimizer_data()->deoptimizing_code_list_;
  while (node != NULL) {
    ASSERT(node != reinterpret_cast<DeoptimizingCodeListNode*>(data));
    node = node->next();
  }
#endif
}
void Deoptimizer::ComputeOutputFrames(Deoptimizer* deoptimizer) {
  deoptimizer->DoComputeOutputFrames();
}
Deoptimizer::Deoptimizer(Isolate* isolate,
                         JSFunction* function,
                         BailoutType type,
                         unsigned bailout_id,
                         Address from,
                         int fp_to_sp_delta,
                         Code* optimized_code)
    : isolate_(isolate),
      function_(function),
      bailout_id_(bailout_id),
      bailout_type_(type),
      from_(from),
      fp_to_sp_delta_(fp_to_sp_delta),
      has_alignment_padding_(0),
      // ...
      deferred_arguments_objects_values_(0),
      deferred_arguments_objects_(0),
      deferred_heap_numbers_(0) {
  if (FLAG_trace_deopt && type != OSR) {
    if (type == DEBUGGER) {
      PrintF("**** DEOPT FOR DEBUGGER: ");
    } else {
      PrintF("**** DEOPT: ");
    }
    function->PrintName();
    PrintF(" at bailout #%u, address 0x%" V8PRIxPTR ", frame size %d\n",
           bailout_id,
           reinterpret_cast<intptr_t>(from),
           fp_to_sp_delta - (2 * kPointerSize));
  } else if (FLAG_trace_osr && type == OSR) {
    PrintF("**** OSR: ");
    function->PrintName();
    PrintF(" at ast id #%u, address 0x%" V8PRIxPTR ", frame size %d\n",
           bailout_id,
           reinterpret_cast<intptr_t>(from),
           fp_to_sp_delta - (2 * kPointerSize));
  }
  function->shared()->increment_deopt_count();
  // Find the code to deoptimize.
  if (type == EAGER) {
    // ...
    optimized_code_ = function_->code();
    if (FLAG_trace_deopt && FLAG_code_comments) {
      // Print the code comment associated with this deopt point, if any.
      const char* last_comment = NULL;
      int mask = RelocInfo::ModeMask(RelocInfo::COMMENT) |
          RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY);
      for (RelocIterator it(optimized_code_, mask); !it.done(); it.next()) {
        RelocInfo* info = it.rinfo();
        if (info->rmode() == RelocInfo::COMMENT) {
          last_comment = reinterpret_cast<const char*>(info->data());
        }
        if (info->rmode() == RelocInfo::RUNTIME_ENTRY) {
          unsigned id = Deoptimizer::GetDeoptimizationId(
              info->target_address(), Deoptimizer::EAGER);
          if (id == bailout_id && last_comment != NULL) {
            PrintF(" %s\n", last_comment);
          }
        }
      }
    }
  } else if (type == LAZY) {
    optimized_code_ = FindDeoptimizingCodeFromAddress(from);
    // ...
  } else if (type == OSR) {
    // The function has already been optimized; we are transitioning from the
    // unoptimized code in the function to the optimized one.
    optimized_code_ = function_->code();
    ASSERT(optimized_code_->kind() == Code::OPTIMIZED_FUNCTION);
    // ...
  } else if (type == DEBUGGER) {
    optimized_code_ = optimized_code;
    // ...
  }
  // ...
  unsigned size = ComputeInputFrameSize();
  input_ = new(size) FrameDescription(size, function);
  input_->SetFrameType(StackFrame::JAVA_SCRIPT);
}
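// The four bailout types handled above differ in where the code to
// deoptimize lives: EAGER deopts run against the function's current code,
// LAZY deopts look the code up via the deoptimizing-code list (the function
// may already point at new code), OSR transitions into already-optimized
// code, and DEBUGGER deopts receive the code object explicitly.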
void Deoptimizer::DeleteFrameDescriptions() {
  // ...
  for (int i = 0; i < output_count_; ++i) {
    if (output_[i] != input_) delete output_[i];
  }
  // ...
}
Address Deoptimizer::GetDeoptimizationEntry(int id, BailoutType type) {
  if (id >= kNumberOfEntries) return NULL;
  MemoryChunk* base = NULL;
  DeoptimizerData* data = Isolate::Current()->deoptimizer_data();
  if (type == EAGER) {
    if (data->eager_deoptimization_entry_code_ == NULL) {
      data->eager_deoptimization_entry_code_ = CreateCode(type);
    }
    base = data->eager_deoptimization_entry_code_;
  } else {
    ASSERT(type == LAZY);
    if (data->lazy_deoptimization_entry_code_ == NULL) {
      data->lazy_deoptimization_entry_code_ = CreateCode(type);
    }
    base = data->lazy_deoptimization_entry_code_;
  }
  return base->area_start() + (id * table_entry_size_);
}
int Deoptimizer::GetDeoptimizationId(Address addr, BailoutType type) {
  MemoryChunk* base = NULL;
  DeoptimizerData* data = Isolate::Current()->deoptimizer_data();
  if (type == EAGER) {
    base = data->eager_deoptimization_entry_code_;
  } else {
    base = data->lazy_deoptimization_entry_code_;
  }
  if (base == NULL ||
      addr < base->area_start() ||
      addr >= base->area_start() + (kNumberOfEntries * table_entry_size_)) {
    return kNotDeoptimizationEntry;
  }
  ASSERT_EQ(0,
            static_cast<int>(addr - base->area_start()) % table_entry_size_);
  return static_cast<int>(addr - base->area_start()) / table_entry_size_;
}
int Deoptimizer::GetOutputInfo(DeoptimizationOutputData* data,
                               BailoutId id,
                               SharedFunctionInfo* shared) {
  // ...
  for (int i = 0; i < length; i++) {
    if (data->AstId(i) == id) {
      return data->PcAndState(i)->value();
    }
  }
  PrintF("[couldn't find pc offset for node=%d]\n", id.ToInt());
  // ... (dumps the method name and source, then aborts)
}
// Deoptimizer::GetDeoptimizedCodeCount (fragment: walk the code list):
  while (node != NULL) {
    // ...
    node = node->next();
  }
void Deoptimizer::DoComputeOutputFrames() {
  if (bailout_type_ == OSR) {
    DoComputeOsrOutputFrame();
    return;
  }

  // Print some helpful diagnostic information.
  if (FLAG_trace_deopt) {
    PrintF("[deoptimizing%s: begin 0x%08" V8PRIxPTR " ",
           (bailout_type_ == LAZY ? " (lazy)" : ""),
           reinterpret_cast<intptr_t>(function_));
    function_->PrintName();
    PrintF(" @%d]\n", bailout_id_);
  }
  // Determine basic deoptimization information: the optimized frame is
  // described by the input data.
  DeoptimizationInputData* input_data =
      DeoptimizationInputData::cast(optimized_code_->deoptimization_data());
  BailoutId node_id = input_data->AstId(bailout_id_);
  ByteArray* translations = input_data->TranslationByteArray();
  unsigned translation_index =
      input_data->TranslationIndex(bailout_id_)->value();

  // Do the input frame to output frame(s) translation.
  TranslationIterator iterator(translations, translation_index);
  Translation::Opcode opcode =
      static_cast<Translation::Opcode>(iterator.Next());
  ASSERT(Translation::BEGIN == opcode);
  // Read the number of output frames and allocate an array for their
  // descriptions.
  int count = iterator.Next();
  // ... (the JS frame count is dropped and output_ is allocated)
  for (int i = 0; i < count; ++i) {
    output_[i] = NULL;
  }
  output_count_ = count;
  // Translate each output frame.
  for (int i = 0; i < count; ++i) {
    Translation::Opcode opcode =
        static_cast<Translation::Opcode>(iterator.Next());
    switch (opcode) {
      case Translation::JS_FRAME:
        DoComputeJSFrame(&iterator, i);
        jsframe_count_++;
        break;
      case Translation::ARGUMENTS_ADAPTOR_FRAME:
        DoComputeArgumentsAdaptorFrame(&iterator, i);
        break;
      case Translation::CONSTRUCT_STUB_FRAME:
        DoComputeConstructStubFrame(&iterator, i);
        break;
      case Translation::GETTER_STUB_FRAME:
        DoComputeAccessorStubFrame(&iterator, i, false);
        break;
      case Translation::SETTER_STUB_FRAME:
        DoComputeAccessorStubFrame(&iterator, i, true);
        break;
      case Translation::BEGIN:
      case Translation::REGISTER:
      case Translation::INT32_REGISTER:
      case Translation::UINT32_REGISTER:
      case Translation::DOUBLE_REGISTER:
      case Translation::STACK_SLOT:
      case Translation::INT32_STACK_SLOT:
      case Translation::UINT32_STACK_SLOT:
      case Translation::DOUBLE_STACK_SLOT:
      case Translation::LITERAL:
      case Translation::ARGUMENTS_OBJECT:
      case Translation::DUPLICATE:
        UNREACHABLE();
        break;
    }
  }
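  // For reference, the translation stream consumed above is laid out roughly
  // as
  //   BEGIN <frame_count> <jsframe_count>
  //   <frame opcode> <operands> <one translation command per frame slot> ...
  // so each loop iteration peels off one output frame description.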
  // Print some helpful diagnostic information.
  if (FLAG_trace_deopt) {
    double ms = static_cast<double>(OS::Ticks() - start) / 1000;
    int index = output_count_ - 1;  // Index of the topmost frame.
    JSFunction* function = output_[index]->GetFunction();
    PrintF("[deoptimizing: end 0x%08" V8PRIxPTR " ",
           reinterpret_cast<intptr_t>(function));
    function->PrintName();
    PrintF(" => node=%d, pc=0x%08" V8PRIxPTR ", state=%s, alignment=%s,"
           " took %0.3f ms]\n",
           node_id.ToInt(),
           output_[index]->GetPc(),
           FullCodeGenerator::State2String(
               static_cast<FullCodeGenerator::State>(
                   output_[index]->GetState()->value())),
           has_alignment_padding_ ? "with padding" : "no padding",
           ms);
  }
}
void Deoptimizer::MaterializeHeapObjects(JavaScriptFrameIterator* it) {
  // ...
  // Handlify all argument object values before triggering any allocation.
  for (int i = 0; i < deferred_arguments_objects_values_.length(); ++i) {
    values.Add(Handle<Object>(deferred_arguments_objects_values_[i]));
  }

  // Play it safe and clear all unhandlified values before we continue.
  deferred_arguments_objects_values_.Clear();

  // Materialize all heap numbers before looking at arguments, because the
  // output frames must already contain valid heap numbers by then.
  for (int i = 0; i < deferred_heap_numbers_.length(); i++) {
    HeapNumberMaterializationDescriptor d = deferred_heap_numbers_[i];
    Handle<Object> num = isolate_->factory()->NewNumber(d.value());
    if (FLAG_trace_deopt) {
      PrintF("Materializing a new heap number %p [%e] in slot %p\n",
             reinterpret_cast<void*>(*num),
             d.value(),
             d.slot_address());
    }
    Memory::Object_at(d.slot_address()) = *num;
  }
  // Materialize arguments objects one frame at a time.
  for (int frame_index = 0; frame_index < jsframe_count(); ++frame_index) {
    if (frame_index != 0) it->Advance();
    // ...
    ArgumentsObjectMaterializationDescriptor descriptor =
        deferred_arguments_objects_.RemoveLast();
    const int length = descriptor.arguments_length();
    // ...
    if (frame->has_adapted_arguments()) {
      // Use the arguments adaptor frame just built to materialize the
      // arguments object.
      Handle<JSObject> arguments = Handle<JSObject>::cast(Handle<Object>(
          Accessors::FunctionGetArguments(*function,
                                          NULL)->ToObjectUnchecked()));
      values.RewindBy(length);
    } else {
      // Construct an arguments object and copy the saved values into it.
      Handle<FixedArray> array = isolate_->factory()->NewFixedArray(length);
      ASSERT(array->length() == length);
      for (int i = length - 1; i >= 0; --i) {
        array->set(i, *values.RemoveLast());
      }
      arguments->set_elements(*array);
    }
    // ...
    if (FLAG_trace_deopt) {
      PrintF("Materializing %sarguments object for %p: ",
             frame->has_adapted_arguments() ? "(adapted) " : "",
             reinterpret_cast<void*>(descriptor.slot_address()));
      arguments->ShortPrint();
      PrintF("\n");
    }
  }
}
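// The materialization above is the second half of a two-phase scheme: during
// frame translation (DoTranslateCommand below) no allocation is allowed, so
// unboxed doubles and arguments objects are recorded on the side and GC-safe
// placeholders are written into the output frames; the real heap objects are
// created here, once allocation is permitted again.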
#ifdef ENABLE_DEBUGGER_SUPPORT
void Deoptimizer::MaterializeHeapNumbersForDebuggerInspectableFrame(
    Address parameters_top,
    uint32_t parameters_size,
    Address expressions_top,
    uint32_t expressions_size,
    DeoptimizedFrameInfo* info) {
  ASSERT_EQ(DEBUGGER, bailout_type_);
  Address parameters_bottom = parameters_top + parameters_size;
  Address expressions_bottom = expressions_top + expressions_size;
  for (int i = 0; i < deferred_heap_numbers_.length(); i++) {
    HeapNumberMaterializationDescriptor d = deferred_heap_numbers_[i];

    // Check whether the heap number to materialize actually belongs to the
    // frame being extracted.
    Address slot = d.slot_address();
    if (parameters_top <= slot && slot < parameters_bottom) {
      Handle<Object> num = isolate_->factory()->NewNumber(d.value());

      int index = (info->parameters_count() - 1) -
          static_cast<int>(slot - parameters_top) / kPointerSize;

      if (FLAG_trace_deopt) {
        PrintF("Materializing a new heap number %p [%e] in slot %p "
               "for parameter slot #%d\n",
               reinterpret_cast<void*>(*num),
               d.value(),
               d.slot_address(),
               index);
      }

      info->SetParameter(index, *num);
    } else if (expressions_top <= slot && slot < expressions_bottom) {
      Handle<Object> num = isolate_->factory()->NewNumber(d.value());

      int index = info->expression_count() - 1 -
          static_cast<int>(slot - expressions_top) / kPointerSize;

      if (FLAG_trace_deopt) {
        PrintF("Materializing a new heap number %p [%e] in slot %p "
               "for expression slot #%d\n",
               reinterpret_cast<void*>(*num),
               d.value(),
               d.slot_address(),
               index);
      }

      info->SetExpression(index, *num);
    }
  }
}
void Deoptimizer::DoTranslateCommand(TranslationIterator* iterator,
                                     int frame_index,
                                     unsigned output_offset) {
  // ...
  const intptr_t kPlaceholder = reinterpret_cast<intptr_t>(Smi::FromInt(0));

  // Ignore commands marked as duplicate and act on the first non-duplicate.
  Translation::Opcode opcode =
      static_cast<Translation::Opcode>(iterator->Next());
  while (opcode == Translation::DUPLICATE) {
    opcode = static_cast<Translation::Opcode>(iterator->Next());
    iterator->Skip(Translation::NumberOfOperandsFor(opcode));
    // ...
  }
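  // kPlaceholder above is Smi::FromInt(0): a value the GC can safely scan if
  // it walks the half-built frame before MaterializeHeapObjects() overwrites
  // the slot with the real heap number.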
  switch (opcode) {
    case Translation::BEGIN:
    case Translation::JS_FRAME:
    case Translation::ARGUMENTS_ADAPTOR_FRAME:
    case Translation::CONSTRUCT_STUB_FRAME:
    case Translation::GETTER_STUB_FRAME:
    case Translation::SETTER_STUB_FRAME:
    case Translation::DUPLICATE:
      UNREACHABLE();
      return;
    case Translation::REGISTER: {
      int input_reg = iterator->Next();
      intptr_t input_value = input_->GetRegister(input_reg);
      if (FLAG_trace_deopt) {
        PrintF(
            "    0x%08" V8PRIxPTR ": [top + %d] <- 0x%08" V8PRIxPTR " ; %s ",
            output_[frame_index]->GetTop() + output_offset,
            output_offset,
            input_value,
            converter.NameOfCPURegister(input_reg));
        reinterpret_cast<Object*>(input_value)->ShortPrint();
        PrintF("\n");
      }
      output_[frame_index]->SetFrameSlot(output_offset, input_value);
      return;
    }
    case Translation::INT32_REGISTER: {
      int input_reg = iterator->Next();
      intptr_t value = input_->GetRegister(input_reg);
      bool is_smi = Smi::IsValid(value);
      if (FLAG_trace_deopt) {
        PrintF(
            "    0x%08" V8PRIxPTR ": [top + %d] <- %" V8PRIdPTR " ; %s (%s)\n",
            output_[frame_index]->GetTop() + output_offset,
            output_offset,
            value,
            converter.NameOfCPURegister(input_reg),
            is_smi ? "smi" : "heap number");
      }
      if (is_smi) {
        intptr_t tagged_value =
            reinterpret_cast<intptr_t>(Smi::FromInt(static_cast<int>(value)));
        output_[frame_index]->SetFrameSlot(output_offset, tagged_value);
      } else {
        // We save the untagged value on the side and store a GC-safe
        // temporary placeholder in the frame.
        AddDoubleValue(output_[frame_index]->GetTop() + output_offset,
                       static_cast<double>(static_cast<int32_t>(value)));
        output_[frame_index]->SetFrameSlot(output_offset, kPlaceholder);
      }
      return;
    }
    case Translation::UINT32_REGISTER: {
      int input_reg = iterator->Next();
      uintptr_t value =
          static_cast<uintptr_t>(input_->GetRegister(input_reg));
      bool is_smi = (value <= static_cast<uintptr_t>(Smi::kMaxValue));
      if (FLAG_trace_deopt) {
        PrintF(
            "    0x%08" V8PRIxPTR ": [top + %d] <- %" V8PRIuPTR
            " ; uint %s (%s)\n",
            output_[frame_index]->GetTop() + output_offset,
            output_offset,
            value,
            converter.NameOfCPURegister(input_reg),
            is_smi ? "smi" : "heap number");
      }
      if (is_smi) {
        intptr_t tagged_value =
            reinterpret_cast<intptr_t>(Smi::FromInt(static_cast<int>(value)));
        output_[frame_index]->SetFrameSlot(output_offset, tagged_value);
      } else {
        // We save the untagged value on the side and store a GC-safe
        // temporary placeholder in the frame.
        AddDoubleValue(output_[frame_index]->GetTop() + output_offset,
                       static_cast<double>(static_cast<uint32_t>(value)));
        output_[frame_index]->SetFrameSlot(output_offset, kPlaceholder);
      }
      return;
    }
    case Translation::DOUBLE_REGISTER: {
      int input_reg = iterator->Next();
      double value = input_->GetDoubleRegister(input_reg);
      if (FLAG_trace_deopt) {
        PrintF("    0x%08" V8PRIxPTR ": [top + %d] <- %e ; %s\n",
               output_[frame_index]->GetTop() + output_offset,
               output_offset,
               value,
               DoubleRegister::AllocationIndexToString(input_reg));
      }
      // We save the untagged value on the side and store a GC-safe
      // temporary placeholder in the frame.
      AddDoubleValue(output_[frame_index]->GetTop() + output_offset, value);
      output_[frame_index]->SetFrameSlot(output_offset, kPlaceholder);
      return;
    }
    case Translation::STACK_SLOT: {
      int input_slot_index = iterator->Next();
      unsigned input_offset =
          input_->GetOffsetFromSlotIndex(input_slot_index);
      intptr_t input_value = input_->GetFrameSlot(input_offset);
      if (FLAG_trace_deopt) {
        PrintF("    0x%08" V8PRIxPTR ": ",
               output_[frame_index]->GetTop() + output_offset);
        PrintF("[top + %d] <- 0x%08" V8PRIxPTR " ; [sp + %d] ",
               output_offset,
               input_value,
               input_offset);
        reinterpret_cast<Object*>(input_value)->ShortPrint();
        PrintF("\n");
      }
      output_[frame_index]->SetFrameSlot(output_offset, input_value);
      return;
    }
    case Translation::INT32_STACK_SLOT: {
      int input_slot_index = iterator->Next();
      unsigned input_offset =
          input_->GetOffsetFromSlotIndex(input_slot_index);
      intptr_t value = input_->GetFrameSlot(input_offset);
      bool is_smi = Smi::IsValid(value);
      if (FLAG_trace_deopt) {
        PrintF("    0x%08" V8PRIxPTR ": ",
               output_[frame_index]->GetTop() + output_offset);
        PrintF("[top + %d] <- %" V8PRIdPTR " ; [sp + %d] (%s)\n",
               output_offset,
               value,
               input_offset,
               is_smi ? "smi" : "heap number");
      }
      if (is_smi) {
        intptr_t tagged_value =
            reinterpret_cast<intptr_t>(Smi::FromInt(static_cast<int>(value)));
        output_[frame_index]->SetFrameSlot(output_offset, tagged_value);
      } else {
        // We save the untagged value on the side and store a GC-safe
        // temporary placeholder in the frame.
        AddDoubleValue(output_[frame_index]->GetTop() + output_offset,
                       static_cast<double>(static_cast<int32_t>(value)));
        output_[frame_index]->SetFrameSlot(output_offset, kPlaceholder);
      }
      return;
    }
    case Translation::UINT32_STACK_SLOT: {
      int input_slot_index = iterator->Next();
      unsigned input_offset =
          input_->GetOffsetFromSlotIndex(input_slot_index);
      uintptr_t value =
          static_cast<uintptr_t>(input_->GetFrameSlot(input_offset));
      bool is_smi = (value <= static_cast<uintptr_t>(Smi::kMaxValue));
      if (FLAG_trace_deopt) {
        PrintF("    0x%08" V8PRIxPTR ": ",
               output_[frame_index]->GetTop() + output_offset);
        PrintF("[top + %d] <- %" V8PRIuPTR " ; [sp + %d] (uint32 %s)\n",
               output_offset,
               value,
               input_offset,
               is_smi ? "smi" : "heap number");
      }
      if (is_smi) {
        intptr_t tagged_value =
            reinterpret_cast<intptr_t>(Smi::FromInt(static_cast<int>(value)));
        output_[frame_index]->SetFrameSlot(output_offset, tagged_value);
      } else {
        // We save the untagged value on the side and store a GC-safe
        // temporary placeholder in the frame.
        AddDoubleValue(output_[frame_index]->GetTop() + output_offset,
                       static_cast<double>(static_cast<uint32_t>(value)));
        output_[frame_index]->SetFrameSlot(output_offset, kPlaceholder);
      }
      return;
    }
    case Translation::DOUBLE_STACK_SLOT: {
      int input_slot_index = iterator->Next();
      unsigned input_offset =
          input_->GetOffsetFromSlotIndex(input_slot_index);
      double value = input_->GetDoubleFrameSlot(input_offset);
      if (FLAG_trace_deopt) {
        PrintF("    0x%08" V8PRIxPTR ": [top + %d] <- %e ; [sp + %d]\n",
               output_[frame_index]->GetTop() + output_offset,
               output_offset,
               value,
               input_offset);
      }
      // We save the untagged value on the side and store a GC-safe
      // temporary placeholder in the frame.
      AddDoubleValue(output_[frame_index]->GetTop() + output_offset, value);
      output_[frame_index]->SetFrameSlot(output_offset, kPlaceholder);
      return;
    }
    case Translation::LITERAL: {
      Object* literal = ComputeLiteral(iterator->Next());
      if (FLAG_trace_deopt) {
        PrintF("    0x%08" V8PRIxPTR ": [top + %d] <- ",
               output_[frame_index]->GetTop() + output_offset,
               output_offset);
        literal->ShortPrint();
        PrintF(" ; literal\n");
      }
      intptr_t value = reinterpret_cast<intptr_t>(literal);
      output_[frame_index]->SetFrameSlot(output_offset, value);
      return;
    }
    case Translation::ARGUMENTS_OBJECT: {
      int args_index = iterator->Next() + 1;  // Skip receiver.
      int args_length = iterator->Next() - 1;  // Do not count receiver.
      if (FLAG_trace_deopt) {
        PrintF("    0x%08" V8PRIxPTR ": [top + %d] <- ",
               output_[frame_index]->GetTop() + output_offset,
               output_offset);
        isolate_->heap()->arguments_marker()->ShortPrint();
        PrintF(" ; arguments object\n");
      }
      // Use the arguments marker value as a sentinel and fill in the
      // arguments object after the deoptimized frame is built.
      intptr_t value = reinterpret_cast<intptr_t>(
          isolate_->heap()->arguments_marker());
      AddArgumentsObject(
          output_[frame_index]->GetTop() + output_offset, args_length);
      output_[frame_index]->SetFrameSlot(output_offset, value);
      // We save the tagged argument values on the side and materialize the
      // actual arguments object after the deoptimized frame is built.
      for (int i = 0; i < args_length; i++) {
        unsigned input_offset =
            input_->GetOffsetFromSlotIndex(args_index + i);
        intptr_t input_value = input_->GetFrameSlot(input_offset);
        AddArgumentsObjectValue(input_value);
      }
      return;
    }
  }
}
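// The arguments marker written by the ARGUMENTS_OBJECT case above acts as a
// sentinel: the frame slot holds a recognizable heap value while the
// args_length tagged values are parked on the side, and
// MaterializeHeapObjects() later replaces the marker with a real JSObject
// built from those saved values.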
static bool ObjectToInt32(Object* obj, int32_t* value) {
  // ...
  if (obj->IsHeapNumber()) {
    // Reject heap numbers that do not round-trip through int32.
    if (FLAG_trace_osr) {
      PrintF("**** %g could not be converted to int32 ****\n",
             HeapNumber::cast(obj)->value());
    }
    // ...
  }
  // ...
}

static bool ObjectToUint32(Object* obj, uint32_t* value) {
  // ...
  if (obj->IsHeapNumber()) {
    // Reject negative values and heap numbers that do not round-trip.
    if (FLAG_trace_osr) {
      PrintF("**** %g could not be converted to uint32 ****\n",
             HeapNumber::cast(obj)->value());
    }
    // ...
  }
  // ...
}
bool Deoptimizer::DoOsrTranslateCommand(TranslationIterator* iterator,
                                        int* input_offset) {
  // ...
  // The input values are all part of the unoptimized frame, so they
  // are all tagged pointers.
  uintptr_t input_value = input_->GetFrameSlot(*input_offset);
  Object* input_object = reinterpret_cast<Object*>(input_value);

  Translation::Opcode opcode =
      static_cast<Translation::Opcode>(iterator->Next());
  bool duplicate = (opcode == Translation::DUPLICATE);
  if (duplicate) {
    opcode = static_cast<Translation::Opcode>(iterator->Next());
  }
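  // Note the direction here is the reverse of DoTranslateCommand: for OSR
  // the input frame is the *unoptimized* (fully tagged) frame and the output
  // frame is the optimized one, so any command may fail and abort OSR by
  // returning false.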
  switch (opcode) {
    case Translation::BEGIN:
    case Translation::JS_FRAME:
    case Translation::ARGUMENTS_ADAPTOR_FRAME:
    case Translation::CONSTRUCT_STUB_FRAME:
    case Translation::GETTER_STUB_FRAME:
    case Translation::SETTER_STUB_FRAME:
    case Translation::DUPLICATE:
      UNREACHABLE();  // Malformed input.
      return false;
    case Translation::REGISTER: {
      int output_reg = iterator->Next();
      if (FLAG_trace_osr) {
        PrintF("    %s <- 0x%08" V8PRIxPTR " ; [sp + %d]\n",
               converter.NameOfCPURegister(output_reg),
               input_value,
               *input_offset);
      }
      output->SetRegister(output_reg, input_value);
      break;
    }
    case Translation::INT32_REGISTER: {
      int32_t int32_value = 0;
      if (!ObjectToInt32(input_object, &int32_value)) return false;

      int output_reg = iterator->Next();
      if (FLAG_trace_osr) {
        PrintF("    %s <- %d (int32) ; [sp + %d]\n",
               converter.NameOfCPURegister(output_reg),
               int32_value,
               *input_offset);
      }
      output->SetRegister(output_reg, int32_value);
      break;
    }
    case Translation::UINT32_REGISTER: {
      uint32_t uint32_value = 0;
      if (!ObjectToUint32(input_object, &uint32_value)) return false;

      int output_reg = iterator->Next();
      if (FLAG_trace_osr) {
        PrintF("    %s <- %u (uint32) ; [sp + %d]\n",
               converter.NameOfCPURegister(output_reg),
               uint32_value,
               *input_offset);
      }
      output->SetRegister(output_reg, static_cast<int32_t>(uint32_value));
      break;
    }
    case Translation::DOUBLE_REGISTER: {
      // Abort OSR if we don't have a number.
      if (!input_object->IsNumber()) return false;

      int output_reg = iterator->Next();
      double double_value = input_object->Number();
      if (FLAG_trace_osr) {
        PrintF("    %s <- %g (double) ; [sp + %d]\n",
               DoubleRegister::AllocationIndexToString(output_reg),
               double_value,
               *input_offset);
      }
      output->SetDoubleRegister(output_reg, double_value);
      break;
    }
    case Translation::STACK_SLOT: {
      int output_index = iterator->Next();
      unsigned output_offset =
          output->GetOffsetFromSlotIndex(output_index);
      if (FLAG_trace_osr) {
        PrintF("    [sp + %d] <- 0x%08" V8PRIxPTR " ; [sp + %d] ",
               output_offset,
               input_value,
               *input_offset);
        reinterpret_cast<Object*>(input_value)->ShortPrint();
        PrintF("\n");
      }
      output->SetFrameSlot(output_offset, input_value);
      break;
    }
    case Translation::INT32_STACK_SLOT: {
      int32_t int32_value = 0;
      if (!ObjectToInt32(input_object, &int32_value)) return false;

      int output_index = iterator->Next();
      unsigned output_offset =
          output->GetOffsetFromSlotIndex(output_index);
      if (FLAG_trace_osr) {
        PrintF("    [sp + %d] <- %d (int32) ; [sp + %d]\n",
               output_offset,
               int32_value,
               *input_offset);
      }
      output->SetFrameSlot(output_offset, int32_value);
      break;
    }
    case Translation::UINT32_STACK_SLOT: {
      uint32_t uint32_value = 0;
      if (!ObjectToUint32(input_object, &uint32_value)) return false;

      int output_index = iterator->Next();
      unsigned output_offset =
          output->GetOffsetFromSlotIndex(output_index);
      if (FLAG_trace_osr) {
        PrintF("    [sp + %d] <- %u (uint32) ; [sp + %d]\n",
               output_offset,
               uint32_value,
               *input_offset);
      }
      output->SetFrameSlot(output_offset, static_cast<int32_t>(uint32_value));
      break;
    }
    case Translation::DOUBLE_STACK_SLOT: {
      static const int kLowerOffset = 0 * kPointerSize;
      static const int kUpperOffset = 1 * kPointerSize;

      // Abort OSR if we don't have a number.
      if (!input_object->IsNumber()) return false;

      int output_index = iterator->Next();
      unsigned output_offset =
          output->GetOffsetFromSlotIndex(output_index);
      double double_value = input_object->Number();
      uint64_t int_value = BitCast<uint64_t, double>(double_value);
      int32_t lower = static_cast<int32_t>(int_value);
      int32_t upper = static_cast<int32_t>(int_value >> kBitsPerInt);
      if (FLAG_trace_osr) {
        PrintF("    [sp + %d] <- 0x%08x (upper bits of %g) ; [sp + %d]\n",
               output_offset + kUpperOffset,
               upper,
               double_value,
               *input_offset);
        PrintF("    [sp + %d] <- 0x%08x (lower bits of %g) ; [sp + %d]\n",
               output_offset + kLowerOffset,
               lower,
               double_value,
               *input_offset);
      }
      output->SetFrameSlot(output_offset + kLowerOffset, lower);
      output->SetFrameSlot(output_offset + kUpperOffset, upper);
      break;
    }
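    // The double is split into two 32-bit halves above, which assumes a
    // frame layout where kPointerSize is 32 bits; the lower and upper words
    // land in adjacent stack slots.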
    case Translation::LITERAL: {
      // Non-materialized literals can be ignored here.
      // ...
      break;
    }

    case Translation::ARGUMENTS_OBJECT: {
      // Optimized code assumes the arguments object has not been
      // materialized; we should have bailed out before getting here.
      // ...
    }
  }

  if (!duplicate) *input_offset -= kPointerSize;
  return true;
}
void Deoptimizer::PatchStackCheckCode(Code* unoptimized_code,
                                      Code* check_code,
                                      Code* replacement_code) {
  // Patch every stack check listed in the unoptimized code's stack check
  // table into a call to the replacement code.
  ASSERT(unoptimized_code->kind() == Code::FUNCTION);
  // ...
  for (uint32_t i = 0; i < table_length; ++i) {
    // ... (PatchStackCheckCodeAt for each table entry)
    stack_check_cursor += 2 * kIntSize;
  }
}

void Deoptimizer::RevertStackCheckCode(Code* unoptimized_code,
                                       Code* check_code,
                                       Code* replacement_code) {
  // Revert the patched stack checks back to the original check code.
  ASSERT(unoptimized_code->kind() == Code::FUNCTION);
  // ...
  for (uint32_t i = 0; i < table_length; ++i) {
    // ... (RevertStackCheckCodeAt for each table entry)
    stack_check_cursor += 2 * kIntSize;
  }
}
unsigned Deoptimizer::ComputeInputFrameSize() const {
  unsigned fixed_size = ComputeFixedSize(function_);
  // The fp-to-sp delta already takes the context and the function into
  // account, so we have to avoid double counting them (-2).
  unsigned result = fixed_size + fp_to_sp_delta_ - (2 * kPointerSize);
#ifdef DEBUG
  if (bailout_type_ == OSR) {
    // ...
  } else {
    unsigned stack_slots = optimized_code_->stack_slots();
    unsigned outgoing_size = ComputeOutgoingArgumentSize();
    ASSERT(result ==
           fixed_size + (stack_slots * kPointerSize) + outgoing_size);
  }
#endif
  return result;
}
unsigned Deoptimizer::ComputeFixedSize(JSFunction* function) const {
  // The fixed part of the frame consists of the return address, frame
  // pointer, function, context, and all the incoming arguments.
  return ComputeIncomingArgumentSize(function) +
      StandardFrameConstants::kFixedFrameSize;
}

unsigned Deoptimizer::ComputeIncomingArgumentSize(JSFunction* function) const {
  // The incoming arguments are the values for the formal parameters plus
  // the receiver; every slot contains a pointer.
  unsigned arguments = function->shared()->formal_parameter_count() + 1;
  return arguments * kPointerSize;
}
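// Worked example (assuming 32-bit pointers and a 4-slot kFixedFrameSize): a
// function with 2 formal parameters has arguments = 2 + 1 = 3 incoming slots
// including the receiver, i.e. 12 bytes, giving a fixed frame size of
// 12 + 16 = 28 bytes. The actual kFixedFrameSize is architecture-specific.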
unsigned Deoptimizer::ComputeOutgoingArgumentSize() const {
  DeoptimizationInputData* data = DeoptimizationInputData::cast(
      optimized_code_->deoptimization_data());
  unsigned height = data->ArgumentsStackHeight(bailout_id_)->value();
  return height * kPointerSize;
}
Object* Deoptimizer::ComputeLiteral(int index) const {
  DeoptimizationInputData* data = DeoptimizationInputData::cast(
      optimized_code_->deoptimization_data());
  FixedArray* literals = data->LiteralArray();
  return literals->get(index);
}
void Deoptimizer::AddArgumentsObject(intptr_t slot_address, int argc) {
  ArgumentsObjectMaterializationDescriptor object_desc(
      reinterpret_cast<Address>(slot_address), argc);
  deferred_arguments_objects_.Add(object_desc);
}

void Deoptimizer::AddArgumentsObjectValue(intptr_t value) {
  deferred_arguments_objects_values_.Add(reinterpret_cast<Object*>(value));
}

void Deoptimizer::AddDoubleValue(intptr_t slot_address, double value) {
  HeapNumberMaterializationDescriptor value_desc(
      reinterpret_cast<Address>(slot_address), value);
  deferred_heap_numbers_.Add(value_desc);
}
MemoryChunk* Deoptimizer::CreateCode(BailoutType type) {
  // We cannot run this if the serializer is enabled because it would emit
  // relocation information for the external references.
  // ...

  MacroAssembler masm(Isolate::Current(), NULL, 16 * KB);
  masm.set_emit_debug_code(false);
  GenerateDeoptimizationEntries(&masm, kNumberOfEntries, type);
  CodeDesc desc;
  masm.GetCode(&desc);
  ASSERT(desc.reloc_size == 0);

  MemoryChunk* chunk =
      Isolate::Current()->memory_allocator()->AllocateChunk(desc.instr_size,
                                                            EXECUTABLE,
                                                            NULL);
  if (chunk == NULL) {
    V8::FatalProcessOutOfMemory("Not enough memory for deoptimization table");
  }
  ASSERT(chunk->area_size() >= desc.instr_size);
  memcpy(chunk->area_start(), desc.buffer, desc.instr_size);
  CPU::FlushICache(chunk->area_start(), desc.instr_size);
  return chunk;
}
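// Using a raw executable MemoryChunk (instead of a Code object) keeps the
// entry table out of the GC-managed code space; presumably this is also why
// DeoptimizerData frees these chunks through the memory allocator in its
// destructor rather than relying on the garbage collector.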
Code* Deoptimizer::FindDeoptimizingCodeFromAddress(Address addr) {
  DeoptimizingCodeListNode* node =
      Isolate::Current()->deoptimizer_data()->deoptimizing_code_list_;
  while (node != NULL) {
    if (node->code()->contains(addr)) return *node->code();
    node = node->next();
  }
  return NULL;
}
void Deoptimizer::RemoveDeoptimizingCode(Code* code) {
  DeoptimizerData* data = Isolate::Current()->deoptimizer_data();
  ASSERT(data->deoptimizing_code_list_ != NULL);
  // Iterate over the code list to find the requested code.
  DeoptimizingCodeListNode* prev = NULL;
  DeoptimizingCodeListNode* current = data->deoptimizing_code_list_;
  while (current != NULL) {
    if (*current->code() == code) {
      // Unlink the node from the list.
      if (prev == NULL) {
        data->deoptimizing_code_list_ = current->next();
      } else {
        prev->set_next(current->next());
      }
      delete current;
      return;
    }
    prev = current;
    current = current->next();
  }
  // Each code object should be removed once and only once.
  UNREACHABLE();
}
// Unlink functions referring to the given code from the context's
// optimized-functions list and return them chained through next_function_link.
static Object* CutOutRelatedFunctionsList(Context* context,
                                          Code* code,
                                          Object* undefined) {
  Object* result_list_head = undefined;
  Object* current = context->OptimizedFunctionsListHead();
  JSFunction* prev = NULL;
  while (current != undefined) {
    JSFunction* func = JSFunction::cast(current);
    current = func->next_function_link();
    if (func->code() == code) {
      // Move the function to the head of the result list.
      func->set_next_function_link(result_list_head);
      result_list_head = func;
      if (prev != NULL) {
        prev->set_next_function_link(current);
      }
      // ...
    } else {
      prev = func;
    }
  }
  // ...
  return result_list_head;
}
void Deoptimizer::ReplaceCodeForRelatedFunctions(JSFunction* function,
                                                 Code* code) {
  // ...
  Object* undefined = Isolate::Current()->heap()->undefined_value();
  Object* current = CutOutRelatedFunctionsList(context, code, undefined);

  while (current != undefined) {
    JSFunction* func = JSFunction::cast(current);
    current = func->next_function_link();
    func->set_code(func->shared()->code());
    func->set_next_function_link(undefined);
  }
}
FrameDescription::FrameDescription(uint32_t frame_size,
                                   JSFunction* function)
    : frame_size_(frame_size),
      function_(function),
      // ...
      context_(kZapUint32) {
  // Zap the registers and frame slots so they look like invalid data until
  // they are filled in.
  // ...
  for (unsigned o = 0; o < frame_size; o += kPointerSize) {
    SetFrameSlot(o, kZapUint32);
  }
}
int FrameDescription::ComputeFixedSize() {
  // ...
}

// FrameDescription::GetOffsetFromSlotIndex (fragment):
  if (slot_index >= 0) {
    // Local or spill slot.
    // ...
  }
  // ... (negative indexes address the incoming parameters)

int FrameDescription::ComputeParametersCount() {
  switch (type_) {
    case StackFrame::JAVA_SCRIPT:
      return function_->shared()->formal_parameter_count();
    case StackFrame::ARGUMENTS_ADAPTOR:
      // The adaptor frame stores the actual argument count as a smi.
      return reinterpret_cast<Smi*>(*GetFrameSlotPointer(0))->value();
    // ...
  }
}
// FrameDescription::GetParameter (fragment):
  return reinterpret_cast<Object*>(*GetFrameSlotPointer(offset));

unsigned FrameDescription::GetExpressionCount() {
  ASSERT_EQ(StackFrame::JAVA_SCRIPT, type_);
  // ...
}

Object* FrameDescription::GetExpression(int index) {
  ASSERT_EQ(StackFrame::JAVA_SCRIPT, type_);
  // ...
  return reinterpret_cast<Object*>(*GetFrameSlotPointer(offset));
}
void TranslationBuffer::Add(int32_t value, Zone* zone) {
  // Encode the sign in the least significant bit.
  bool is_negative = (value < 0);
  uint32_t bits = ((is_negative ? -value : value) << 1) |
      static_cast<int32_t>(is_negative);
  // Encode the individual bytes using the least significant bit of each byte
  // to indicate whether or not more bytes follow.
  do {
    uint32_t next = bits >> 7;
    contents_.Add(((bits << 1) & 0xFF) | (next != 0), zone);
    bits = next;
  } while (bits != 0);
}
int32_t TranslationIterator::Next() {
  // Run through the bytes until we reach one with a least significant
  // bit of zero (marks the end).
  uint32_t bits = 0;
  for (int i = 0; true; i += 7) {
    uint8_t next = buffer_->get(index_++);
    bits |= (next >> 1) << i;
    if ((next & 1) == 0) break;
  }
  // The bits encode the sign in the least significant bit.
  bool is_negative = (bits & 1) == 1;
  int32_t result = bits >> 1;
  return is_negative ? -result : result;
}
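// Round-trip example of the encoding implemented by Add()/Next() above:
//   Add(-3):  bits = (3 << 1) | 1 = 7, fits in 7 bits -> single byte 0x0E.
//   Add(200): bits = 400 -> bytes 0x21 (payload 0x10, more-bit set), 0x06.
//   Next() on 0x21 0x06 rebuilds bits = 0x10 | (0x03 << 7) = 400, sign bit
//   clear -> 200; Next() on 0x0E rebuilds bits = 7, sign bit set -> -3.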
Handle<ByteArray> TranslationBuffer::CreateByteArray() {
  int length = contents_.length();
  Handle<ByteArray> result =
      Isolate::Current()->factory()->NewByteArray(length, TENURED);
  memcpy(result->GetDataStartAddress(), contents_.ToVector().start(), length);
  return result;
}
void Translation::BeginConstructStubFrame(int literal_id, unsigned height) {
  buffer_->Add(CONSTRUCT_STUB_FRAME, zone());
  buffer_->Add(literal_id, zone());
  buffer_->Add(height, zone());
}

void Translation::BeginGetterStubFrame(int literal_id) {
  buffer_->Add(GETTER_STUB_FRAME, zone());
  buffer_->Add(literal_id, zone());
}

void Translation::BeginSetterStubFrame(int literal_id) {
  buffer_->Add(SETTER_STUB_FRAME, zone());
  buffer_->Add(literal_id, zone());
}

void Translation::BeginArgumentsAdaptorFrame(int literal_id, unsigned height) {
  buffer_->Add(ARGUMENTS_ADAPTOR_FRAME, zone());
  buffer_->Add(literal_id, zone());
  buffer_->Add(height, zone());
}

void Translation::BeginJSFrame(BailoutId node_id,
                               int literal_id,
                               unsigned height) {
  buffer_->Add(JS_FRAME, zone());
  buffer_->Add(node_id.ToInt(), zone());
  buffer_->Add(literal_id, zone());
  buffer_->Add(height, zone());
}
void Translation::StoreRegister(Register reg) {
  buffer_->Add(REGISTER, zone());
  buffer_->Add(reg.code(), zone());
}

void Translation::StoreInt32Register(Register reg) {
  buffer_->Add(INT32_REGISTER, zone());
  buffer_->Add(reg.code(), zone());
}

void Translation::StoreUint32Register(Register reg) {
  buffer_->Add(UINT32_REGISTER, zone());
  buffer_->Add(reg.code(), zone());
}

void Translation::StoreDoubleRegister(DoubleRegister reg) {
  buffer_->Add(DOUBLE_REGISTER, zone());
  buffer_->Add(DoubleRegister::ToAllocationIndex(reg), zone());
}

void Translation::StoreStackSlot(int index) {
  buffer_->Add(STACK_SLOT, zone());
  buffer_->Add(index, zone());
}

void Translation::StoreInt32StackSlot(int index) {
  buffer_->Add(INT32_STACK_SLOT, zone());
  buffer_->Add(index, zone());
}

void Translation::StoreUint32StackSlot(int index) {
  buffer_->Add(UINT32_STACK_SLOT, zone());
  buffer_->Add(index, zone());
}

void Translation::StoreDoubleStackSlot(int index) {
  buffer_->Add(DOUBLE_STACK_SLOT, zone());
  buffer_->Add(index, zone());
}

void Translation::StoreLiteral(int literal_id) {
  buffer_->Add(LITERAL, zone());
  buffer_->Add(literal_id, zone());
}

void Translation::StoreArgumentsObject(int args_index, int args_length) {
  buffer_->Add(ARGUMENTS_OBJECT, zone());
  buffer_->Add(args_index, zone());
  buffer_->Add(args_length, zone());
}

void Translation::MarkDuplicate() {
  buffer_->Add(DUPLICATE, zone());
}
int Translation::NumberOfOperandsFor(Opcode opcode) {
  switch (opcode) {
    case DUPLICATE:
      return 0;
    case GETTER_STUB_FRAME:
    case SETTER_STUB_FRAME:
    case REGISTER:
    case INT32_REGISTER:
    case UINT32_REGISTER:
    case DOUBLE_REGISTER:
    case STACK_SLOT:
    case INT32_STACK_SLOT:
    case UINT32_STACK_SLOT:
    case DOUBLE_STACK_SLOT:
    case LITERAL:
      return 1;
    case BEGIN:
    case ARGUMENTS_ADAPTOR_FRAME:
    case CONSTRUCT_STUB_FRAME:
    case ARGUMENTS_OBJECT:
      return 2;
    case JS_FRAME:
      return 3;
  }
  UNREACHABLE();
  return -1;
}
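// Illustrative stream for one function inlined into another (operand counts
// follow NumberOfOperandsFor above):
//   BEGIN <frame_count=2> <jsframe_count=2>
//   JS_FRAME <ast id> <literal id> <height> <per-slot commands ...>
//   JS_FRAME <ast id> <literal id> <height> <per-slot commands ...>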
#if defined(OBJECT_PRINT) || defined(ENABLE_DISASSEMBLER)

const char* Translation::StringFor(Opcode opcode) {
  switch (opcode) {
    case BEGIN:
      return "BEGIN";
    case JS_FRAME:
      return "JS_FRAME";
    case ARGUMENTS_ADAPTOR_FRAME:
      return "ARGUMENTS_ADAPTOR_FRAME";
    case CONSTRUCT_STUB_FRAME:
      return "CONSTRUCT_STUB_FRAME";
    case GETTER_STUB_FRAME:
      return "GETTER_STUB_FRAME";
    case SETTER_STUB_FRAME:
      return "SETTER_STUB_FRAME";
    case REGISTER:
      return "REGISTER";
    case INT32_REGISTER:
      return "INT32_REGISTER";
    case UINT32_REGISTER:
      return "UINT32_REGISTER";
    case DOUBLE_REGISTER:
      return "DOUBLE_REGISTER";
    case STACK_SLOT:
      return "STACK_SLOT";
    case INT32_STACK_SLOT:
      return "INT32_STACK_SLOT";
    case UINT32_STACK_SLOT:
      return "UINT32_STACK_SLOT";
    case DOUBLE_STACK_SLOT:
      return "DOUBLE_STACK_SLOT";
    case LITERAL:
      return "LITERAL";
    case ARGUMENTS_OBJECT:
      return "ARGUMENTS_OBJECT";
    case DUPLICATE:
      return "DUPLICATE";
  }
  UNREACHABLE();
  return "";
}

#endif
DeoptimizingCodeListNode::DeoptimizingCodeListNode(Code* code) : next_(NULL) {
  GlobalHandles* global_handles = Isolate::Current()->global_handles();
  // Globalize the code object and make it weak, so we are notified when the
  // code is collected.
  code_ = Handle<Code>::cast(global_handles->Create(code));
  global_handles->MakeWeak(reinterpret_cast<Object**>(code_.location()),
                           this,
                           Deoptimizer::HandleWeakDeoptimizedCode);
}

DeoptimizingCodeListNode::~DeoptimizingCodeListNode() {
  GlobalHandles* global_handles = Isolate::Current()->global_handles();
  global_handles->Destroy(reinterpret_cast<Object**>(code_.location()));
}
SlotRef SlotRef::ComputeSlotForNextArgument(TranslationIterator* iterator,
                                            DeoptimizationInputData* data,
                                            JavaScriptFrame* frame) {
  Translation::Opcode opcode =
      static_cast<Translation::Opcode>(iterator->Next());

  switch (opcode) {
    case Translation::BEGIN:
    case Translation::JS_FRAME:
    case Translation::ARGUMENTS_ADAPTOR_FRAME:
    case Translation::CONSTRUCT_STUB_FRAME:
    case Translation::GETTER_STUB_FRAME:
    case Translation::SETTER_STUB_FRAME:
      // Peeled off before getting here.
      break;

    case Translation::ARGUMENTS_OBJECT:
      // This can be only emitted for local slots, not for argument slots.
      break;

    case Translation::REGISTER:
    case Translation::INT32_REGISTER:
    case Translation::UINT32_REGISTER:
    case Translation::DOUBLE_REGISTER:
    case Translation::DUPLICATE:
      // We are at a safepoint that corresponds to a call. All registers are
      // saved by the caller, so there are no live registers here and these
      // translation commands should not be used.
      break;

    case Translation::STACK_SLOT: {
      int slot_index = iterator->Next();
      Address slot_addr = SlotAddress(frame, slot_index);
      return SlotRef(slot_addr, SlotRef::TAGGED);
    }

    case Translation::INT32_STACK_SLOT: {
      int slot_index = iterator->Next();
      Address slot_addr = SlotAddress(frame, slot_index);
      return SlotRef(slot_addr, SlotRef::INT32);
    }

    case Translation::UINT32_STACK_SLOT: {
      int slot_index = iterator->Next();
      Address slot_addr = SlotAddress(frame, slot_index);
      return SlotRef(slot_addr, SlotRef::UINT32);
    }

    case Translation::DOUBLE_STACK_SLOT: {
      int slot_index = iterator->Next();
      Address slot_addr = SlotAddress(frame, slot_index);
      return SlotRef(slot_addr, SlotRef::DOUBLE);
    }

    case Translation::LITERAL: {
      int literal_index = iterator->Next();
      return SlotRef(data->LiteralArray()->get(literal_index));
    }
  }

  UNREACHABLE();
  return SlotRef();
}
void SlotRef::ComputeSlotsForArguments(Vector<SlotRef>* args_slots,
                                       TranslationIterator* it,
                                       DeoptimizationInputData* data,
                                       JavaScriptFrame* frame) {
  // Skip the translation command for the receiver.
  it->Skip(Translation::NumberOfOperandsFor(
      static_cast<Translation::Opcode>(it->Next())));

  // Compute slots for the arguments.
  for (int i = 0; i < args_slots->length(); ++i) {
    (*args_slots)[i] = ComputeSlotForNextArgument(it, data, frame);
  }
}
Vector<SlotRef> SlotRef::ComputeSlotMappingForArguments(
    JavaScriptFrame* frame,
    int inlined_jsframe_index,
    int formal_parameter_count) {
  AssertNoAllocation no_gc;
  int deopt_index = Safepoint::kNoDeoptimizationIndex;
  DeoptimizationInputData* data =
      static_cast<OptimizedFrame*>(frame)->GetDeoptimizationData(&deopt_index);
  TranslationIterator it(data->TranslationByteArray(),
                         data->TranslationIndex(deopt_index)->value());
  Translation::Opcode opcode = static_cast<Translation::Opcode>(it.Next());
  ASSERT(opcode == Translation::BEGIN);
  it.Next();  // Drop frame count.
  int jsframe_count = it.Next();
  ASSERT(jsframe_count > inlined_jsframe_index);
  int jsframes_to_skip = inlined_jsframe_index;
  while (true) {
    opcode = static_cast<Translation::Opcode>(it.Next());
    if (opcode == Translation::ARGUMENTS_ADAPTOR_FRAME) {
      if (jsframes_to_skip == 0) {
        ASSERT(Translation::NumberOfOperandsFor(opcode) == 2);
        it.Skip(1);  // literal id
        int height = it.Next();

        // We reached the arguments adaptor frame corresponding to the
        // inlined function in question; the argument count is height - 1.
        Vector<SlotRef> args_slots =
            Vector<SlotRef>::New(height - 1);  // Minus receiver.
        ComputeSlotsForArguments(&args_slots, &it, data, frame);
        return args_slots;
      }
    } else if (opcode == Translation::JS_FRAME) {
      if (jsframes_to_skip == 0) {
        // We reached the frame of the inlined function in question; the
        // argument count equals the formal parameter count plus receiver.
        it.Skip(Translation::NumberOfOperandsFor(opcode));
        Vector<SlotRef> args_slots =
            Vector<SlotRef>::New(formal_parameter_count + 1);
        ComputeSlotsForArguments(&args_slots, &it, data, frame);
        return args_slots;
      }
      jsframes_to_skip--;
    }

    // Skip over the operands to advance to the next opcode.
    it.Skip(Translation::NumberOfOperandsFor(opcode));
  }

  UNREACHABLE();
  return Vector<SlotRef>();
}
#ifdef ENABLE_DEBUGGER_SUPPORT

DeoptimizedFrameInfo::DeoptimizedFrameInfo(Deoptimizer* deoptimizer,
                                           int frame_index,
                                           bool has_arguments_adaptor,
                                           bool has_construct_stub) {
  FrameDescription* output_frame = deoptimizer->output_[frame_index];
  function_ = output_frame->GetFunction();
  has_construct_stub_ = has_construct_stub;
  expression_count_ = output_frame->GetExpressionCount();
  expression_stack_ = new Object*[expression_count_];
  // Get the source position using the unoptimized code.
  Address pc = reinterpret_cast<Address>(output_frame->GetPc());
  Code* code = Code::cast(Isolate::Current()->heap()->FindCodeObject(pc));
  source_position_ = code->SourcePosition(pc);

  for (int i = 0; i < expression_count_; i++) {
    SetExpression(i, output_frame->GetExpression(i));
  }

  if (has_arguments_adaptor) {
    output_frame = deoptimizer->output_[frame_index - 1];
    ASSERT(output_frame->GetFrameType() == StackFrame::ARGUMENTS_ADAPTOR);
  }

  parameters_count_ = output_frame->ComputeParametersCount();
  parameters_ = new Object*[parameters_count_];
  for (int i = 0; i < parameters_count_; i++) {
    SetParameter(i, output_frame->GetParameter(i));
  }
}
DeoptimizedFrameInfo::~DeoptimizedFrameInfo() {
  delete[] expression_stack_;
  delete[] parameters_;
}

void DeoptimizedFrameInfo::Iterate(ObjectVisitor* v) {
  v->VisitPointer(BitCast<Object**>(&function_));
  v->VisitPointers(parameters_, parameters_ + parameters_count_);
  v->VisitPointers(expression_stack_, expression_stack_ + expression_count_);
}

#endif  // ENABLE_DEBUGGER_SUPPORT